From 1715b6359c1ae37f24d6774f0bcd73b6bf839eaa Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 6 Oct 2023 16:14:54 -0300 Subject: [PATCH 001/179] perf beauty socket/prctl_option: Cope with extended regexp complaint by grep Noticed on Fedora 38: the extended regexp that so far was OK for both grep and sed now gets a complaint from grep, which says that '/' doesn't need to be escaped with '\'. So stop using '/' as the sed delimiter, use '%' instead, and remove the '\' before '/' in the common extended regexp. Link: https://x.com/SMT_Solvers/status/1710380010098344192?s=20 Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZUEddFPTJHVLhH%2F6@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/prctl_option.sh | 4 ++-- tools/perf/trace/beauty/socket.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh index 8059342ca412..9455d9672f14 100755 --- a/tools/perf/trace/beauty/prctl_option.sh +++ b/tools/perf/trace/beauty/prctl_option.sh @@ -4,9 +4,9 @@ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *prctl_options[] = {\n" -regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$' +regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*/.*)?$' grep -E $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ - sed -r "s/$regex/\2 \1/g" | \ + sed -E "s%$regex%\2 \1%g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" printf "};\n" diff --git a/tools/perf/trace/beauty/socket.sh b/tools/perf/trace/beauty/socket.sh index 8bc7ba62203e..670c6db298ae 100755 --- a/tools/perf/trace/beauty/socket.sh +++ b/tools/perf/trace/beauty/socket.sh @@ -18,10 +18,10 @@ grep -E $ipproto_regex ${uapi_header_dir}/in.h | \ printf "};\n\n" printf "static const char *socket_level[] = {\n" -socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+\/.*)?' +socket_level_regex='^#define[[:space:]]+SOL_(\w+)[[:space:]]+([[:digit:]]+)([[:space:]]+/.*)?'
grep -E $socket_level_regex ${beauty_header_dir}/socket.h | \ - sed -r "s/$socket_level_regex/\2 \1/g" | \ + sed -E "s%$socket_level_regex%\2 \1%g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" printf "};\n\n" From c8e3ade38bc6545faece71cc6c642ad744d4cea3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 6 Oct 2023 18:11:05 -0300 Subject: [PATCH 002/179] perf tests make: Remove the last egrep call, use 'grep -E' instead One last case, caught while testing with amazonlinux:2, centos:stream, etc: 4 7.28 amazonlinux:2 : FAIL egrep: warning: egrep is obsolescent; using grep -E gcc version 7.3.1 20180712 (Red Hat 7.3.1-17) (GCC) 8 13.87 centos:stream : FAIL egrep: warning: egrep is obsolescent; using grep -E Reviewed-by: Guilherme Amadio Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZUEdtblE8qDAQkBK@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/make | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/make b/tools/perf/tests/make index d9945ed25bc5..8a4da7eb637a 100644 --- a/tools/perf/tests/make +++ b/tools/perf/tests/make @@ -183,7 +183,7 @@ run += make_install_prefix_slash # run += make_install_pdf run += make_minimal -old_libbpf := $(shell echo '\#include ' | $(CC) -E -dM -x c -| egrep -q "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}") +old_libbpf := $(shell echo '\#include ' | $(CC) -E -dM -x c -| grep -q -E "define[[:space:]]+LIBBPF_MAJOR_VERSION[[:space:]]+0{1}") ifneq ($(old_libbpf),) run += make_libbpf_dynamic From 851bbccf6b0c152d98ecf0ec83d75fc97aebf43c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Oct 2023 18:14:52 -0300 Subject: [PATCH 003/179] perf build: Warn about missing libelf before warning about missing libbpf As libelf is a requirement for libbpf, if it is not available, as in some container build tests where NO_LIBELF=1 is used, it is better to warn about the most basic library first. Ditto for libz: check its availability before libbpf too.
Cc: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZUEehyDk0FkPnvMR@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index b3e6ed10f40c..8b6cffbc4858 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -680,15 +680,15 @@ ifndef BUILD_BPF_SKEL endif ifeq ($(BUILD_BPF_SKEL),1) - ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),) - dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required) - BUILD_BPF_SKEL := 0 - else ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),) + ifeq ($(filter -DHAVE_LIBELF_SUPPORT, $(CFLAGS)),) dummy := $(warning Warning: Disabled BPF skeletons as libelf is required by bpftool) BUILD_BPF_SKEL := 0 else ifeq ($(filter -DHAVE_ZLIB_SUPPORT, $(CFLAGS)),) dummy := $(warning Warning: Disabled BPF skeletons as zlib is required by bpftool) BUILD_BPF_SKEL := 0 + else ifeq ($(filter -DHAVE_LIBBPF_SUPPORT, $(CFLAGS)),) + dummy := $(warning Warning: Disabled BPF skeletons as libbpf is required) + BUILD_BPF_SKEL := 0 else ifeq ($(call get-executable,$(CLANG)),) dummy := $(warning Warning: Disabled BPF skeletons as clang ($(CLANG)) is missing) BUILD_BPF_SKEL := 0 From 76db7aab1fca6688ddf9f388157521c442e0ffb8 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:24 -0700 Subject: [PATCH 004/179] tools headers UAPI: Sync include/uapi/linux/perf_event.h header with the kernel Sync the new sample type for the branch counters feature. Signed-off-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Bayduraev Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tinghao Zhang Link: https://lore.kernel.org/r/20231025201626.3000228-6-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/perf_event.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 39c6a250dd1b..3a64499b0f5d 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. 
+ * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) From ac9cd7245fffa0fc053afce3b345469e5afa533a Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:25 -0700 Subject: [PATCH 005/179] perf header: Support num and width of branch counters To support the branch counters feature, the information of the maximum number of supported counters and the width of the counters is exposed in the sysfs caps folder. The perf tool can use the information to parse the logged counters in each branch. Store the information in the perf_env for later usage. Reviewed-by: Ian Rogers Signed-off-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Bayduraev Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tinghao Zhang Link: https://lore.kernel.org/r/20231025201626.3000228-7-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/env.h | 5 +++++ tools/perf/util/header.c | 18 +++++++++++++++--- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 4566c51f2fd9..48d7f8759a2a 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -46,6 +46,9 @@ struct hybrid_node { struct pmu_caps { int nr_caps; unsigned int max_branches; + unsigned int br_cntr_nr; + unsigned int br_cntr_width; + char **caps; char *pmu_name; }; @@ -62,6 +65,8 @@ struct perf_env { unsigned long long total_mem; unsigned int msr_pmu_type; unsigned int max_branches; + unsigned int br_cntr_nr; + unsigned int br_cntr_width; int kernel_is_64_bit; int nr_cmdline; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index e86b9439ffee..eeb96a1b63a7 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -3259,7 +3259,9 @@ static int process_compressed(struct feat_fd *ff, } static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps, - char ***caps, unsigned int *max_branches) + char ***caps, unsigned int *max_branches, + unsigned int *br_cntr_nr, + unsigned int *br_cntr_width) { char *name, *value, *ptr; u32 nr_pmu_caps, i; @@ -3294,6 +3296,12 @@ static int __process_pmu_caps(struct feat_fd *ff, int *nr_caps, if (!strcmp(name, "branches")) *max_branches = atoi(value); + if (!strcmp(name, "branch_counter_nr")) + *br_cntr_nr = atoi(value); + + if (!strcmp(name, "branch_counter_width")) + *br_cntr_width = atoi(value); + free(value); free(name); } @@ -3318,7 +3326,9 @@ static int process_cpu_pmu_caps(struct feat_fd *ff, { int ret = __process_pmu_caps(ff, &ff->ph->env.nr_cpu_pmu_caps, &ff->ph->env.cpu_pmu_caps, - &ff->ph->env.max_branches); + &ff->ph->env.max_branches, + &ff->ph->env.br_cntr_nr, + &ff->ph->env.br_cntr_width); if (!ret && !ff->ph->env.cpu_pmu_caps) pr_debug("cpu pmu capabilities not available\n"); @@ -3347,7 +3357,9 @@ static int process_pmu_caps(struct feat_fd *ff, void *data __maybe_unused) for (i = 0; i < nr_pmu; i++) { ret = __process_pmu_caps(ff, &pmu_caps[i].nr_caps, &pmu_caps[i].caps, - &pmu_caps[i].max_branches); + &pmu_caps[i].max_branches, + &pmu_caps[i].br_cntr_nr, + &pmu_caps[i].br_cntr_width); if (ret) goto err; From 9fbb4b02302b0ae618303565025412070d32f85e Mon Sep 17 
00:00:00 2001 From: Kan Liang Date: Wed, 25 Oct 2023 13:16:26 -0700 Subject: [PATCH 006/179] perf tools: Add branch counter knob Add a new branch filter, "counter", for the branch counter option. It is used to mark the events which should be logged in the branch. If it is applied with the -j option, the counters of all the events should be logged in the branch. If a legacy kernel doesn't support the new branch sample type, the branch counter filter is switched off. The stored counter values in each branch are displayed right after the regular branch stack information via perf report -D. Usage examples: # perf record -e "{branch-instructions,branch-misses}:S" -j any,counter Only the first event, branch-instructions, collects the LBR. Both branch-instructions and branch-misses are marked as logged events. Their occurrence information can be found in the branch stack extension space of each branch. # perf record -e "{cpu/branch-instructions,branch_type=any/,cpu/branch-misses,branch_type=counter/}" Only the first event, branch-instructions, collects the LBR. Only the branch-misses event is marked as a logged event. Committer notes: I noticed 'perf test "Sample parsing"' failing, reported it to the list, and Kan provided a patch that checks if the evsel has a leader and that evsel->evlist is set; the comment in the source code further explains it. Reviewed-by: Ian Rogers Signed-off-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexey Bayduraev Cc: Andi Kleen Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Tinghao Zhang Link: https://lore.kernel.org/r/20231025201626.3000228-8-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-record.txt | 4 +++ tools/perf/util/evsel.c | 35 ++++++++++++++++++++++- tools/perf/util/evsel.h | 1 + tools/perf/util/parse-branch-options.c | 1 + tools/perf/util/perf_event_attr_fprintf.c | 1 + tools/perf/util/sample.h | 1 + tools/perf/util/session.c | 15 ++++++++-- 7 files changed, 55 insertions(+), 3 deletions(-) diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index 1889f66addf2..6015fdd08fb6 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt @@ -445,6 +445,10 @@ following filters are defined: 4th-Gen Xeon+ server), the save branch type is unconditionally enabled when the taken branch stack sampling is enabled. - priv: save privilege state during sampling in case binary is not available later + - counter: save occurrences of the event since the last branch entry. Currently, the + feature is only supported by a newer CPU, e.g., Intel Sierra Forest and + later platforms. An error out is expected if it's used on the unsupported + kernel or CPUs. + The option requires at least one branch type among any, any_call, any_ret, ind_call, cond.
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 72a5dfc38d38..a5da74e3a517 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1832,6 +1832,8 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, static void evsel__disable_missing_features(struct evsel *evsel) { + if (perf_missing_features.branch_counters) + evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS; if (perf_missing_features.read_lost) evsel->core.attr.read_format &= ~PERF_FORMAT_LOST; if (perf_missing_features.weight_struct) { @@ -1885,7 +1887,12 @@ bool evsel__detect_missing_features(struct evsel *evsel) * Must probe features in the order they were added to the * perf_event_attr interface. */ - if (!perf_missing_features.read_lost && + if (!perf_missing_features.branch_counters && + (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) { + perf_missing_features.branch_counters = true; + pr_debug2("switching off branch counters support\n"); + return true; + } else if (!perf_missing_features.read_lost && (evsel->core.attr.read_format & PERF_FORMAT_LOST)) { perf_missing_features.read_lost = true; pr_debug2("switching off PERF_FORMAT_LOST support\n"); @@ -2318,6 +2325,22 @@ u64 evsel__bitfield_swap_branch_flags(u64 value) return new_val; } +static inline bool evsel__has_branch_counters(const struct evsel *evsel) +{ + struct evsel *cur, *leader = evsel__leader(evsel); + + /* The branch counters feature only supports group */ + if (!leader || !evsel->evlist) + return false; + + evlist__for_each_entry(evsel->evlist, cur) { + if ((leader == evsel__leader(cur)) && + (cur->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) + return true; + } + return false; +} + int evsel__parse_sample(struct evsel *evsel, union perf_event *event, struct perf_sample *data) { @@ -2551,6 +2574,16 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event, OVERFLOW_CHECK(array, sz, max_size); array = (void *)array + sz; + + if (evsel__has_branch_counters(evsel)) { + OVERFLOW_CHECK_u64(array); + + data->branch_stack_cntr = (u64 *)array; + sz = data->branch_stack->nr * sizeof(u64); + + OVERFLOW_CHECK(array, sz, max_size); + array = (void *)array + sz; + } } if (type & PERF_SAMPLE_REGS_USER) { diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index d791316a1792..f19ac9f027ef 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -191,6 +191,7 @@ struct perf_missing_features { bool code_page_size; bool weight_struct; bool read_lost; + bool branch_counters; }; extern struct perf_missing_features perf_missing_features; diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c index fd67d204d720..f7f7aff3d85a 100644 --- a/tools/perf/util/parse-branch-options.c +++ b/tools/perf/util/parse-branch-options.c @@ -36,6 +36,7 @@ static const struct branch_mode branch_modes[] = { BRANCH_OPT("stack", PERF_SAMPLE_BRANCH_CALL_STACK), BRANCH_OPT("hw_index", PERF_SAMPLE_BRANCH_HW_INDEX), BRANCH_OPT("priv", PERF_SAMPLE_BRANCH_PRIV_SAVE), + BRANCH_OPT("counter", PERF_SAMPLE_BRANCH_COUNTERS), BRANCH_END }; diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c index 2247991451f3..8f04d3b7f3ec 100644 --- a/tools/perf/util/perf_event_attr_fprintf.c +++ b/tools/perf/util/perf_event_attr_fprintf.c @@ -55,6 +55,7 @@ static void __p_branch_sample_type(char *buf, size_t size, u64 value) bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP), bit_name(CALL), 
bit_name(NO_FLAGS), bit_name(NO_CYCLES), bit_name(TYPE_SAVE), bit_name(HW_INDEX), bit_name(PRIV_SAVE), + bit_name(COUNTERS), { .name = NULL, } }; #undef bit_name diff --git a/tools/perf/util/sample.h b/tools/perf/util/sample.h index c92ad0f51ecd..70b2c3135555 100644 --- a/tools/perf/util/sample.h +++ b/tools/perf/util/sample.h @@ -113,6 +113,7 @@ struct perf_sample { void *raw_data; struct ip_callchain *callchain; struct branch_stack *branch_stack; + u64 *branch_stack_cntr; struct regs_dump user_regs; struct regs_dump intr_regs; struct stack_dump user_stack; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 1e9aa8ed15b6..4a094ab0362b 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -1150,9 +1150,13 @@ static void callchain__printf(struct evsel *evsel, i, callchain->ips[i]); } -static void branch_stack__printf(struct perf_sample *sample, bool callstack) +static void branch_stack__printf(struct perf_sample *sample, + struct evsel *evsel) { struct branch_entry *entries = perf_sample__branch_entries(sample); + bool callstack = evsel__has_branch_callstack(evsel); + u64 *branch_stack_cntr = sample->branch_stack_cntr; + struct perf_env *env = evsel__env(evsel); uint64_t i; if (!callstack) { @@ -1194,6 +1198,13 @@ static void branch_stack__printf(struct perf_sample *sample, bool callstack) } } } + + if (branch_stack_cntr) { + printf("... branch stack counters: nr:%" PRIu64 " (counter width: %u max counter nr:%u)\n", + sample->branch_stack->nr, env->br_cntr_width, env->br_cntr_nr); + for (i = 0; i < sample->branch_stack->nr; i++) + printf("..... %2"PRIu64": %016" PRIx64 "\n", i, branch_stack_cntr[i]); + } } static void regs_dump__printf(u64 mask, u64 *regs, const char *arch) @@ -1355,7 +1366,7 @@ static void dump_sample(struct evsel *evsel, union perf_event *event, callchain__printf(evsel, sample); if (evsel__has_br_stack(evsel)) - branch_stack__printf(sample, evsel__has_branch_callstack(evsel)); + branch_stack__printf(sample, evsel); if (sample_type & PERF_SAMPLE_REGS_USER) regs_user__printf(sample, arch); From 7ff7b7afe364af17362f2a08cccdc79905feb0bc Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 3 Oct 2023 08:49:11 +0100 Subject: [PATCH 007/179] perf tools: Fix spelling mistake "parametrized" -> "parameterized" There are spelling mistakes in comments and a pr_debug message. Fix them. 
Reviewed-by: Athira Jajeev Reviewed-by: Ian Rogers Signed-off-by: Colin Ian King Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Peter Zijlstra Cc: kernel-janitors@vger.kernel.org Link: https://lore.kernel.org/r/20231003074911.220216-1-colin.i.king@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 4 ++-- tools/perf/tests/shell/stat_all_pmu.sh | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index f78be21a5999..e52f45c7c3d1 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -2549,7 +2549,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest if (strchr(ent->d_name, '.')) continue; - /* exclude parametrized ones (name contains '?') */ + /* exclude parameterized ones (name contains '?') */ n = snprintf(pmu_event, sizeof(pmu_event), "%s%s", path, ent->d_name); if (n >= PATH_MAX) { pr_err("pmu event name crossed PATH_MAX(%d) size\n", PATH_MAX); @@ -2578,7 +2578,7 @@ static int test__pmu_events(struct test_suite *test __maybe_unused, int subtest fclose(file); if (is_event_parameterized == 1) { - pr_debug("skipping parametrized PMU event: %s which contains ?\n", pmu_event); + pr_debug("skipping parameterized PMU event: %s which contains ?\n", pmu_event); continue; } diff --git a/tools/perf/tests/shell/stat_all_pmu.sh b/tools/perf/tests/shell/stat_all_pmu.sh index c77955419173..d2a3506e0d19 100755 --- a/tools/perf/tests/shell/stat_all_pmu.sh +++ b/tools/perf/tests/shell/stat_all_pmu.sh @@ -4,7 +4,7 @@ set -e -# Test all PMU events; however exclude parametrized ones (name contains '?') +# Test all PMU events; however exclude parameterized ones (name contains '?') for p in $(perf list --raw-dump pmu | sed 's/[[:graph:]]\+?[[:graph:]]\+[[:space:]]//g'); do echo "Testing $p" result=$(perf stat -e "$p" true 2>&1) From 1a27fc01700fbff2f205000edf0d1d315b5f85cc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 2 Nov 2023 10:56:44 -0700 Subject: [PATCH 008/179] perf record: Lazy load kernel symbols Commit 5b7ba82a75915e73 ("perf symbols: Load kernel maps before using") changed it so that loading a kernel DSO would cause the symbols for the DSO to be eagerly loaded. For 'perf record' this is overhead as the symbols won't be used. Add a field to 'struct symbol_conf' to control the behavior and disable it for 'perf record' and 'perf inject'. Reviewed-by: Adrian Hunter Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231102175735.2272696-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-inject.c | 6 ++++++ tools/perf/builtin-record.c | 2 ++ tools/perf/util/event.c | 4 ++-- tools/perf/util/symbol_conf.h | 3 ++- 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index c8cf2fdd9cff..eb3ef5c24b66 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -2265,6 +2265,12 @@ int cmd_inject(int argc, const char **argv) "perf inject []", NULL }; + + if (!inject.itrace_synth_opts.set) { + /* Disable eager loading of kernel symbols that adds overhead to perf inject. */ + symbol_conf.lazy_load_kernel_maps = true; + } + #ifndef HAVE_JITDUMP set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true); #endif diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index dcf288a4fb9a..8ec818568662 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -3989,6 +3989,8 @@ int cmd_record(int argc, const char **argv) # undef set_nobuild #endif + /* Disable eager loading of kernel symbols that adds overhead to perf record. */ + symbol_conf.lazy_load_kernel_maps = true; rec->opts.affinity = PERF_AFFINITY_SYS; rec->evlist = evlist__new(); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 923c0fb15122..68f45e9e63b6 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -617,13 +617,13 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) { al->level = 'k'; maps = machine__kernel_maps(machine); - load_map = true; + load_map = !symbol_conf.lazy_load_kernel_maps; } else if (cpumode == PERF_RECORD_MISC_USER && perf_host) { al->level = '.'; } else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) { al->level = 'g'; maps = machine__kernel_maps(machine); - load_map = true; + load_map = !symbol_conf.lazy_load_kernel_maps; } else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) { al->level = 'u'; } else { diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h index 0b589570d1d0..2b2fb9e224b0 100644 --- a/tools/perf/util/symbol_conf.h +++ b/tools/perf/util/symbol_conf.h @@ -42,7 +42,8 @@ struct symbol_conf { inline_name, disable_add2line_warn, buildid_mmap2, - guest_code; + guest_code, + lazy_load_kernel_maps; const char *vmlinux_name, *kallsyms_name, *source_prefix, From 9ffa6c7512ca7aaeb30e596e2c247cb1fae7123a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 2 Nov 2023 10:56:47 -0700 Subject: [PATCH 009/179] perf machine thread: Remove exited threads by default 'struct thread' values hold onto references to mmaps, DSOs, etc. When a thread exits it is necessary to clean all of this memory up by removing the thread from the machine's threads. Some tools require this doesn't happen, such as auxtrace events, 'perf report' if offcpu events exist or if a task list is being generated, so add a 'struct symbol_conf' member to make the behavior optional. When an exited thread is left in the machine's threads, mark it as exited. This change relates to commit 40826c45eb0b8856 ("perf thread: Remove notion of dead threads") . Dead threads were removed as they had a reference count of 0 and were difficult to reason about with the reference count checker. 
Here a thread is removed from threads when it exits, unless, via symbol_conf, the exited thread isn't removed and is instead marked as exited. Reference counting behaves as it normally does. Reviewed-by: Adrian Hunter Signed-off-by: Ian Rogers Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231102175735.2272696-6-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 7 +++++++ tools/perf/util/machine.c | 10 +++++++--- tools/perf/util/session.c | 5 +++++ tools/perf/util/symbol_conf.h | 3 ++- tools/perf/util/thread.h | 14 ++++++++++++++ 5 files changed, 35 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 9cb1da2dc0c0..121a2781323c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1426,6 +1426,13 @@ int cmd_report(int argc, const char **argv) if (ret < 0) goto exit; + /* + * tasks_mode require access to exited threads to list those that are in + * the data file. Off-cpu events are synthesized after other events and + * reference exited threads. + */ + symbol_conf.keep_exited_threads = true; + annotation_options__init(&report.annotation_opts); ret = perf_config(report__config, &report); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 90c750150b19..a985d004aa8d 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -2157,9 +2157,13 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event if (dump_trace) perf_event__fprintf_task(event, stdout); - if (thread != NULL) - thread__put(thread); - + if (thread != NULL) { + if (symbol_conf.keep_exited_threads) + thread__set_exited(thread, /*exited=*/true); + else + machine__remove_thread(machine, thread); + } + thread__put(thread); return 0; } diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 4a094ab0362b..199d3e8df315 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -115,6 +115,11 @@ static int perf_session__open(struct perf_session *session, int repipe_fd) return -1; } + if (perf_header__has_feat(&session->header, HEADER_AUXTRACE)) { + /* Auxiliary events may reference exited threads, hold onto dead ones.
*/ symbol_conf.keep_exited_threads = true; + } + if (perf_data__is_pipe(data)) return 0; diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h index 2b2fb9e224b0..6040286e07a6 100644 --- a/tools/perf/util/symbol_conf.h +++ b/tools/perf/util/symbol_conf.h @@ -43,7 +43,8 @@ struct symbol_conf { disable_add2line_warn, buildid_mmap2, guest_code, - lazy_load_kernel_maps; + lazy_load_kernel_maps, + keep_exited_threads; const char *vmlinux_name, *kallsyms_name, *source_prefix, diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index e79225a0ea46..0df775b5c110 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -36,13 +36,22 @@ struct thread_rb_node { }; DECLARE_RC_STRUCT(thread) { + /** @maps: mmaps associated with this thread. */ struct maps *maps; pid_t pid_; /* Not all tools update this */ + /** @tid: thread ID number unique to a machine. */ pid_t tid; + /** @ppid: parent process of the process this thread belongs to. */ pid_t ppid; int cpu; int guest_cpu; /* For QEMU thread */ refcount_t refcnt; + /** + * @exited: Has the thread had an exit event. Such threads are usually + * removed from the machine's threads but some events/tools require + * access to dead threads. + */ + bool exited; bool comm_set; int comm_len; struct list_head namespaces_list; @@ -189,6 +198,11 @@ static inline refcount_t *thread__refcnt(struct thread *thread) return &RC_CHK_ACCESS(thread)->refcnt; } +static inline void thread__set_exited(struct thread *thread, bool exited) +{ + RC_CHK_ACCESS(thread)->exited = exited; +} + static inline bool thread__comm_set(const struct thread *thread) { return RC_CHK_ACCESS(thread)->comm_set; From 89d5c48c34c8a552acd9a2e3ee504b2f06879fea Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:55:41 -0700 Subject: [PATCH 010/179] perf test: Simplify "object code reading" test It tries to open the cycles event (or the cpu-clock event on s390) with the exclude_kernel bit set. But other architectures on a VM can fail with the hardware event and need to fall back to the software event in the same way. So let's get rid of the cpuid check and use a generic fallback mechanism based on an array of event candidates. Now the event at an odd index excludes the kernel, so use that for the return value. Reviewed-by: Adrian Hunter Signed-off-by: Namhyung Kim Tested-by: James Clark Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Thomas Richter Link: https://lore.kernel.org/r/20231103195541.67788-1-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/code-reading.c | 76 ++++++++++----------------------- 1 file changed, 23 insertions(+), 53 deletions(-) diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 3af81012014e..d6e845c57902 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -511,38 +511,6 @@ static void fs_something(void) } } -#ifdef __s390x__ -#include "header.h" // for get_cpuid() -#endif - -static const char *do_determine_event(bool excl_kernel) -{ - const char *event = excl_kernel ?
"cycles:u" : "cycles"; - -#ifdef __s390x__ - char cpuid[128], model[16], model_c[16], cpum_cf_v[16]; - unsigned int family; - int ret, cpum_cf_a; - - if (get_cpuid(cpuid, sizeof(cpuid))) - goto out_clocks; - ret = sscanf(cpuid, "%*[^,],%u,%[^,],%[^,],%[^,],%x", &family, model_c, - model, cpum_cf_v, &cpum_cf_a); - if (ret != 5) /* Not available */ - goto out_clocks; - if (excl_kernel && (cpum_cf_a & 4)) - return event; - if (!excl_kernel && (cpum_cf_a & 2)) - return event; - - /* Fall through: missing authorization */ -out_clocks: - event = excl_kernel ? "cpu-clock:u" : "cpu-clock"; - -#endif - return event; -} - static void do_something(void) { fs_something(); @@ -583,8 +551,10 @@ static int do_test_code_reading(bool try_kcore) int err = -1, ret; pid_t pid; struct map *map; - bool have_vmlinux, have_kcore, excl_kernel = false; + bool have_vmlinux, have_kcore; struct dso *dso; + const char *events[] = { "cycles", "cycles:u", "cpu-clock", "cpu-clock:u", NULL }; + int evidx = 0; pid = getpid(); @@ -618,7 +588,7 @@ static int do_test_code_reading(bool try_kcore) /* No point getting kernel events if there is no kernel object */ if (!have_vmlinux && !have_kcore) - excl_kernel = true; + evidx++; threads = thread_map__new_by_tid(pid); if (!threads) { @@ -646,7 +616,7 @@ static int do_test_code_reading(bool try_kcore) goto out_put; } - while (1) { + while (events[evidx]) { const char *str; evlist = evlist__new(); @@ -657,7 +627,7 @@ static int do_test_code_reading(bool try_kcore) perf_evlist__set_maps(&evlist->core, cpus, threads); - str = do_determine_event(excl_kernel); + str = events[evidx]; pr_debug("Parsing event '%s'\n", str); ret = parse_event(evlist, str); if (ret < 0) { @@ -675,32 +645,32 @@ static int do_test_code_reading(bool try_kcore) ret = evlist__open(evlist); if (ret < 0) { - if (!excl_kernel) { - excl_kernel = true; - /* - * Both cpus and threads are now owned by evlist - * and will be freed by following perf_evlist__set_maps - * call. Getting reference to keep them alive. - */ - perf_cpu_map__get(cpus); - perf_thread_map__get(threads); - perf_evlist__set_maps(&evlist->core, NULL, NULL); - evlist__delete(evlist); - evlist = NULL; - continue; - } + evidx++; - if (verbose > 0) { + if (events[evidx] == NULL && verbose > 0) { char errbuf[512]; evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf)); pr_debug("perf_evlist__open() failed!\n%s\n", errbuf); } - goto out_put; + /* + * Both cpus and threads are now owned by evlist + * and will be freed by following perf_evlist__set_maps + * call. Getting reference to keep them alive. + */ + perf_cpu_map__get(cpus); + perf_thread_map__get(threads); + perf_evlist__set_maps(&evlist->core, NULL, NULL); + evlist__delete(evlist); + evlist = NULL; + continue; } break; } + if (events[evidx] == NULL) + goto out_put; + ret = evlist__mmap(evlist, UINT_MAX); if (ret < 0) { pr_debug("evlist__mmap failed\n"); @@ -721,7 +691,7 @@ static int do_test_code_reading(bool try_kcore) err = TEST_CODE_READING_NO_KERNEL_OBJ; else if (!have_vmlinux && !try_kcore) err = TEST_CODE_READING_NO_VMLINUX; - else if (excl_kernel) + else if (strstr(events[evidx], ":u")) err = TEST_CODE_READING_NO_ACCESS; else err = TEST_CODE_READING_OK; From de2c7eb59c342d1a61124caaf2993e325a9becb7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:19:03 -0700 Subject: [PATCH 011/179] perf annotate: Split branch stack cycles information out of 'struct annotation_line' The cycles info is used only when branch stack is provided. 
Separate them from 'struct annotation_line' into a separate struct and lazy allocate them to save some memory. Committer notes: Make annotation__compute_ipc() check if the lazy allocation works, bailing out if so, its callers already do error checking and propagation. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Christophe JAILLET Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231103191907.54531-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 2 +- tools/perf/util/annotate.c | 61 ++++++++++++++++++++++--------- tools/perf/util/annotate.h | 15 +++++--- 3 files changed, 54 insertions(+), 24 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index ccdb2cd11fbf..d2470f87344d 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -337,7 +337,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, max_percent = percent; } - if (max_percent < 0.01 && pos->al.ipc == 0) { + if (max_percent < 0.01 && (!pos->al.cycles || pos->al.cycles->ipc == 0)) { RB_CLEAR_NODE(&pos->al.rb_node); continue; } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 82956adf9963..99ff3bb9cad8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1100,8 +1100,8 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 for (offset = start; offset <= end; offset++) { struct annotation_line *al = notes->offsets[offset]; - if (al && al->ipc == 0.0) { - al->ipc = ipc; + if (al && al->cycles && al->cycles->ipc == 0.0) { + al->cycles->ipc = ipc; cover_insn++; } } @@ -1114,12 +1114,13 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 } } -void annotation__compute_ipc(struct annotation *notes, size_t size) +static int annotation__compute_ipc(struct annotation *notes, size_t size) { + int err = 0; s64 offset; if (!notes->src || !notes->src->cycles_hist) - return; + return 0; notes->total_insn = annotation__count_insn(notes, 0, size - 1); notes->hit_cycles = 0; @@ -1134,18 +1135,39 @@ void annotation__compute_ipc(struct annotation *notes, size_t size) if (ch && ch->cycles) { struct annotation_line *al; + al = notes->offsets[offset]; + if (al && al->cycles == NULL) { + al->cycles = zalloc(sizeof(*al->cycles)); + if (al->cycles == NULL) { + err = ENOMEM; + break; + } + } if (ch->have_start) annotation__count_and_fill(notes, ch->start, offset, ch); - al = notes->offsets[offset]; if (al && ch->num_aggr) { - al->cycles = ch->cycles_aggr / ch->num_aggr; - al->cycles_max = ch->cycles_max; - al->cycles_min = ch->cycles_min; + al->cycles->avg = ch->cycles_aggr / ch->num_aggr; + al->cycles->max = ch->cycles_max; + al->cycles->min = ch->cycles_min; } notes->have_cycles = true; } } + + if (err) { + while (++offset < (s64)size) { + struct cyc_hist *ch = ¬es->src->cycles_hist[offset]; + + if (ch && ch->cycles) { + struct annotation_line *al = notes->offsets[offset]; + if (al) + zfree(&al->cycles); + } + } + } + annotation__unlock(notes); + return 0; } int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, @@ -1225,6 +1247,7 @@ static void annotation_line__exit(struct annotation_line *al) { zfree_srcline(&al->path); zfree(&al->line); + zfree(&al->cycles); } static size_t disasm_line_size(int nr) @@ -3083,8 +3106,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati int 
printed; if (first_line && (al->offset == -1 || percent_max == 0.0)) { - if (notes->have_cycles) { - if (al->ipc == 0.0 && al->cycles == 0) + if (notes->have_cycles && al->cycles) { + if (al->cycles->ipc == 0.0 && al->cycles->avg == 0) show_title = true; } else show_title = true; @@ -3121,17 +3144,17 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati } if (notes->have_cycles) { - if (al->ipc) - obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->ipc); + if (al->cycles && al->cycles->ipc) + obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc); else if (!show_title) obj__printf(obj, "%*s", ANNOTATION__IPC_WIDTH, " "); else obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC"); if (!notes->options->show_minmax_cycle) { - if (al->cycles) + if (al->cycles && al->cycles->avg) obj__printf(obj, "%*" PRIu64 " ", - ANNOTATION__CYCLES_WIDTH - 1, al->cycles); + ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg); else if (!show_title) obj__printf(obj, "%*s", ANNOTATION__CYCLES_WIDTH, " "); @@ -3145,8 +3168,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati scnprintf(str, sizeof(str), "%" PRIu64 "(%" PRIu64 "/%" PRIu64 ")", - al->cycles, al->cycles_min, - al->cycles_max); + al->cycles->avg, al->cycles->min, + al->cycles->max); obj__printf(obj, "%*s ", ANNOTATION__MINMAX_CYCLES_WIDTH - 1, @@ -3264,7 +3287,11 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, annotation__set_offsets(notes, size); annotation__mark_jump_targets(notes, sym); - annotation__compute_ipc(notes, size); + + err = annotation__compute_ipc(notes, size); + if (err) + goto out_free_offsets; + annotation__init_column_widths(notes, sym); notes->nr_events = nr_pcnt; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 962780559176..19bc2f039175 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -130,6 +130,13 @@ struct annotation_data { struct sym_hist_entry he; }; +struct cycles_info { + float ipc; + u64 avg; + u64 max; + u64 min; +}; + struct annotation_line { struct list_head node; struct rb_node rb_node; @@ -137,12 +144,9 @@ struct annotation_line { char *line; int line_nr; char *fileloc; - int jump_sources; - float ipc; - u64 cycles; - u64 cycles_max; - u64 cycles_min; char *path; + struct cycles_info *cycles; + int jump_sources; u32 idx; int idx_asm; int data_nr; @@ -325,7 +329,6 @@ static inline bool annotation_line__filter(struct annotation_line *al, struct an } void annotation__set_offsets(struct annotation *notes, s64 size); -void annotation__compute_ipc(struct annotation *notes, size_t size); void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym); void annotation__update_column_widths(struct annotation *notes); void annotation__init_column_widths(struct annotation *notes, struct symbol *sym); From b7f87e32590bf48eca84f729d3422be7b8dc22d3 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:19:04 -0700 Subject: [PATCH 012/179] perf annotate: Split branch stack cycles info from 'struct annotation' The cycles info is only meaningful when sample has branch stacks. To save the memory for normal cases, move those fields to a new 'struct annotated_branch' and dynamically allocate it when needed. Also move cycles_hist from annotated_source as it's related here. 
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Christophe JAILLET Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231103191907.54531-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 101 +++++++++++++++++++---------------- tools/perf/util/annotate.h | 19 ++++--- tools/perf/util/block-info.c | 4 +- tools/perf/util/sort.c | 14 ++--- 4 files changed, 75 insertions(+), 63 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 99ff3bb9cad8..4e40c94c85d1 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -810,7 +810,6 @@ static __maybe_unused void annotated_source__delete(struct annotated_source *src if (src == NULL) return; zfree(&src->histograms); - zfree(&src->cycles_hist); free(src); } @@ -845,18 +844,6 @@ static int annotated_source__alloc_histograms(struct annotated_source *src, return src->histograms ? 0 : -1; } -/* The cycles histogram is lazily allocated. */ -static int symbol__alloc_hist_cycles(struct symbol *sym) -{ - struct annotation *notes = symbol__annotation(sym); - const size_t size = symbol__size(sym); - - notes->src->cycles_hist = calloc(size, sizeof(struct cyc_hist)); - if (notes->src->cycles_hist == NULL) - return -1; - return 0; -} - void symbol__annotate_zero_histograms(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); @@ -865,9 +852,10 @@ void symbol__annotate_zero_histograms(struct symbol *sym) if (notes->src != NULL) { memset(notes->src->histograms, 0, notes->src->nr_histograms * notes->src->sizeof_sym_hist); - if (notes->src->cycles_hist) - memset(notes->src->cycles_hist, 0, - symbol__size(sym) * sizeof(struct cyc_hist)); + } + if (notes->branch && notes->branch->cycles_hist) { + memset(notes->branch->cycles_hist, 0, + symbol__size(sym) * sizeof(struct cyc_hist)); } annotation__unlock(notes); } @@ -958,23 +946,33 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms, return 0; } +static struct annotated_branch *annotation__get_branch(struct annotation *notes) +{ + if (notes == NULL) + return NULL; + + if (notes->branch == NULL) + notes->branch = zalloc(sizeof(*notes->branch)); + + return notes->branch; +} + static struct cyc_hist *symbol__cycles_hist(struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); + struct annotated_branch *branch; - if (notes->src == NULL) { - notes->src = annotated_source__new(); - if (notes->src == NULL) - return NULL; - goto alloc_cycles_hist; + branch = annotation__get_branch(notes); + if (branch == NULL) + return NULL; + + if (branch->cycles_hist == NULL) { + const size_t size = symbol__size(sym); + + branch->cycles_hist = calloc(size, sizeof(struct cyc_hist)); } - if (!notes->src->cycles_hist) { -alloc_cycles_hist: - symbol__alloc_hist_cycles(sym); - } - - return notes->src->cycles_hist; + return branch->cycles_hist; } struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists) @@ -1083,6 +1081,14 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 return n_insn; } +static void annotated_branch__delete(struct annotated_branch *branch) +{ + if (branch) { + zfree(&branch->cycles_hist); + free(branch); + } +} + static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) { unsigned n_insn; @@ -1091,6 +1097,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 n_insn = annotation__count_insn(notes, start, end); if 
(n_insn && ch->num && ch->cycles) { + struct annotated_branch *branch; float ipc = n_insn / ((double)ch->cycles / (double)ch->num); /* Hide data when there are too many overlaps. */ @@ -1106,10 +1113,11 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 } } - if (cover_insn) { - notes->hit_cycles += ch->cycles; - notes->hit_insn += n_insn * ch->num; - notes->cover_insn += cover_insn; + branch = annotation__get_branch(notes); + if (cover_insn && branch) { + branch->hit_cycles += ch->cycles; + branch->hit_insn += n_insn * ch->num; + branch->cover_insn += cover_insn; } } } @@ -1119,19 +1127,19 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) int err = 0; s64 offset; - if (!notes->src || !notes->src->cycles_hist) + if (!notes->branch || !notes->branch->cycles_hist) return 0; - notes->total_insn = annotation__count_insn(notes, 0, size - 1); - notes->hit_cycles = 0; - notes->hit_insn = 0; - notes->cover_insn = 0; + notes->branch->total_insn = annotation__count_insn(notes, 0, size - 1); + notes->branch->hit_cycles = 0; + notes->branch->hit_insn = 0; + notes->branch->cover_insn = 0; annotation__lock(notes); for (offset = size - 1; offset >= 0; --offset) { struct cyc_hist *ch; - ch = ¬es->src->cycles_hist[offset]; + ch = ¬es->branch->cycles_hist[offset]; if (ch && ch->cycles) { struct annotation_line *al; @@ -1150,13 +1158,12 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) al->cycles->max = ch->cycles_max; al->cycles->min = ch->cycles_min; } - notes->have_cycles = true; } } if (err) { while (++offset < (s64)size) { - struct cyc_hist *ch = ¬es->src->cycles_hist[offset]; + struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; if (ch && ch->cycles) { struct annotation_line *al = notes->offsets[offset]; @@ -1322,6 +1329,7 @@ int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool r void annotation__exit(struct annotation *notes) { annotated_source__delete(notes->src); + annotated_branch__delete(notes->branch); } static struct sharded_mutex *sharded_mutex; @@ -3075,13 +3083,14 @@ static void disasm_line__write(struct disasm_line *dl, struct annotation *notes, static void ipc_coverage_string(char *bf, int size, struct annotation *notes) { double ipc = 0.0, coverage = 0.0; + struct annotated_branch *branch = annotation__get_branch(notes); - if (notes->hit_cycles) - ipc = notes->hit_insn / ((double)notes->hit_cycles); + if (branch && branch->hit_cycles) + ipc = branch->hit_insn / ((double)branch->hit_cycles); - if (notes->total_insn) { - coverage = notes->cover_insn * 100.0 / - ((double)notes->total_insn); + if (branch && branch->total_insn) { + coverage = branch->cover_insn * 100.0 / + ((double)branch->total_insn); } scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", @@ -3106,7 +3115,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati int printed; if (first_line && (al->offset == -1 || percent_max == 0.0)) { - if (notes->have_cycles && al->cycles) { + if (notes->branch && al->cycles) { if (al->cycles->ipc == 0.0 && al->cycles->avg == 0) show_title = true; } else @@ -3143,7 +3152,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati } } - if (notes->have_cycles) { + if (notes->branch) { if (al->cycles && al->cycles->ipc) obj__printf(obj, "%*.2f ", ANNOTATION__IPC_WIDTH - 1, al->cycles->ipc); else if (!show_title) diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 19bc2f039175..508b93d3dcde 
100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -271,17 +271,20 @@ struct annotated_source { struct list_head source; int nr_histograms; size_t sizeof_sym_hist; - struct cyc_hist *cycles_hist; struct sym_hist *histograms; }; +struct annotated_branch { + u64 hit_cycles; + u64 hit_insn; + unsigned int total_insn; + unsigned int cover_insn; + struct cyc_hist *cycles_hist; +}; + struct LOCKABLE annotation { u64 max_coverage; u64 start; - u64 hit_cycles; - u64 hit_insn; - unsigned int total_insn; - unsigned int cover_insn; struct annotation_options *options; struct annotation_line **offsets; int nr_events; @@ -297,8 +300,8 @@ struct LOCKABLE annotation { u8 max_addr; u8 max_ins_name; } widths; - bool have_cycles; struct annotated_source *src; + struct annotated_branch *branch; }; static inline void annotation__init(struct annotation *notes __maybe_unused) @@ -312,10 +315,10 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr static inline int annotation__cycles_width(struct annotation *notes) { - if (notes->have_cycles && notes->options->show_minmax_cycle) + if (notes->branch && notes->options->show_minmax_cycle) return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH; - return notes->have_cycles ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; + return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; } static inline int annotation__pcnt_width(struct annotation *notes) diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c index 591fc1edd385..08f82c1f166c 100644 --- a/tools/perf/util/block-info.c +++ b/tools/perf/util/block-info.c @@ -129,9 +129,9 @@ int block_info__process_sym(struct hist_entry *he, struct block_hist *bh, al.sym = he->ms.sym; notes = symbol__annotation(he->ms.sym); - if (!notes || !notes->src || !notes->src->cycles_hist) + if (!notes || !notes->branch || !notes->branch->cycles_hist) return 0; - ch = notes->src->cycles_hist; + ch = notes->branch->cycles_hist; for (unsigned int i = 0; i < symbol__size(he->ms.sym); i++) { if (ch[i].num_aggr) { struct block_info *bi; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 80e4f6132740..27b123ccd2d1 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -583,21 +583,21 @@ static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, { struct symbol *sym = he->ms.sym; - struct annotation *notes; + struct annotated_branch *branch; double ipc = 0.0, coverage = 0.0; char tmp[64]; if (!sym) return repsep_snprintf(bf, size, "%-*s", width, "-"); - notes = symbol__annotation(sym); + branch = symbol__annotation(sym)->branch; - if (notes->hit_cycles) - ipc = notes->hit_insn / ((double)notes->hit_cycles); + if (branch && branch->hit_cycles) + ipc = branch->hit_insn / ((double)branch->hit_cycles); - if (notes->total_insn) { - coverage = notes->cover_insn * 100.0 / - ((double)notes->total_insn); + if (branch && branch->total_insn) { + coverage = branch->cover_insn * 100.0 / + ((double)branch->total_insn); } snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); From 2b215ec71b888ec2f3ca86846ce3a7f20b0d5e4d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:19:05 -0700 Subject: [PATCH 013/179] perf annotate: Move max_coverage from 'struct annotation' to 'struct annotated_branch' The max_coverage field is only used when branch stack info is available so it'd be natural to move to 'struct annotated_branch'. 
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Christophe JAILLET Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231103191907.54531-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 7 +++++-- tools/perf/util/annotate.c | 2 +- tools/perf/util/annotate.h | 4 +++- tools/perf/util/block-range.c | 7 ++++++- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index aeeb801f1ed7..a9129b51d511 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -94,6 +94,7 @@ static void process_basic_block(struct addr_map_symbol *start, struct annotation *notes = sym ? symbol__annotation(sym) : NULL; struct block_range_iter iter; struct block_range *entry; + struct annotated_branch *branch; /* * Sanity; NULL isn't executable and the CPU cannot execute backwards @@ -105,6 +106,8 @@ static void process_basic_block(struct addr_map_symbol *start, if (!block_range_iter__valid(&iter)) return; + branch = annotation__get_branch(notes); + /* * First block in range is a branch target. */ @@ -118,8 +121,8 @@ static void process_basic_block(struct addr_map_symbol *start, entry->coverage++; entry->sym = sym; - if (notes) - notes->max_coverage = max(notes->max_coverage, entry->coverage); + if (branch) + branch->max_coverage = max(branch->max_coverage, entry->coverage); } while (block_range_iter__next(&iter)); diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 4e40c94c85d1..077a297f4dad 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -946,7 +946,7 @@ static int __symbol__inc_addr_samples(struct map_symbol *ms, return 0; } -static struct annotated_branch *annotation__get_branch(struct annotation *notes) +struct annotated_branch *annotation__get_branch(struct annotation *notes) { if (notes == NULL) return NULL; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 508b93d3dcde..849713098953 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -280,10 +280,10 @@ struct annotated_branch { unsigned int total_insn; unsigned int cover_insn; struct cyc_hist *cycles_hist; + u64 max_coverage; }; struct LOCKABLE annotation { - u64 max_coverage; u64 start; struct annotation_options *options; struct annotation_line **offsets; @@ -355,6 +355,8 @@ static inline struct annotation *symbol__annotation(struct symbol *sym) int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, struct perf_sample *sample, struct evsel *evsel); +struct annotated_branch *annotation__get_branch(struct annotation *notes); + int addr_map_symbol__account_cycles(struct addr_map_symbol *ams, struct addr_map_symbol *start, unsigned cycles); diff --git a/tools/perf/util/block-range.c b/tools/perf/util/block-range.c index 680e92774d0c..15c42196c24c 100644 --- a/tools/perf/util/block-range.c +++ b/tools/perf/util/block-range.c @@ -311,6 +311,7 @@ struct block_range_iter block_range__create(u64 start, u64 end) double block_range__coverage(struct block_range *br) { struct symbol *sym; + struct annotated_branch *branch; if (!br) { if (block_ranges.blocks) @@ -323,5 +324,9 @@ double block_range__coverage(struct block_range *br) if (!sym) return -1; - return (double)br->coverage / symbol__annotation(sym)->max_coverage; + branch = symbol__annotation(sym)->branch; + if (!branch) + return -1; + + return (double)br->coverage / branch->max_coverage; } From 
0aae4c99c5f8f748c6cb5ca03bb3b3ae8cfb10df Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:19:06 -0700 Subject: [PATCH 014/179] perf annotate: Move some source code related fields from 'struct annotation' to 'struct annotated_source' Some fields in the 'struct annotation' are only used with 'struct annotated_source' so better to be moved there in order to reduce memory consumption for other symbols. Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Christophe JAILLET Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231103191907.54531-5-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 12 ++++++------ tools/perf/util/annotate.c | 17 +++++++++-------- tools/perf/util/annotate.h | 14 +++++++------- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index d2470f87344d..1b42db70c998 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -384,7 +384,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) if (al->idx_asm < offset) offset = al->idx; - browser->b.nr_entries = notes->nr_entries; + browser->b.nr_entries = notes->src->nr_entries; notes->options->hide_src_code = false; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx - offset; @@ -402,7 +402,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) if (al->idx_asm < offset) offset = al->idx_asm; - browser->b.nr_entries = notes->nr_asm_entries; + browser->b.nr_entries = notes->src->nr_asm_entries; notes->options->hide_src_code = true; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx_asm - offset; @@ -435,7 +435,7 @@ static void ui_browser__init_asm_mode(struct ui_browser *browser) { struct annotation *notes = browser__annotation(browser); ui_browser__reset_index(browser); - browser->nr_entries = notes->nr_asm_entries; + browser->nr_entries = notes->src->nr_asm_entries; } static int sym_title(struct symbol *sym, struct map *map, char *title, @@ -860,7 +860,7 @@ static int annotate_browser__run(struct annotate_browser *browser, browser->b.height, browser->b.index, browser->b.top_idx, - notes->nr_asm_entries); + notes->src->nr_asm_entries); } continue; case K_ENTER: @@ -991,8 +991,8 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, ui_helpline__push("Press ESC to exit"); - browser.b.width = notes->max_line_len; - browser.b.nr_entries = notes->nr_entries; + browser.b.width = notes->src->max_line_len; + browser.b.nr_entries = notes->src->nr_entries; browser.b.entries = ¬es->src->source, browser.b.width += 18; /* Percentage */ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 077a297f4dad..23e68b1abc2f 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2825,19 +2825,20 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) void annotation__set_offsets(struct annotation *notes, s64 size) { struct annotation_line *al; + struct annotated_source *src = notes->src; - notes->max_line_len = 0; - notes->nr_entries = 0; - notes->nr_asm_entries = 0; + src->max_line_len = 0; + src->nr_entries = 0; + src->nr_asm_entries = 0; - list_for_each_entry(al, ¬es->src->source, node) { + list_for_each_entry(al, &src->source, node) { size_t line_len = strlen(al->line); - if (notes->max_line_len < line_len) - 
notes->max_line_len = line_len; - al->idx = notes->nr_entries++; + if (src->max_line_len < line_len) + src->max_line_len = line_len; + al->idx = src->nr_entries++; if (al->offset != -1) { - al->idx_asm = notes->nr_asm_entries++; + al->idx_asm = src->nr_asm_entries++; /* * FIXME: short term bandaid to cope with assembly * routines that comes with labels in the same column diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 849713098953..9eb7b6d3fe95 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -268,10 +268,13 @@ struct cyc_hist { * returns. */ struct annotated_source { - struct list_head source; - int nr_histograms; - size_t sizeof_sym_hist; - struct sym_hist *histograms; + struct list_head source; + size_t sizeof_sym_hist; + struct sym_hist *histograms; + int nr_histograms; + int nr_entries; + int nr_asm_entries; + u16 max_line_len; }; struct annotated_branch { @@ -289,9 +292,6 @@ struct LOCKABLE annotation { struct annotation_line **offsets; int nr_events; int max_jump_sources; - int nr_entries; - int nr_asm_entries; - u16 max_line_len; struct { u8 addr; u8 jumps; From b753d48f53f9dcea655d359f028c8adfdd9504b5 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Fri, 3 Nov 2023 12:19:07 -0700 Subject: [PATCH 015/179] perf annotate: Move offsets array from 'struct annotation' to 'struct annotated_source' The offsets array keeps pointers to 'struct annotation_line' entries which are available in the 'struct annotated_source'. Let's move it to there. Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Christophe JAILLET Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231103191907.54531-6-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 4 ++-- tools/perf/util/annotate.c | 20 ++++++++++---------- tools/perf/util/annotate.h | 2 +- 3 files changed, 13 insertions(+), 13 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 1b42db70c998..163f916fff68 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -188,7 +188,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) * name right after the '<' token and probably treating this like a * 'call' instruction. 
*/ - target = notes->offsets[cursor->ops.target.offset]; + target = notes->src->offsets[cursor->ops.target.offset]; if (target == NULL) { ui_helpline__printf("WARN: jump target inconsistency, press 'o', notes->offsets[%#x] = NULL\n", cursor->ops.target.offset); @@ -1006,6 +1006,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, out_free_offsets: if(not_annotated) - zfree(¬es->offsets); + zfree(¬es->src->offsets); return ret; } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 23e68b1abc2f..9b68b8e3791c 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1075,7 +1075,7 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 u64 offset; for (offset = start; offset <= end; offset++) { - if (notes->offsets[offset]) + if (notes->src->offsets[offset]) n_insn++; } return n_insn; @@ -1105,7 +1105,7 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 return; for (offset = start; offset <= end; offset++) { - struct annotation_line *al = notes->offsets[offset]; + struct annotation_line *al = notes->src->offsets[offset]; if (al && al->cycles && al->cycles->ipc == 0.0) { al->cycles->ipc = ipc; @@ -1143,7 +1143,7 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) if (ch && ch->cycles) { struct annotation_line *al; - al = notes->offsets[offset]; + al = notes->src->offsets[offset]; if (al && al->cycles == NULL) { al->cycles = zalloc(sizeof(*al->cycles)); if (al->cycles == NULL) { @@ -1166,7 +1166,7 @@ static int annotation__compute_ipc(struct annotation *notes, size_t size) struct cyc_hist *ch = ¬es->branch->cycles_hist[offset]; if (ch && ch->cycles) { - struct annotation_line *al = notes->offsets[offset]; + struct annotation_line *al = notes->src->offsets[offset]; if (al) zfree(&al->cycles); } @@ -2800,7 +2800,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) return; for (offset = 0; offset < size; ++offset) { - struct annotation_line *al = notes->offsets[offset]; + struct annotation_line *al = notes->src->offsets[offset]; struct disasm_line *dl; dl = disasm_line(al); @@ -2808,7 +2808,7 @@ void annotation__mark_jump_targets(struct annotation *notes, struct symbol *sym) if (!disasm_line__is_valid_local_jump(dl, sym)) continue; - al = notes->offsets[dl->ops.target.offset]; + al = notes->src->offsets[dl->ops.target.offset]; /* * FIXME: Oops, no jump target? Buggy disassembler? Or do we @@ -2847,7 +2847,7 @@ void annotation__set_offsets(struct annotation *notes, s64 size) * E.g. 
copy_user_generic_unrolled */ if (al->offset < size) - notes->offsets[al->offset] = al; + notes->src->offsets[al->offset] = al; } else al->idx_asm = -1; } @@ -3280,8 +3280,8 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, size_t size = symbol__size(sym); int nr_pcnt = 1, err; - notes->offsets = zalloc(size * sizeof(struct annotation_line *)); - if (notes->offsets == NULL) + notes->src->offsets = zalloc(size * sizeof(struct annotation_line *)); + if (notes->src->offsets == NULL) return ENOMEM; if (evsel__is_group_event(evsel)) @@ -3311,7 +3311,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, return 0; out_free_offsets: - zfree(¬es->offsets); + zfree(¬es->src->offsets); return err; } diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 9eb7b6d3fe95..de59c1aff08e 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -271,6 +271,7 @@ struct annotated_source { struct list_head source; size_t sizeof_sym_hist; struct sym_hist *histograms; + struct annotation_line **offsets; int nr_histograms; int nr_entries; int nr_asm_entries; @@ -289,7 +290,6 @@ struct annotated_branch { struct LOCKABLE annotation { u64 start; struct annotation_options *options; - struct annotation_line **offsets; int nr_events; int max_jump_sources; struct { From 4a5aaaf308b91e3da21c3b8f7e78dc5a256d9324 Mon Sep 17 00:00:00 2001 From: zhaimingbing Date: Mon, 30 Oct 2023 15:58:25 +0800 Subject: [PATCH 016/179] perf tests attr: Fix spelling mistake "whic" to "which" There is a spelling mistake, Please fix it. Reviewed-by: Ian Rogers Signed-off-by: zhaimingbing Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: kernel-janitors@vger.kernel.org Link: https://lore.kernel.org/r/20231030075825.3701-1-zhaimingbing@cmss.chinamobile.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr.c b/tools/perf/tests/attr.c index 61186d0d1cfa..97e1bdd6ec0e 100644 --- a/tools/perf/tests/attr.c +++ b/tools/perf/tests/attr.c @@ -188,7 +188,7 @@ static int test__attr(struct test_suite *test __maybe_unused, int subtest __mayb if (perf_pmus__num_core_pmus() > 1) { /* * TODO: Attribute tests hard code the PMU type. If there are >1 - * core PMU then each PMU will have a different type whic + * core PMU then each PMU will have a different type which * requires additional support. */ pr_debug("Skip test on hybrid systems"); From 36c70e44a37b84cbb4094bf6f5cdb01cfd6659df Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Mon, 30 Oct 2023 11:14:38 +0000 Subject: [PATCH 017/179] perf tools: Add the python_ext_build directory to .gitignore `python_ext_build` is the build directory for python.so, ignore it for cleaner git status. 
Signed-off-by: Yang Jihong Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231030111438.1357962-2-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index f533e76fb480..ee5c14f3b8b1 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -49,3 +49,4 @@ libtraceevent/ libtraceevent_plugins/ fixdep Documentation/doc.dep +python_ext_build/ From b861fd7e0efc4dc2376e7212f59a9b32d1383c00 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 6 Nov 2023 10:16:27 +0100 Subject: [PATCH 018/179] perf tests offcpu: Adjust test case perf record offcpu profiling tests for s390 On s390 using linux-next the test case: 87: perf record offcpu profiling tests fails. The root cause is this command # ./perf record --off-cpu -e dummy -- ./perf bench sched messaging -l 10 # Running 'sched/messaging' benchmark: # 20 sender and receiver processes per group # 10 groups == 400 processes run Total time: 0.231 [sec] [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.077 MB perf.data (401 samples) ] # It does not generate 800+ sample entries, on s390 usually around 40[1-9], sometimes a few more, but never more than 450. The higher the number of CPUs the lower the number of samples. Looking at function chain: bench_sched_messaging() +--> group() the senders and receiver threads are created. The senders and receivers call function ready() which writes one bytes and wait for a reply using poll system() call. As context switches are counted, the function ready() will trigger a context switch when no input data is available after the write system call. The write system call does not trigger context switches when the data size is small. And writing 1000 bytes (10 iterations with 100 bytes) is not much and certainly won't block. The 400+ context switch on s390 occur when the some receiver/sender threads call ready() and wait for the response from function bench_sched_messaging() being kicked off. Lower the number of expected context switches to 400 to succeed on s390. Suggested-by: Namhyung Kim Signed-off-by: Ilya Leoshkevich Signed-off-by: Thomas Richter Acked-by: Namhyung Kim Cc: Heiko Carstens Cc: Sumanth Korikkar Cc: Sven Schnelle Cc: Vasily Gorbik Co-developed-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20231106091627.2022530-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record_offcpu.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/shell/record_offcpu.sh b/tools/perf/tests/shell/record_offcpu.sh index a1ef8f0d2b5c..67c925f3a15a 100755 --- a/tools/perf/tests/shell/record_offcpu.sh +++ b/tools/perf/tests/shell/record_offcpu.sh @@ -77,9 +77,9 @@ test_offcpu_child() { err=1 return fi - # each process waits for read and write, so it should be more than 800 events + # each process waits at least for poll, so it should be more than 400 events if ! 
perf report -i ${perfdata} -s comm -q -n -t ';' --percent-limit=90 | \ - awk -F ";" '{ if (NF > 3 && int($3) < 800) exit 1; }' + awk -F ";" '{ if (NF > 3 && int($3) < 400) exit 1; }' then echo "Child task off-cpu test [Failed invalid output]" err=1 From 33ce9fc4f8ddba30b7040bd01cea11080f40b344 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 6 Nov 2023 15:10:48 +0000 Subject: [PATCH 019/179] perf test: Add option to change objdump binary All of the other Perf subcommands that use objdump have an option to specify the binary, so add the same option to 'perf test'. This is useful if you have built the kernel with a different toolchain to the system one, where the system objdump may fail to disassemble vmlinux. Now this can be fixed with something like this: $ perf test --objdump llvm-objdump "object code reading" Reviewed-by: Ian Rogers Signed-off-by: James Clark Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Jajeev Cc: Fangrui Song Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kajol Jain Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Tom Rix Cc: Yang Jihong Cc: llvm@lists.linux.dev Link: https://lore.kernel.org/r/20231106151051.129440-2-james.clark@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 3 +++ tools/perf/tests/code-reading.c | 2 +- tools/perf/tests/tests.h | 1 + 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index cb6f1dd00dc4..a8d17dd50588 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -32,6 +32,7 @@ static bool dont_fork; const char *dso_to_test; +const char *test_objdump_path = "objdump"; /* * List of architecture specific tests. Not a weak symbol as the array length is @@ -529,6 +530,8 @@ int cmd_test(int argc, const char **argv) "Do not fork for testcase"), OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"), OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"), + OPT_STRING(0, "objdump", &test_objdump_path, "path", + "objdump binary to use for disassembly and annotations"), OPT_END() }; const char * const test_subcommands[] = { "list", NULL }; diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index d6e845c57902..8620146d0378 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -185,7 +185,7 @@ static int read_via_objdump(const char *filename, u64 addr, void *buf, int ret; fmt = "%s -z -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s"; - ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len, + ret = snprintf(cmd, sizeof(cmd), fmt, test_objdump_path, addr, addr + len, filename); if (ret <= 0 || (size_t)ret >= sizeof(cmd)) return -1; diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h index b394f3ac2d66..dad3d7414142 100644 --- a/tools/perf/tests/tests.h +++ b/tools/perf/tests/tests.h @@ -207,5 +207,6 @@ DECLARE_WORKLOAD(brstack); DECLARE_WORKLOAD(datasym); extern const char *dso_to_test; +extern const char *test_objdump_path; #endif /* TESTS_H */ From 6aad765d10c5cd8a62b258c359bae643ab2d45da Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 6 Nov 2023 15:10:49 +0000 Subject: [PATCH 020/179] perf test: Add support for setting objdump binary via perf config Add a 'perf config' variable that does the same thing as "perf test --objdump ". Also update the man page. 
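For reference, the same setting can also be written into the config file directly; a minimal ~/.perfconfig stanza would look roughly like this (the llvm-objdump path is only an illustrative value, use whatever objdump-compatible binary matches your toolchain):

	[test]
		objdump = /usr/bin/llvm-objdump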
Committer testing: # perf config test.objdump # perf test "object code reading" 26: Object code reading : Ok # perf config test.objdump=blah # perf config test.objdump test.objdump=blah # perf test "object code reading" 26: Object code reading : FAILED! # perf test -v "object code reading" 26: Object code reading : --- start --- test child forked, pid 600599 Looking at the vmlinux_path (8 entries long) Using /proc/kcore for kernel data Using /proc/kallsyms for symbols Parsing event 'cycles' Using CPUID AuthenticAMD-25-21-0 mmap size 528384B Reading object code for memory address: 0x4d9a02 File is: /home/acme/bin/perf On file address is: 0xd9a02 Objdump command is: blah -z -d --start-address=0x4d9a02 --stop-address=0x4d9a82 /home/acme/bin/perf objdump read too few bytes: 128 Bytes read differ from those read by objdump buf1 (dso): 0x48 0x85 0xff 0x74 0x29 0xe8 0x94 0xdf 0x07 0x00 0x8b 0x73 0x1c 0x48 0x8b 0x43 0x08 0xeb 0xa5 0x0f 0x1f 0x00 0x48 0x8b 0x45 0xe8 0x64 0x48 0x2b 0x04 0x25 0x28 0x00 0x00 0x00 0x75 0x0f 0x48 0x8b 0x5d 0xf8 0xc9 0xc3 0x0f 0x1f 0x00 0x48 0x8b 0x43 0x08 0xeb 0x84 0xe8 0xc5 0x3e 0xf3 0xff 0x0f 0x1f 0x44 0x00 0x00 0x55 0x48 0x89 0xe5 0x41 0x56 0x41 0x55 0x49 0x89 0xd5 0x41 0x54 0x49 0x89 0xfc 0x53 0x48 0x89 0xf3 0x48 0x83 0xec 0x30 0x48 0x8b 0x7e 0x20 0x64 0x48 0x8b 0x04 0x25 0x28 0x00 0x00 0x00 0x48 0x89 0x45 0xd8 0x31 0xc0 0x48 0x89 0x75 0xb0 0x48 0xc7 0x45 0xb8 0x00 0x00 0x00 0x00 0x48 0xc7 0x45 0xc0 0x00 0x00 0x00 0x00 0xe8 0xad 0xfa buf2 (objdump): 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x00 test child finished with -1 ---- end ---- Object code reading: FAILED! # perf config test.objdump=/usr/bin/objdump # perf config test.objdump test.objdump=/usr/bin/objdump # perf test "object code reading" 26: Object code reading : Ok # Signed-off-by: James Clark Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Jajeev Cc: Fangrui Song Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Tom Rix Cc: Yang Jihong Cc: Yonghong Song Cc: llvm@lists.linux.dev Link: https://lore.kernel.org/r/20231106151051.129440-3-james.clark@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-config.txt | 4 ++++ tools/perf/tests/builtin-test.c | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 0b4e79dbd3f6..16398babd1ef 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt @@ -722,6 +722,10 @@ session-.*:: Defines new record session for daemon. The value is record's command line without the 'record' keyword. +test.*:: + + test.objdump:: + objdump binary to use for disassembly and annotations. 
SEE ALSO -------- diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index a8d17dd50588..113e92119e1d 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -14,6 +14,7 @@ #include #include #include "builtin.h" +#include "config.h" #include "hist.h" #include "intlist.h" #include "tests.h" @@ -514,6 +515,15 @@ static int run_workload(const char *work, int argc, const char **argv) return -1; } +static int perf_test__config(const char *var, const char *value, + void *data __maybe_unused) +{ + if (!strcmp(var, "test.objdump")) + test_objdump_path = value; + + return 0; +} + int cmd_test(int argc, const char **argv) { const char *test_usage[] = { @@ -541,6 +551,8 @@ int cmd_test(int argc, const char **argv) if (ret < 0) return ret; + perf_config(perf_test__config, NULL); + /* Unbuffered output */ setvbuf(stdout, NULL, _IONBF, 0); From 6512b6aa237db36d881a81cc312db39668e61853 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 2 Nov 2023 10:56:54 -0700 Subject: [PATCH 021/179] perf bpf: Don't synthesize BPF events when disabled If BPF sideband events are disabled on the command line, don't synthesize BPF events too. Signed-off-by: Ian Rogers Acked-by: Song Liu Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231102175735.2272696-13-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf-event.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c index 38fcf3ba5749..830711cae30d 100644 --- a/tools/perf/util/bpf-event.c +++ b/tools/perf/util/bpf-event.c @@ -386,6 +386,9 @@ int perf_event__synthesize_bpf_events(struct perf_session *session, int err; int fd; + if (opts->no_bpf_event) + return 0; + event = malloc(sizeof(event->bpf) + KSYM_NAME_LEN + machine->id_hdr_size); if (!event) return -1; From a399ee6773d6a0203f9bd764f8bd9d978878cef1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 9 Nov 2023 16:34:09 -0300 Subject: [PATCH 022/179] tools: Disable __packed attribute compiler warning due to -Werror=attributes Noticed on several perf tools cross build test containers: [perfbuilder@five ~]$ grep FAIL ~/dm.log/summary 19 10.18 debian:experimental-x-mips : FAIL gcc version 12.3.0 (Debian 12.3.0-6) 20 11.21 debian:experimental-x-mips64 : FAIL gcc version 12.3.0 (Debian 12.3.0-6) 21 11.30 debian:experimental-x-mipsel : FAIL gcc version 12.3.0 (Debian 12.3.0-6) 37 12.07 ubuntu:18.04-x-arm : FAIL gcc version 7.5.0 (Ubuntu/Linaro 7.5.0-3ubuntu1~18.04) 42 11.91 ubuntu:18.04-x-riscv64 : FAIL gcc version 7.5.0 (Ubuntu 7.5.0-3ubuntu1~18.04) 44 13.17 ubuntu:18.04-x-sh4 : FAIL gcc version 7.5.0 (Ubuntu 7.5.0-3ubuntu1~18.04) 45 12.09 ubuntu:18.04-x-sparc64 : FAIL gcc version 7.5.0 (Ubuntu 7.5.0-3ubuntu1~18.04) [perfbuilder@five ~]$ In file included from util/intel-pt-decoder/intel-pt-pkt-decoder.c:10: /tmp/perf-6.6.0-rc1/tools/include/asm-generic/unaligned.h: In function 
'get_unaligned_le16': /tmp/perf-6.6.0-rc1/tools/include/asm-generic/unaligned.h:13:29: error: packed attribute causes inefficient alignment for 'x' [-Werror=attributes] 13 | const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ | ^ /tmp/perf-6.6.0-rc1/tools/include/asm-generic/unaligned.h:27:28: note: in expansion of macro '__get_unaligned_t' 27 | return le16_to_cpu(__get_unaligned_t(__le16, p)); | ^~~~~~~~~~~~~~~~~ This comes from the kernel, where the -Wattributes and -Wpacked isn't used, -Wpacked is already disabled, do it for the attributes as well. Fixes: a91c987254651443 ("perf tools: Add get_unaligned_leNN()") Suggested-by: Adrian Hunter Cc: Ian Rogers Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/7c5b626c-1de9-4c12-a781-e44985b4a797@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/asm-generic/unaligned.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/include/asm-generic/unaligned.h b/tools/include/asm-generic/unaligned.h index 156743d399ae..2fd551915c20 100644 --- a/tools/include/asm-generic/unaligned.h +++ b/tools/include/asm-generic/unaligned.h @@ -8,6 +8,7 @@ */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wpacked" +#pragma GCC diagnostic ignored "-Wattributes" #define __get_unaligned_t(type, ptr) ({ \ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \ From dd678532f913c1a742f1a2add6adacfb7ae2b166 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 7 Nov 2023 14:03:31 +0530 Subject: [PATCH 023/179] perf header: Additional note on AMD IBS for max_precise pmu cap x86 core PMU exposes supported maximum precision level via max_precise PMU capability. Although, AMD core PMU does not support precise mode, certain core PMU events with precise_ip > 0 are allowed and forwarded to IBS OP PMU. Display a note about this in the 'perf report' header output and document the details in the perf-list man page. Signed-off-by: Ravi Bangoria Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Changbin Du Cc: Huacai Chen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kan Liang Cc: Kim Phillips Cc: Mark Rutland Cc: Ming Wang Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ross Zwisler Cc: Sandipan Das Cc: Santosh Shukla Cc: Yang Jihong Link: https://lore.kernel.org/r/20231107083331.901-2-ravi.bangoria@amd.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-list.txt | 12 +++++++----- tools/perf/util/env.c | 18 ++++++++++++++++++ tools/perf/util/env.h | 2 ++ tools/perf/util/header.c | 8 ++++++++ 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt index d5f78e125efe..1b90575ee3c8 100644 --- a/tools/perf/Documentation/perf-list.txt +++ b/tools/perf/Documentation/perf-list.txt @@ -81,11 +81,13 @@ For Intel systems precise event sampling is implemented with PEBS which supports up to precise-level 2, and precise level 3 for some special cases -On AMD systems it is implemented using IBS (up to precise-level 2). -The precise modifier works with event types 0x76 (cpu-cycles, CPU -clocks not halted) and 0xC1 (micro-ops retired). Both events map to -IBS execution sampling (IBS op) with the IBS Op Counter Control bit -(IbsOpCntCtl) set respectively (see the +On AMD systems it is implemented using IBS OP (up to precise-level 2). +Unlike Intel PEBS which provides levels of precision, AMD core pmu is +inherently non-precise and IBS is inherently precise. (i.e. 
ibs_op//, +ibs_op//p, ibs_op//pp and ibs_op//ppp are all same). The precise modifier +works with event types 0x76 (cpu-cycles, CPU clocks not halted) and 0xC1 +(micro-ops retired). Both events map to IBS execution sampling (IBS op) +with the IBS Op Counter Control bit (IbsOpCntCtl) set respectively (see the Core Complex (CCX) -> Processor x86 Core -> Instruction Based Sampling (IBS) section of the [AMD Processor Programming Reference (PPR)] relevant to the family, model and stepping of the processor being used). diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 44140b7f596a..cbc18b22ace5 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -531,6 +531,24 @@ int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu) return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1; } +bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name) +{ + char *pmu_mapping = env->pmu_mappings, *colon; + + for (int i = 0; i < env->nr_pmu_mappings; ++i) { + if (strtoul(pmu_mapping, &colon, 0) == ULONG_MAX || *colon != ':') + goto out_error; + + pmu_mapping = colon + 1; + if (strcmp(pmu_mapping, pmu_name) == 0) + return true; + + pmu_mapping += strlen(pmu_mapping) + 1; + } +out_error: + return false; +} + char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name, const char *cap) { diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 48d7f8759a2a..94596ff124d5 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -179,4 +179,6 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id); int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu); char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name, const char *cap); + +bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name); #endif /* __PERF_ENV_H */ diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index eeb96a1b63a7..1c687b5789c0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2145,6 +2145,14 @@ static void print_pmu_caps(struct feat_fd *ff, FILE *fp) __print_pmu_caps(fp, pmu_caps->nr_caps, pmu_caps->caps, pmu_caps->pmu_name); } + + if (strcmp(perf_env__arch(&ff->ph->env), "x86") == 0 && + perf_env__has_pmu_mapping(&ff->ph->env, "ibs_op")) { + char *max_precise = perf_env__find_pmu_cap(&ff->ph->env, "cpu", "max_precise"); + + if (max_precise != NULL && atoi(max_precise) == 0) + fprintf(fp, "# AMD systems uses ibs_op// PMU for some precise events, e.g.: cycles:p, see the 'perf list' man page for further details.\n"); + } } static void print_pmu_mappings(struct feat_fd *ff, FILE *fp) From ded8c48497b825f15436048e8ea3a731a3f7dece Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:20 -0800 Subject: [PATCH 024/179] perf annotate: Pass "-l" option to objdump conditionally The "-l" option is to print line numbers in the objdump output. perf annotate TUI only can show the line numbers later but it causes big slow downs for the kernel binary. Similarly, showing source code also takes a long time and it already has an option to control it. $ time objdump ... -d -S -C vmlinux > /dev/null real 0m3.474s user 0m3.047s sys 0m0.428s $ time objdump ... -d -l -C vmlinux > /dev/null real 0m1.796s user 0m1.459s sys 0m0.338s $ time objdump ... -d -C vmlinux > /dev/null real 0m0.051s user 0m0.036s sys 0m0.016s As it's not needed for data type profiling, let's make it conditional so that it can skip the unnecessary work. 
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu (Google) Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 9b68b8e3791c..118195c787b9 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2144,12 +2144,13 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) err = asprintf(&command, "%s %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 - " -l -d %s %s %s %c%s%c %s%s -C \"$1\"", + " %s -d %s %s %s %c%s%c %s%s -C \"$1\"", opts->objdump_path ?: "objdump", opts->disassembler_style ? "-M " : "", opts->disassembler_style ?: "", map__rip_2objdump(map, sym->start), map__rip_2objdump(map, sym->end), + opts->show_linenr ? "-l" : "", opts->show_asm_raw ? "" : "--no-show-raw-insn", opts->annotate_src ? "-S" : "", opts->prefix ? "--prefix " : "", From fb7fd2a14a503b9a23fe10303923fc0605c22288 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:21 -0800 Subject: [PATCH 025/179] perf annotate: Move raw_comment and raw_func_start fields out of 'struct ins_operands' Thoese two fields are used only for the jump_ops, so move them into the union to save some bytes. Also add jump__delete() callback not to free the fields as they didn't allocate new strings. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Andi Kleen Cc: Huacai Chen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu (Google) Cc: Peter Zijlstra Cc: Stephane Eranian Cc: WANG Rui Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- .../perf/arch/loongarch/annotate/instructions.c | 6 +++--- tools/perf/util/annotate.c | 17 +++++++++++++---- tools/perf/util/annotate.h | 6 ++++-- 3 files changed, 20 insertions(+), 9 deletions(-) diff --git a/tools/perf/arch/loongarch/annotate/instructions.c b/tools/perf/arch/loongarch/annotate/instructions.c index 98e19c5366ac..21cc7e4149f7 100644 --- a/tools/perf/arch/loongarch/annotate/instructions.c +++ b/tools/perf/arch/loongarch/annotate/instructions.c @@ -61,10 +61,10 @@ static int loongarch_jump__parse(struct arch *arch, struct ins_operands *ops, st const char *c = strchr(ops->raw, '#'); u64 start, end; - ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char); - ops->raw_func_start = strchr(ops->raw, '<'); + ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char); + ops->jump.raw_func_start = strchr(ops->raw, '<'); - if (ops->raw_func_start && c > ops->raw_func_start) + if (ops->jump.raw_func_start && c > ops->jump.raw_func_start) c = NULL; if (c++ != NULL) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 118195c787b9..3364edf30f50 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -340,10 +340,10 @@ bool ins__is_call(const struct ins *ins) */ static inline const char *validate_comma(const char *c, struct ins_operands *ops) { - if (ops->raw_comment && c > ops->raw_comment) + if (ops->jump.raw_comment && c > ops->jump.raw_comment) return NULL; - 
if (ops->raw_func_start && c > ops->raw_func_start) + if (ops->jump.raw_func_start && c > ops->jump.raw_func_start) return NULL; return c; @@ -359,8 +359,8 @@ static int jump__parse(struct arch *arch, struct ins_operands *ops, struct map_s const char *c = strchr(ops->raw, ','); u64 start, end; - ops->raw_comment = strchr(ops->raw, arch->objdump.comment_char); - ops->raw_func_start = strchr(ops->raw, '<'); + ops->jump.raw_comment = strchr(ops->raw, arch->objdump.comment_char); + ops->jump.raw_func_start = strchr(ops->raw, '<'); c = validate_comma(c, ops); @@ -462,7 +462,16 @@ static int jump__scnprintf(struct ins *ins, char *bf, size_t size, ops->target.offset); } +static void jump__delete(struct ins_operands *ops __maybe_unused) +{ + /* + * The ops->jump.raw_comment and ops->jump.raw_func_start belong to the + * raw string, don't free them. + */ +} + static struct ins_ops jump_ops = { + .free = jump__delete, .parse = jump__parse, .scnprintf = jump__scnprintf, }; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index de59c1aff08e..bc8b95e8b1be 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -31,8 +31,6 @@ struct ins { struct ins_operands { char *raw; - char *raw_comment; - char *raw_func_start; struct { char *raw; char *name; @@ -52,6 +50,10 @@ struct ins_operands { struct ins ins; struct ins_operands *ops; } locked; + struct { + char *raw_comment; + char *raw_func_start; + } jump; }; }; From 6f1b6291cf73cb3223f6fb9ec16862a5fe7ed957 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:22 -0800 Subject: [PATCH 026/179] perf tools: Add util/debuginfo.[ch] files Split debuginfo data structure and related functions into a separate file so that it can be used by other components than the probe-finder. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu (Google) Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/Build | 1 + tools/perf/util/debuginfo.c | 205 +++++++++++++++++++++++++++++++++ tools/perf/util/debuginfo.h | 64 ++++++++++ tools/perf/util/probe-finder.c | 193 +------------------------------ tools/perf/util/probe-finder.h | 19 +-- 5 files changed, 272 insertions(+), 210 deletions(-) create mode 100644 tools/perf/util/debuginfo.c create mode 100644 tools/perf/util/debuginfo.h diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 96058f949ec9..73e3f194f949 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -195,6 +195,7 @@ endif perf-$(CONFIG_DWARF) += probe-finder.o perf-$(CONFIG_DWARF) += dwarf-aux.o perf-$(CONFIG_DWARF) += dwarf-regs.o +perf-$(CONFIG_DWARF) += debuginfo.o perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o diff --git a/tools/perf/util/debuginfo.c b/tools/perf/util/debuginfo.c new file mode 100644 index 000000000000..19acf4775d35 --- /dev/null +++ b/tools/perf/util/debuginfo.c @@ -0,0 +1,205 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * DWARF debug information handling code. Copied from probe-finder.c. 
+ * + * Written by Masami Hiramatsu + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "build-id.h" +#include "dso.h" +#include "debug.h" +#include "debuginfo.h" +#include "symbol.h" + +#ifdef HAVE_DEBUGINFOD_SUPPORT +#include +#endif + +/* Dwarf FL wrappers */ +static char *debuginfo_path; /* Currently dummy */ + +static const Dwfl_Callbacks offline_callbacks = { + .find_debuginfo = dwfl_standard_find_debuginfo, + .debuginfo_path = &debuginfo_path, + + .section_address = dwfl_offline_section_address, + + /* We use this table for core files too. */ + .find_elf = dwfl_build_id_find_elf, +}; + +/* Get a Dwarf from offline image */ +static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, + const char *path) +{ + GElf_Addr dummy; + int fd; + + fd = open(path, O_RDONLY); + if (fd < 0) + return fd; + + dbg->dwfl = dwfl_begin(&offline_callbacks); + if (!dbg->dwfl) + goto error; + + dwfl_report_begin(dbg->dwfl); + dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); + if (!dbg->mod) + goto error; + + dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias); + if (!dbg->dbg) + goto error; + + dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy); + + dwfl_report_end(dbg->dwfl, NULL, NULL); + + return 0; +error: + if (dbg->dwfl) + dwfl_end(dbg->dwfl); + else + close(fd); + memset(dbg, 0, sizeof(*dbg)); + + return -ENOENT; +} + +static struct debuginfo *__debuginfo__new(const char *path) +{ + struct debuginfo *dbg = zalloc(sizeof(*dbg)); + if (!dbg) + return NULL; + + if (debuginfo__init_offline_dwarf(dbg, path) < 0) + zfree(&dbg); + if (dbg) + pr_debug("Open Debuginfo file: %s\n", path); + return dbg; +} + +enum dso_binary_type distro_dwarf_types[] = { + DSO_BINARY_TYPE__FEDORA_DEBUGINFO, + DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, + DSO_BINARY_TYPE__BUILDID_DEBUGINFO, + DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, + DSO_BINARY_TYPE__NOT_FOUND, +}; + +struct debuginfo *debuginfo__new(const char *path) +{ + enum dso_binary_type *type; + char buf[PATH_MAX], nil = '\0'; + struct dso *dso; + struct debuginfo *dinfo = NULL; + struct build_id bid; + + /* Try to open distro debuginfo files */ + dso = dso__new(path); + if (!dso) + goto out; + + /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */ + if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0) + dso__set_build_id(dso, &bid); + + for (type = distro_dwarf_types; + !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND; + type++) { + if (dso__read_binary_type_filename(dso, *type, &nil, + buf, PATH_MAX) < 0) + continue; + dinfo = __debuginfo__new(buf); + } + dso__put(dso); + +out: + /* if failed to open all distro debuginfo, open given binary */ + return dinfo ? 
: __debuginfo__new(path); +} + +void debuginfo__delete(struct debuginfo *dbg) +{ + if (dbg) { + if (dbg->dwfl) + dwfl_end(dbg->dwfl); + free(dbg); + } +} + +/* For the kernel module, we need a special code to get a DIE */ +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset) +{ + int n, i; + Elf32_Word shndx; + Elf_Scn *scn; + Elf *elf; + GElf_Shdr mem, *shdr; + const char *p; + + elf = dwfl_module_getelf(dbg->mod, &dbg->bias); + if (!elf) + return -EINVAL; + + /* Get the number of relocations */ + n = dwfl_module_relocations(dbg->mod); + if (n < 0) + return -ENOENT; + /* Search the relocation related .text section */ + for (i = 0; i < n; i++) { + p = dwfl_module_relocation_info(dbg->mod, i, &shndx); + if (strcmp(p, ".text") == 0) { + /* OK, get the section header */ + scn = elf_getscn(elf, shndx); + if (!scn) + return -ENOENT; + shdr = gelf_getshdr(scn, &mem); + if (!shdr) + return -ENOENT; + *offs = shdr->sh_addr; + if (adjust_offset) + *offs -= shdr->sh_offset; + } + } + return 0; +} + +#ifdef HAVE_DEBUGINFOD_SUPPORT +int get_source_from_debuginfod(const char *raw_path, + const char *sbuild_id, char **new_path) +{ + debuginfod_client *c = debuginfod_begin(); + const char *p = raw_path; + int fd; + + if (!c) + return -ENOMEM; + + fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id, + 0, p, new_path); + pr_debug("Search %s from debuginfod -> %d\n", p, fd); + if (fd >= 0) + close(fd); + debuginfod_end(c); + if (fd < 0) { + pr_debug("Failed to find %s in debuginfod (%s)\n", + raw_path, sbuild_id); + return -ENOENT; + } + pr_debug("Got a source %s\n", *new_path); + + return 0; +} +#endif /* HAVE_DEBUGINFOD_SUPPORT */ diff --git a/tools/perf/util/debuginfo.h b/tools/perf/util/debuginfo.h new file mode 100644 index 000000000000..4d65b8c605fc --- /dev/null +++ b/tools/perf/util/debuginfo.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _PERF_DEBUGINFO_H +#define _PERF_DEBUGINFO_H + +#include +#include + +#ifdef HAVE_DWARF_SUPPORT + +#include "dwarf-aux.h" + +/* debug information structure */ +struct debuginfo { + Dwarf *dbg; + Dwfl_Module *mod; + Dwfl *dwfl; + Dwarf_Addr bias; + const unsigned char *build_id; +}; + +/* This also tries to open distro debuginfo */ +struct debuginfo *debuginfo__new(const char *path); +void debuginfo__delete(struct debuginfo *dbg); + +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, + bool adjust_offset); + +#else /* HAVE_DWARF_SUPPORT */ + +/* dummy debug information structure */ +struct debuginfo { +}; + +static inline struct debuginfo *debuginfo__new(const char *path __maybe_unused) +{ + return NULL; +} + +static inline void debuginfo__delete(struct debuginfo *dbg __maybe_unused) +{ +} + +static inline int debuginfo__get_text_offset(struct debuginfo *dbg __maybe_unused, + Dwarf_Addr *offs __maybe_unused, + bool adjust_offset __maybe_unused) +{ + return -EINVAL; +} + +#endif /* HAVE_DWARF_SUPPORT */ + +#ifdef HAVE_DEBUGINFOD_SUPPORT +int get_source_from_debuginfod(const char *raw_path, const char *sbuild_id, + char **new_path); +#else /* HAVE_DEBUGINFOD_SUPPORT */ +static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused, + const char *sbuild_id __maybe_unused, + char **new_path __maybe_unused) +{ + return -ENOTSUP; +} +#endif /* HAVE_DEBUGINFOD_SUPPORT */ + +#endif /* _PERF_DEBUGINFO_H */ diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index f171360b0ef4..8d3dd85f9ff4 100644 --- 
a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -23,6 +23,7 @@ #include "event.h" #include "dso.h" #include "debug.h" +#include "debuginfo.h" #include "intlist.h" #include "strbuf.h" #include "strlist.h" @@ -31,128 +32,9 @@ #include "probe-file.h" #include "string2.h" -#ifdef HAVE_DEBUGINFOD_SUPPORT -#include -#endif - /* Kprobe tracer basic type is up to u64 */ #define MAX_BASIC_TYPE_BITS 64 -/* Dwarf FL wrappers */ -static char *debuginfo_path; /* Currently dummy */ - -static const Dwfl_Callbacks offline_callbacks = { - .find_debuginfo = dwfl_standard_find_debuginfo, - .debuginfo_path = &debuginfo_path, - - .section_address = dwfl_offline_section_address, - - /* We use this table for core files too. */ - .find_elf = dwfl_build_id_find_elf, -}; - -/* Get a Dwarf from offline image */ -static int debuginfo__init_offline_dwarf(struct debuginfo *dbg, - const char *path) -{ - GElf_Addr dummy; - int fd; - - fd = open(path, O_RDONLY); - if (fd < 0) - return fd; - - dbg->dwfl = dwfl_begin(&offline_callbacks); - if (!dbg->dwfl) - goto error; - - dwfl_report_begin(dbg->dwfl); - dbg->mod = dwfl_report_offline(dbg->dwfl, "", "", fd); - if (!dbg->mod) - goto error; - - dbg->dbg = dwfl_module_getdwarf(dbg->mod, &dbg->bias); - if (!dbg->dbg) - goto error; - - dwfl_module_build_id(dbg->mod, &dbg->build_id, &dummy); - - dwfl_report_end(dbg->dwfl, NULL, NULL); - - return 0; -error: - if (dbg->dwfl) - dwfl_end(dbg->dwfl); - else - close(fd); - memset(dbg, 0, sizeof(*dbg)); - - return -ENOENT; -} - -static struct debuginfo *__debuginfo__new(const char *path) -{ - struct debuginfo *dbg = zalloc(sizeof(*dbg)); - if (!dbg) - return NULL; - - if (debuginfo__init_offline_dwarf(dbg, path) < 0) - zfree(&dbg); - if (dbg) - pr_debug("Open Debuginfo file: %s\n", path); - return dbg; -} - -enum dso_binary_type distro_dwarf_types[] = { - DSO_BINARY_TYPE__FEDORA_DEBUGINFO, - DSO_BINARY_TYPE__UBUNTU_DEBUGINFO, - DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO, - DSO_BINARY_TYPE__BUILDID_DEBUGINFO, - DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO, - DSO_BINARY_TYPE__NOT_FOUND, -}; - -struct debuginfo *debuginfo__new(const char *path) -{ - enum dso_binary_type *type; - char buf[PATH_MAX], nil = '\0'; - struct dso *dso; - struct debuginfo *dinfo = NULL; - struct build_id bid; - - /* Try to open distro debuginfo files */ - dso = dso__new(path); - if (!dso) - goto out; - - /* Set the build id for DSO_BINARY_TYPE__BUILDID_DEBUGINFO */ - if (is_regular_file(path) && filename__read_build_id(path, &bid) > 0) - dso__set_build_id(dso, &bid); - - for (type = distro_dwarf_types; - !dinfo && *type != DSO_BINARY_TYPE__NOT_FOUND; - type++) { - if (dso__read_binary_type_filename(dso, *type, &nil, - buf, PATH_MAX) < 0) - continue; - dinfo = __debuginfo__new(buf); - } - dso__put(dso); - -out: - /* if failed to open all distro debuginfo, open given binary */ - return dinfo ? : __debuginfo__new(path); -} - -void debuginfo__delete(struct debuginfo *dbg) -{ - if (dbg) { - if (dbg->dwfl) - dwfl_end(dbg->dwfl); - free(dbg); - } -} - /* * Probe finder related functions */ @@ -1677,44 +1559,6 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg, return (ret < 0) ? 
ret : af.nvls; } -/* For the kernel module, we need a special code to get a DIE */ -int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, - bool adjust_offset) -{ - int n, i; - Elf32_Word shndx; - Elf_Scn *scn; - Elf *elf; - GElf_Shdr mem, *shdr; - const char *p; - - elf = dwfl_module_getelf(dbg->mod, &dbg->bias); - if (!elf) - return -EINVAL; - - /* Get the number of relocations */ - n = dwfl_module_relocations(dbg->mod); - if (n < 0) - return -ENOENT; - /* Search the relocation related .text section */ - for (i = 0; i < n; i++) { - p = dwfl_module_relocation_info(dbg->mod, i, &shndx); - if (strcmp(p, ".text") == 0) { - /* OK, get the section header */ - scn = elf_getscn(elf, shndx); - if (!scn) - return -ENOENT; - shdr = gelf_getshdr(scn, &mem); - if (!shdr) - return -ENOENT; - *offs = shdr->sh_addr; - if (adjust_offset) - *offs -= shdr->sh_offset; - } - } - return 0; -} - /* Reverse search */ int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, struct perf_probe_point *ppt) @@ -2009,41 +1853,6 @@ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr) return (ret < 0) ? ret : lf.found; } -#ifdef HAVE_DEBUGINFOD_SUPPORT -/* debuginfod doesn't require the comp_dir but buildid is required */ -static int get_source_from_debuginfod(const char *raw_path, - const char *sbuild_id, char **new_path) -{ - debuginfod_client *c = debuginfod_begin(); - const char *p = raw_path; - int fd; - - if (!c) - return -ENOMEM; - - fd = debuginfod_find_source(c, (const unsigned char *)sbuild_id, - 0, p, new_path); - pr_debug("Search %s from debuginfod -> %d\n", p, fd); - if (fd >= 0) - close(fd); - debuginfod_end(c); - if (fd < 0) { - pr_debug("Failed to find %s in debuginfod (%s)\n", - raw_path, sbuild_id); - return -ENOENT; - } - pr_debug("Got a source %s\n", *new_path); - - return 0; -} -#else -static inline int get_source_from_debuginfod(const char *raw_path __maybe_unused, - const char *sbuild_id __maybe_unused, - char **new_path __maybe_unused) -{ - return -ENOTSUP; -} -#endif /* * Find a src file from a DWARF tag path. Prepend optional source path prefix * and chop off leading directories that do not exist. 
Result is passed back as diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h index 8bc1c80d3c1c..3add5ff516e1 100644 --- a/tools/perf/util/probe-finder.h +++ b/tools/perf/util/probe-finder.h @@ -24,21 +24,7 @@ static inline int is_c_varname(const char *name) #ifdef HAVE_DWARF_SUPPORT #include "dwarf-aux.h" - -/* TODO: export debuginfo data structure even if no dwarf support */ - -/* debug information structure */ -struct debuginfo { - Dwarf *dbg; - Dwfl_Module *mod; - Dwfl *dwfl; - Dwarf_Addr bias; - const unsigned char *build_id; -}; - -/* This also tries to open distro debuginfo */ -struct debuginfo *debuginfo__new(const char *path); -void debuginfo__delete(struct debuginfo *dbg); +#include "debuginfo.h" /* Find probe_trace_events specified by perf_probe_event from debuginfo */ int debuginfo__find_trace_events(struct debuginfo *dbg, @@ -49,9 +35,6 @@ int debuginfo__find_trace_events(struct debuginfo *dbg, int debuginfo__find_probe_point(struct debuginfo *dbg, u64 addr, struct perf_probe_point *ppt); -int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs, - bool adjust_offset); - /* Find a line range */ int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr); From a65e8c0b7855e951138eff077af4a6a8721a7ef6 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:23 -0800 Subject: [PATCH 027/179] perf dwarf-aux: Fix die_get_typename() for void * The die_get_typename() is to return a C-like type name from DWARF debug entry and it follows data type if the target entry is a pointer type. But I found that void pointers don't have the type attribute to follow and then the function returns an error for that case. This results in a broken type string for void pointer types. For example, the following type entries are pointer types. <1><48c>: Abbrev Number: 4 (DW_TAG_pointer_type) <48d> DW_AT_byte_size : 8 <48d> DW_AT_type : <0x481> <1><491>: Abbrev Number: 211 (DW_TAG_pointer_type) <493> DW_AT_byte_size : 8 <1><494>: Abbrev Number: 4 (DW_TAG_pointer_type) <495> DW_AT_byte_size : 8 <495> DW_AT_type : <0x49e> The first one at offset 48c and the third one at offset 494 have type information. Then they are pointer types for the referenced types. But the second one at offset 491 doesn't have the type attribute. Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-5-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 2941d88f2199..4849c3bbfd95 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1090,7 +1090,14 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) return strbuf_addf(buf, "%s%s", tmp, name ?: ""); } ret = die_get_typename(&type, buf); - return ret ? 
ret : strbuf_addstr(buf, tmp); + if (ret < 0) { + /* void pointer has no type attribute */ + if (tag == DW_TAG_pointer_type && ret == -ENOENT) + return strbuf_addf(buf, "void*"); + + return ret; + } + return strbuf_addstr(buf, tmp); } /** From 3796eba7c137e073d1caa95b48d4a320fd2d9600 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:24 -0800 Subject: [PATCH 028/179] perf dwarf-aux: Move #else block of #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT code to the header file It's a usual convention that the conditional code is handled in a header file. As I'm planning to add some more of them, let's move the current code to the header first. Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-6-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 7 ------- tools/perf/util/dwarf-aux.h | 19 +++++++++++++++++-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 4849c3bbfd95..adef2635587d 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1245,13 +1245,6 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf) out: return ret; } -#else -int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, - Dwarf_Die *vr_die __maybe_unused, - struct strbuf *buf __maybe_unused) -{ - return -ENOTSUP; -} #endif /* diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 7ec8bc1083bb..4f5d0211ee4f 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -121,7 +121,6 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf); /* Get the name and type of given variable DIE, stored as "type\tname" */ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf); -int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf); /* Check if target program is compiled with optimization */ bool die_is_optimized_target(Dwarf_Die *cu_die); @@ -130,4 +129,20 @@ bool die_is_optimized_target(Dwarf_Die *cu_die); void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die, Dwarf_Addr *entrypc); -#endif +#ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT + +/* Get byte offset range of given variable DIE */ +int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf); + +#else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ + +static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, + Dwarf_Die *vr_die __maybe_unused, + struct strbuf *buf __maybe_unused) +{ + return -ENOTSUP; +} + +#endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ + +#endif /* _DWARF_AUX_H */ From 981620fd2776c45a514ed621a718e2a6941ad7c5 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:25 -0800 Subject: [PATCH 029/179] perf dwarf-aux: Add die_get_scopes() alternative to dwarf_getscopes() The die_get_scopes() returns the number of enclosing DIEs for the given address and it fills an array of DIEs like dwarf_getscopes(). But it doesn't follow the abstract origin of inlined functions as we want information of the concrete instance. This is needed to check the location of parameters and local variables properly. Users can check the origin separately if needed. 
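A minimal caller sketch (illustrative only, not part of this patch; find_enclosing_func() is a made-up helper and error handling is omitted):

	#include <stdbool.h>
	#include <stdlib.h>
	#include <dwarf.h>          /* DW_TAG_subprogram */
	#include "dwarf-aux.h"      /* die_get_scopes() */

	static bool find_enclosing_func(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die *func)
	{
		Dwarf_Die *scopes = NULL;
		int i, nr = die_get_scopes(cu_die, pc, &scopes);
		bool found = false;

		/* scopes[0] is the outermost scope, the last entry is the innermost one at pc */
		for (i = nr - 1; i >= 0; i--) {
			if (dwarf_tag(&scopes[i]) == DW_TAG_subprogram) {
				*func = scopes[i];
				found = true;
				break;
			}
		}
		free(scopes);
		return found;
	}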
Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-7-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 53 +++++++++++++++++++++++++++++++++++++ tools/perf/util/dwarf-aux.h | 3 +++ 2 files changed, 56 insertions(+) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index adef2635587d..10aa32334d6f 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1425,3 +1425,56 @@ void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die, *entrypc = postprologue_addr; } + +/* Internal parameters for __die_find_scope_cb() */ +struct find_scope_data { + /* Target instruction address */ + Dwarf_Addr pc; + /* Number of scopes found [output] */ + int nr; + /* Array of scopes found, 0 for the outermost one. [output] */ + Dwarf_Die *scopes; +}; + +static int __die_find_scope_cb(Dwarf_Die *die_mem, void *arg) +{ + struct find_scope_data *data = arg; + + if (dwarf_haspc(die_mem, data->pc)) { + Dwarf_Die *tmp; + + tmp = realloc(data->scopes, (data->nr + 1) * sizeof(*tmp)); + if (tmp == NULL) + return DIE_FIND_CB_END; + + memcpy(tmp + data->nr, die_mem, sizeof(*die_mem)); + data->scopes = tmp; + data->nr++; + return DIE_FIND_CB_CHILD; + } + return DIE_FIND_CB_SIBLING; +} + +/** + * die_get_scopes - Return a list of scopes including the address + * @cu_die: a compile unit DIE + * @pc: the address to find + * @scopes: the array of DIEs for scopes (result) + * + * This function does the same as the dwarf_getscopes() but doesn't follow + * the origins of inlined functions. It returns the number of scopes saved + * in the @scopes argument. The outer scope will be saved first (index 0) and + * the last one is the innermost scope at the @pc. + */ +int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes) +{ + struct find_scope_data data = { + .pc = pc, + }; + Dwarf_Die die_mem; + + die_find_child(cu_die, __die_find_scope_cb, &data, &die_mem); + + *scopes = data.scopes; + return data.nr; +} diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 4f5d0211ee4f..f9d765f80fb0 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -129,6 +129,9 @@ bool die_is_optimized_target(Dwarf_Die *cu_die); void die_skip_prologue(Dwarf_Die *sp_die, Dwarf_Die *cu_die, Dwarf_Addr *entrypc); +/* Get the list of including scopes */ +int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes); + #ifdef HAVE_DWARF_GETLOCATIONS_SUPPORT /* Get byte offset range of given variable DIE */ From 3f5928e461e394d27bc1ed7d0785b1e63c43d6a1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:26 -0800 Subject: [PATCH 030/179] perf dwarf-aux: Add die_find_variable_by_reg() helper The die_find_variable_by_reg() will search for a variable or a parameter sub-DIE in the given scope DIE where the location matches to the given register. For the simplest and most common case, memory access usually happens with a base register and an offset to the field so the register holds a pointer in a variable or function parameter. Then we can find one if it has a location expression at the (instruction) address. This function only handles such a simple case for now. 
In this case, the expression has a DW_OP_regN operation where N < 32. If the register index (N) is greater than or equal to 32, DW_OP_regx operation with an operand which saves the value for the N would be used. It rejects expressions with more operations. Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-8-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 67 +++++++++++++++++++++++++++++++++++++ tools/perf/util/dwarf-aux.h | 12 +++++++ 2 files changed, 79 insertions(+) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 10aa32334d6f..652e6e7368a2 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1245,6 +1245,73 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf) out: return ret; } + +/* Interval parameters for __die_find_var_reg_cb() */ +struct find_var_data { + /* Target instruction address */ + Dwarf_Addr pc; + /* Target register */ + unsigned reg; +}; + +/* Max number of registers DW_OP_regN supports */ +#define DWARF_OP_DIRECT_REGS 32 + +/* Only checks direct child DIEs in the given scope. */ +static int __die_find_var_reg_cb(Dwarf_Die *die_mem, void *arg) +{ + struct find_var_data *data = arg; + int tag = dwarf_tag(die_mem); + ptrdiff_t off = 0; + Dwarf_Attribute attr; + Dwarf_Addr base, start, end; + Dwarf_Op *ops; + size_t nops; + + if (tag != DW_TAG_variable && tag != DW_TAG_formal_parameter) + return DIE_FIND_CB_SIBLING; + + if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL) + return DIE_FIND_CB_SIBLING; + + while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { + /* Assuming the location list is sorted by address */ + if (end < data->pc) + continue; + if (start > data->pc) + break; + + /* Only match with a simple case */ + if (data->reg < DWARF_OP_DIRECT_REGS) { + if (ops->atom == (DW_OP_reg0 + data->reg) && nops == 1) + return DIE_FIND_CB_END; + } else { + if (ops->atom == DW_OP_regx && ops->number == data->reg && + nops == 1) + return DIE_FIND_CB_END; + } + } + return DIE_FIND_CB_SIBLING; +} + +/** + * die_find_variable_by_reg - Find a variable saved in a register + * @sc_die: a scope DIE + * @pc: the program address to find + * @reg: the register number to find + * @die_mem: a buffer to save the resulting DIE + * + * Find the variable DIE accessed by the given register. 
+ */ +Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, + Dwarf_Die *die_mem) +{ + struct find_var_data data = { + .pc = pc, + .reg = reg, + }; + return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem); +} #endif /* diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index f9d765f80fb0..b6f430730bd1 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -137,6 +137,10 @@ int die_get_scopes(Dwarf_Die *cu_die, Dwarf_Addr pc, Dwarf_Die **scopes); /* Get byte offset range of given variable DIE */ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf); +/* Find a variable saved in the 'reg' at given address */ +Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, + Dwarf_Die *die_mem); + #else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, @@ -146,6 +150,14 @@ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, return -ENOTSUP; } +static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unused, + Dwarf_Addr pc __maybe_unused, + int reg __maybe_unused, + Dwarf_Die *die_mem __maybe_unused) +{ + return NULL; +} + #endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ #endif /* _DWARF_AUX_H */ From f67f2fda7d997a43e3323aec88d76f1b5e0ea5f2 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:27 -0800 Subject: [PATCH 031/179] perf build: Add feature check for dwarf_getcfi() The dwarf_getcfi() is available on libdw 0.142+. Instead of just checking the version number, it'd be nice to have a config item to check the feature at build time. Suggested-by: Masami Hiramatsu (Google) Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-9-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/build/Makefile.feature | 1 + tools/build/feature/Makefile | 4 ++++ tools/build/feature/test-dwarf_getcfi.c | 9 +++++++++ 3 files changed, 14 insertions(+) create mode 100644 tools/build/feature/test-dwarf_getcfi.c diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 934e2777a2db..64df118376df 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -32,6 +32,7 @@ FEATURE_TESTS_BASIC := \ backtrace \ dwarf \ dwarf_getlocations \ + dwarf_getcfi \ eventfd \ fortify-source \ get_current_dir_name \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dad79ede4e0a..37722e509eb9 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -7,6 +7,7 @@ FILES= \ test-bionic.bin \ test-dwarf.bin \ test-dwarf_getlocations.bin \ + test-dwarf_getcfi.bin \ test-eventfd.bin \ test-fortify-source.bin \ test-get_current_dir_name.bin \ @@ -154,6 +155,9 @@ $(OUTPUT)test-dwarf.bin: $(OUTPUT)test-dwarf_getlocations.bin: $(BUILD) $(DWARFLIBS) +$(OUTPUT)test-dwarf_getcfi.bin: + $(BUILD) $(DWARFLIBS) + $(OUTPUT)test-libelf-getphdrnum.bin: $(BUILD) -lelf diff --git a/tools/build/feature/test-dwarf_getcfi.c b/tools/build/feature/test-dwarf_getcfi.c new file mode 100644 index 000000000000..50e7d7cb7bdf --- /dev/null +++ b/tools/build/feature/test-dwarf_getcfi.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 +#include 
+#include + +int main(void) +{ + Dwarf *dwarf = NULL; + return dwarf_getcfi(dwarf) == NULL; +} From c06547d02094e7eb81389e9485a45d91cc21914c Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:28 -0800 Subject: [PATCH 032/179] perf probe: Convert to check dwarf_getcfi feature Now it has a feature check for the dwarf_getcfi(), use it and convert the code to check HAVE_DWARF_CFI_SUPPORT definition. Suggested-by: Masami Hiramatsu (Google) Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-10-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 5 +++++ tools/perf/util/probe-finder.c | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 8b6cffbc4858..aa55850fbc21 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -476,6 +476,11 @@ else else CFLAGS += -DHAVE_DWARF_GETLOCATIONS_SUPPORT endif # dwarf_getlocations + ifneq ($(feature-dwarf_getcfi), 1) + msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.142); + else + CFLAGS += -DHAVE_DWARF_CFI_SUPPORT + endif # dwarf_getcfi endif # Dwarf support endif # libelf support endif # NO_LIBELF diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 8d3dd85f9ff4..c8923375e30d 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -604,7 +604,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); if (ret <= 0 || nops == 0) { pf->fb_ops = NULL; -#if _ELFUTILS_PREREQ(0, 142) +#ifdef HAVE_DWARF_CFI_SUPPORT } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) { if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 && @@ -615,7 +615,7 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) free(frame); return -ENOENT; } -#endif +#endif /* HAVE_DWARF_CFI_SUPPORT */ } /* Call finder's callback handler */ @@ -1140,7 +1140,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg, pf->machine = ehdr.e_machine; -#if _ELFUTILS_PREREQ(0, 142) +#ifdef HAVE_DWARF_CFI_SUPPORT do { GElf_Shdr shdr; @@ -1150,7 +1150,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg, pf->cfi_dbg = dwarf_getcfi(dbg->dbg); } while (0); -#endif +#endif /* HAVE_DWARF_CFI_SUPPORT */ ret = debuginfo__find_probe_location(dbg, pf); return ret; From b539deafbadb2fc6ba79307a797196454b14f501 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Fri, 10 Nov 2023 12:09:08 +0100 Subject: [PATCH 033/179] perf report: Add s390 raw data interpretation for PAI counters Commit 39d62336f5c126ad ("s390/pai: add support for cryptography counters") added support for Processor Activity Instrumentation Facility (PAI) counters. These counters values are added as raw data with the perf sample during 'perf record'. Now add support to display these counters in the 'perf report' command. The counter number, its assigned name and value is now printed in addition to the hexadecimal output. 
Output before: # perf report -D 6 514766399626050 0x7b058 [0x48]: PERF_RECORD_SAMPLE(IP, 0x1): 303977/303977: 0 period: 1 addr: 0 ... thread: paitest:303977 ...... dso: 0x7b0a0@/root/perf.data.paicrypto [0x48]: event: 9 . . ... raw event: size 72 bytes . 0000: 00 00 00 09 00 01 00 48 00 00 00 00 00 00 00 00 .......H........ . 0010: 00 04 a3 69 00 04 a3 69 00 01 d4 2d 76 de a0 bb ...i...i...-v... . 0020: 00 00 00 00 00 01 5c 53 00 00 00 06 00 00 00 00 ......\S........ . 0030: 00 00 00 00 00 00 00 01 00 00 00 0c 00 07 00 00 ................ . 0040: 00 00 00 53 96 af 00 00 ...S.... Output after: # perf report -D 6 514766399626050 0x7b058 [0x48]: PERF_RECORD_SAMPLE(IP, 0x1): 303977/303977: 0 period: 1 addr: 0 ... thread: paitest:303977 ...... dso: 0x7b0a0@/root/perf.data.paicrypto [0x48]: event: 9 . . ... raw event: size 72 bytes . 0000: 00 00 00 09 00 01 00 48 00 00 00 00 00 00 00 00 .......H........ . 0010: 00 04 a3 69 00 04 a3 69 00 01 d4 2d 76 de a0 bb ...i...i...-v... . 0020: 00 00 00 00 00 01 5c 53 00 00 00 06 00 00 00 00 ......\S........ . 0030: 00 00 00 00 00 00 00 01 00 00 00 0c 00 07 00 00 ................ . 0040: 00 00 00 53 96 af 00 00 ...S.... Counter:007 km_aes_128 Value:0x00000000005396af <--- new Committer notes: Had to add ignore pragmas for that __packed function: +#pragma GCC diagnostic ignored "-Wpacked" +#pragma GCC diagnostic ignored "-Wattributes" Otherwise this doesn't build in things like debian experimentao cross building to mips64, etc. Signed-off-by: Thomas Richter Tested-by: Sumanth Korikkar Acked-by: Sumanth Korikkar Cc: Heiko Carstens Cc: Sven Schnelle Cc: Vasily Gorbik Link: https://lore.kernel.org/r/20231110110908.2312308-1-tmricht@linux.ibm.com [ Corrected non-existent commit referred to the right one: 39d62336f5c126ad ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/s390-cpumcf-kernel.h | 2 + tools/perf/util/s390-sample-raw.c | 113 ++++++++++++++++++++++++--- 2 files changed, 105 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/s390-cpumcf-kernel.h b/tools/perf/util/s390-cpumcf-kernel.h index f55ca07f3ca1..74b36644e384 100644 --- a/tools/perf/util/s390-cpumcf-kernel.h +++ b/tools/perf/util/s390-cpumcf-kernel.h @@ -12,6 +12,8 @@ #define S390_CPUMCF_DIAG_DEF 0xfeef /* Counter diagnostic entry ID */ #define PERF_EVENT_CPUM_CF_DIAG 0xBC000 /* Event: Counter sets */ #define PERF_EVENT_CPUM_SF_DIAG 0xBD000 /* Event: Combined-sampling */ +#define PERF_EVENT_PAI_CRYPTO_ALL 0x1000 /* Event: CRYPTO_ALL */ +#define PERF_EVENT_PAI_NNPA_ALL 0x1800 /* Event: NNPA_ALL */ struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */ unsigned int def:16; /* 0-15 Data Entry Format */ diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c index 115b16edb451..29a744eeb71e 100644 --- a/tools/perf/util/s390-sample-raw.c +++ b/tools/perf/util/s390-sample-raw.c @@ -125,6 +125,9 @@ static int get_counterset_start(int setnr) return 128; case CPUMF_CTR_SET_MT_DIAG: /* Diagnostic counter set */ return 448; + case PERF_EVENT_PAI_NNPA_ALL: /* PAI NNPA counter set */ + case PERF_EVENT_PAI_CRYPTO_ALL: /* PAI CRYPTO counter set */ + return setnr; default: return -1; } @@ -212,27 +215,117 @@ static void s390_cpumcfdg_dump(struct perf_pmu *pmu, struct perf_sample *sample) } } -/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events - * and if the event was triggered by a counter set diagnostic event display - * its raw data. - * The function is only invoked when the dump flag -D is set. 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wpacked" +#pragma GCC diagnostic ignored "-Wattributes" +/* + * Check for consistency of PAI_CRYPTO/PAI_NNPA raw data. */ -void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, struct perf_sample *sample) +struct pai_data { /* Event number and value */ + u16 event_nr; + u64 event_val; +} __packed; + +#pragma GCC diagnostic pop + +/* + * Test for valid raw data. At least one PAI event should be in the raw + * data section. + */ +static bool s390_pai_all_test(struct perf_sample *sample) { + unsigned char *buf = sample->raw_data; + size_t len = sample->raw_size; + + if (len < 0xa || !buf) + return false; + return true; +} + +static void s390_pai_all_dump(struct evsel *evsel, struct perf_sample *sample) +{ + size_t len = sample->raw_size, offset = 0; + unsigned char *p = sample->raw_data; + const char *color = PERF_COLOR_BLUE; + struct pai_data pai_data; + char *ev_name; + + while (offset < len) { + memcpy(&pai_data.event_nr, p, sizeof(pai_data.event_nr)); + pai_data.event_nr = be16_to_cpu(pai_data.event_nr); + p += sizeof(pai_data.event_nr); + offset += sizeof(pai_data.event_nr); + + memcpy(&pai_data.event_val, p, sizeof(pai_data.event_val)); + pai_data.event_val = be64_to_cpu(pai_data.event_val); + p += sizeof(pai_data.event_val); + offset += sizeof(pai_data.event_val); + + ev_name = get_counter_name(evsel->core.attr.config, + pai_data.event_nr, evsel->pmu); + color_fprintf(stdout, color, "\tCounter:%03d %s Value:%#018lx\n", + pai_data.event_nr, ev_name ?: "", + pai_data.event_val); + free(ev_name); + + if (offset + 0xa > len) + break; + } + color_fprintf(stdout, color, "\n"); +} + +/* S390 specific trace event function. Check for PERF_RECORD_SAMPLE events + * and if the event was triggered by a + * - counter set diagnostic event + * - processor activity assist (PAI) crypto counter event + * - processor activity assist (PAI) neural network processor assist (NNPA) + * counter event + * display its raw data. + * The function is only invoked when the dump flag -D is set. + * + * Function evlist__s390_sample_raw() is defined as call back after it has + * been verified that the perf.data file was created on s390 platform. 
+ */ +void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, + struct perf_sample *sample) +{ + const char *pai_name; struct evsel *evsel; if (event->header.type != PERF_RECORD_SAMPLE) return; evsel = evlist__event2evsel(evlist, event); - if (evsel == NULL || - evsel->core.attr.config != PERF_EVENT_CPUM_CF_DIAG) + if (!evsel) return; /* Display raw data on screen */ - if (!s390_cpumcfdg_testctr(sample)) { - pr_err("Invalid counter set data encountered\n"); + if (evsel->core.attr.config == PERF_EVENT_CPUM_CF_DIAG) { + if (!evsel->pmu) + evsel->pmu = perf_pmus__find("cpum_cf"); + if (!s390_cpumcfdg_testctr(sample)) + pr_err("Invalid counter set data encountered\n"); + else + s390_cpumcfdg_dump(evsel->pmu, sample); return; } - s390_cpumcfdg_dump(evsel->pmu, sample); + + switch (evsel->core.attr.config) { + case PERF_EVENT_PAI_NNPA_ALL: + pai_name = "NNPA_ALL"; + break; + case PERF_EVENT_PAI_CRYPTO_ALL: + pai_name = "CRYPTO_ALL"; + break; + default: + return; + } + + if (!s390_pai_all_test(sample)) { + pr_err("Invalid %s raw data encountered\n", pai_name); + } else { + if (!evsel->pmu) + evsel->pmu = perf_pmus__find_by_type(evsel->core.attr.type); + s390_pai_all_dump(evsel, sample); + } } From acbf6de674ef7b1b5870b25e7b3c695bf84273d0 Mon Sep 17 00:00:00 2001 From: Ji Sheng Teoh Date: Fri, 3 Nov 2023 16:24:41 +0800 Subject: [PATCH 034/179] perf vendor events riscv: Add StarFive Dubhe-80 JSON file StarFive's Dubhe-80 supports raw event id 0x00 - 0x22. The raw events are enabled through PMU node of DT binding. Besides raw event, add standard RISC-V firmware events to support monitoring of firmware event. Example of PMU DT node: pmu { compatible = "riscv,pmu"; riscv,raw-event-to-mhpmcounters = /* Event ID 1-31 */ <0x00 0x00 0xFFFFFFFF 0xFFFFFFE0 0x00007FF8>, /* Event ID 32-33 */ <0x00 0x20 0xFFFFFFFF 0xFFFFFFFE 0x00007FF8>, /* Event ID 34 */ <0x00 0x22 0xFFFFFFFF 0xFFFFFF22 0x00007FF8>; }; Example of 'perf stat' output: [root@user]# perf stat -a \ -e access_mmu_stlb \ -e miss_mmu_stlb \ -e access_mmu_pte_c \ -e rob_flush \ -e btb_prediction_miss \ -e itlb_miss \ -e sync_del_fetch_g \ -e icache_miss \ -e bpu_br_retire \ -e bpu_br_miss \ -e ret_ins_retire \ -e ret_ins_miss \ -- openssl speed rsa2048 Doing 2048 bits private rsa's for 10s: 39 2048 bits private RSA's in 10.14s Doing 2048 bits public rsa's for 10s: 1563 2048 bits public RSA's in 10.00s version: 3.0.11 built on: Tue Sep 19 13:02:31 2023 UTC options: bn(64,64) CPUINFO: N/A sign verify sign/s verify/s rsa 2048 bits 0.260000s 0.006398s 3.8 156.3 Performance counter stats for 'system wide': 1338350 access_mmu_stlb 1154025 miss_mmu_stlb 1162691 access_mmu_pte_c 34067 rob_flush 11212384 btb_prediction_miss 1256242 itlb_miss 652523491 sync_del_fetch_g 384465 icache_miss 64635789 bpu_br_retire 323440 bpu_br_miss 8785143 ret_ins_retire 31236 ret_ins_miss 20.760822480 seconds time elapsed Reviewed-by: Ian Rogers Signed-off-by: Ji Sheng Teoh Cc: Adrian Hunter Cc: Albert Ou Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Ley Foon Tan Cc: Mark Rutland Cc: Namhyung Kim Cc: Nikita Shubin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/20231103082441.1389842-1-jisheng.teoh@starfivetech.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/riscv/mapfile.csv | 1 + .../arch/riscv/starfive/dubhe-80/common.json | 172 ++++++++++++++++++ .../riscv/starfive/dubhe-80/firmware.json | 68 +++++++ 3 files changed, 241 insertions(+) create 
mode 100644 tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json create mode 100644 tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv index c61b3d6ef616..ee61e26f90cd 100644 --- a/tools/perf/pmu-events/arch/riscv/mapfile.csv +++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv @@ -15,3 +15,4 @@ # #MVENDORID-MARCHID-MIMPID,Version,Filename,EventType 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core +0x67e-0x80000000db000080-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json new file mode 100644 index 000000000000..fbffcacb2ace --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/common.json @@ -0,0 +1,172 @@ +[ + { + "EventName": "ACCESS_MMU_STLB", + "EventCode": "0x1", + "BriefDescription": "access MMU STLB" + }, + { + "EventName": "MISS_MMU_STLB", + "EventCode": "0x2", + "BriefDescription": "miss MMU STLB" + }, + { + "EventName": "ACCESS_MMU_PTE_C", + "EventCode": "0x3", + "BriefDescription": "access MMU PTE-Cache" + }, + { + "EventName": "MISS_MMU_PTE_C", + "EventCode": "0x4", + "BriefDescription": "miss MMU PTE-Cache" + }, + { + "EventName": "ROB_FLUSH", + "EventCode": "0x5", + "BriefDescription": "ROB flush (all kinds of exceptions)" + }, + { + "EventName": "BTB_PREDICTION_MISS", + "EventCode": "0x6", + "BriefDescription": "BTB prediction miss" + }, + { + "EventName": "ITLB_MISS", + "EventCode": "0x7", + "BriefDescription": "ITLB miss" + }, + { + "EventName": "SYNC_DEL_FETCH_G", + "EventCode": "0x8", + "BriefDescription": "SYNC delivery a fetch-group" + }, + { + "EventName": "ICACHE_MISS", + "EventCode": "0x9", + "BriefDescription": "ICache miss" + }, + { + "EventName": "BPU_BR_RETIRE", + "EventCode": "0xA", + "BriefDescription": "condition branch instruction retire" + }, + { + "EventName": "BPU_BR_MISS", + "EventCode": "0xB", + "BriefDescription": "condition branch instruction miss" + }, + { + "EventName": "RET_INS_RETIRE", + "EventCode": "0xC", + "BriefDescription": "return instruction retire" + }, + { + "EventName": "RET_INS_MISS", + "EventCode": "0xD", + "BriefDescription": "return instruction miss" + }, + { + "EventName": "INDIRECT_JR_MISS", + "EventCode": "0xE", + "BriefDescription": "indirect JR instruction miss (inlcude without target)" + }, + { + "EventName": "IBUF_VAL_ID_NORDY", + "EventCode": "0xF", + "BriefDescription": "IBUF valid while ID not ready" + }, + { + "EventName": "IBUF_NOVAL_ID_RDY", + "EventCode": "0x10", + "BriefDescription": "IBUF not valid while ID ready" + }, + { + "EventName": "REN_INT_PHY_REG_NORDY", + "EventCode": "0x11", + "BriefDescription": "REN integer physical register file is not ready" + }, + { + "EventName": "REN_FP_PHY_REG_NORDY", + "EventCode": "0x12", + "BriefDescription": "REN floating point physical register file is not ready" + }, + { + "EventName": "REN_CP_NORDY", + "EventCode": "0x13", + "BriefDescription": "REN checkpoint is not ready" + }, + { + "EventName": "DEC_VAL_ROB_NORDY", + "EventCode": "0x14", + "BriefDescription": "DEC is valid and ROB is not ready" + }, + { + "EventName": "OOD_FLUSH_LS_DEP", + "EventCode": "0x15", + "BriefDescription": "out of order flush due to load/store dependency" + }, + { + "EventName": "BRU_RET_IJR_INS", + "EventCode": "0x16", + "BriefDescription": "BRU retire an IJR instruction" + }, + { + "EventName": "ACCESS_DTLB", + 
"EventCode": "0x17", + "BriefDescription": "access DTLB" + }, + { + "EventName": "MISS_DTLB", + "EventCode": "0x18", + "BriefDescription": "miss DTLB" + }, + { + "EventName": "LOAD_INS_DCACHE", + "EventCode": "0x19", + "BriefDescription": "load instruction access DCache" + }, + { + "EventName": "LOAD_INS_MISS_DCACHE", + "EventCode": "0x1A", + "BriefDescription": "load instruction miss DCache" + }, + { + "EventName": "STORE_INS_DCACHE", + "EventCode": "0x1B", + "BriefDescription": "store/amo instruction access DCache" + }, + { + "EventName": "STORE_INS_MISS_DCACHE", + "EventCode": "0x1C", + "BriefDescription": "store/amo instruction miss DCache" + }, + { + "EventName": "LOAD_SCACHE", + "EventCode": "0x1D", + "BriefDescription": "load access SCache" + }, + { + "EventName": "STORE_SCACHE", + "EventCode": "0x1E", + "BriefDescription": "store access SCache" + }, + { + "EventName": "LOAD_MISS_SCACHE", + "EventCode": "0x1F", + "BriefDescription": "load miss SCache" + }, + { + "EventName": "STORE_MISS_SCACHE", + "EventCode": "0x20", + "BriefDescription": "store miss SCache" + }, + { + "EventName": "L2C_PF_REQ", + "EventCode": "0x21", + "BriefDescription": "L2C data-prefetcher request" + }, + { + "EventName": "L2C_PF_HIT", + "EventCode": "0x22", + "BriefDescription": "L2C data-prefetcher hit" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json new file mode 100644 index 000000000000..9b4a032186a7 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/starfive/dubhe-80/firmware.json @@ -0,0 +1,68 @@ +[ + { + "ArchStdEvent": "FW_MISALIGNED_LOAD" + }, + { + "ArchStdEvent": "FW_MISALIGNED_STORE" + }, + { + "ArchStdEvent": "FW_ACCESS_LOAD" + }, + { + "ArchStdEvent": "FW_ACCESS_STORE" + }, + { + "ArchStdEvent": "FW_ILLEGAL_INSN" + }, + { + "ArchStdEvent": "FW_SET_TIMER" + }, + { + "ArchStdEvent": "FW_IPI_SENT" + }, + { + "ArchStdEvent": "FW_IPI_RECEIVED" + }, + { + "ArchStdEvent": "FW_FENCE_I_SENT" + }, + { + "ArchStdEvent": "FW_FENCE_I_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_SENT" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED" + } +] From 280b4e4a9e8009affd8f20ec2d467cb4deb05c1c Mon Sep 17 00:00:00 2001 From: Benjamin Gray Date: Tue, 12 Sep 2023 16:07:59 +1000 Subject: [PATCH 035/179] perf tools: Address python 3.6 DeprecationWarning for string scapes Python 3.6 introduced a DeprecationWarning for invalid escape sequences. This is upgraded to a SyntaxWarning in Python 3.12, and will eventually be a syntax error. Fix these now to get ahead of it before it's an error. 
Signed-off-by: Benjamin Gray Acked-by: Adrian Hunter Cc: Alexander Shishkin Cc: Andrii Nakryiko Cc: Hartley Sweeten Cc: Ian Abbott Cc: Ian Rogers Cc: Ingo Molnar Cc: Jan Kiszka Cc: Jiri Olsa Cc: Jonathan Corbet Cc: Kieran Bingham Cc: Mark Rutland Cc: Mykola Lysenko Cc: Namhyung Kim Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Peter Zijlstra Cc: Shuah Khan Cc: Todd E Brandt Cc: Tom Rix Cc: linux-doc@vger.kernel.org Cc: linux-ia64@vger.kernel.org Cc: linux-kselftest@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: llvm@lists.linux.dev Link: https://lore.kernel.org/r/20230912060801.95533-6-bgray@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/jevents.py | 2 +- tools/perf/scripts/python/arm-cs-trace-disasm.py | 4 ++-- tools/perf/scripts/python/compaction-times.py | 2 +- tools/perf/scripts/python/exported-sql-viewer.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 3c091ab75305..0093c998cb6e 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -83,7 +83,7 @@ def c_len(s: str) -> int: """Return the length of s a C string This doesn't handle all escape characters properly. It first assumes - all \ are for escaping, it then adjusts as it will have over counted + all \\ are for escaping, it then adjusts as it will have over counted \\. The code uses \000 rather than \0 as a terminator as an adjacent number would be folded into a string of \0 (ie. "\0" + "5" doesn't equal a terminator followed by the number 5 but the escape of diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index d59ff53f1d94..de58991c78bb 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -45,8 +45,8 @@ parser = OptionParser(option_list=option_list) # Initialize global dicts and regular expression disasm_cache = dict() cpu_data = dict() -disasm_re = re.compile("^\s*([0-9a-fA-F]+):") -disasm_func_re = re.compile("^\s*([0-9a-fA-F]+)\s.*:") +disasm_re = re.compile(r"^\s*([0-9a-fA-F]+):") +disasm_func_re = re.compile(r"^\s*([0-9a-fA-F]+)\s.*:") cache_size = 64*1024 glb_source_file_name = None diff --git a/tools/perf/scripts/python/compaction-times.py b/tools/perf/scripts/python/compaction-times.py index 2560a042dc6f..9401f7c14747 100644 --- a/tools/perf/scripts/python/compaction-times.py +++ b/tools/perf/scripts/python/compaction-times.py @@ -260,7 +260,7 @@ def pr_help(): comm_re = None pid_re = None -pid_regex = "^(\d*)-(\d*)$|^(\d*)$" +pid_regex = r"^(\d*)-(\d*)$|^(\d*)$" opt_proc = popt.DISP_DFL opt_disp = topt.DISP_ALL diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py index 13f2d8a81610..121cf61ba1b3 100755 --- a/tools/perf/scripts/python/exported-sql-viewer.py +++ b/tools/perf/scripts/python/exported-sql-viewer.py @@ -677,8 +677,8 @@ class CallGraphModelBase(TreeModel): # sqlite supports GLOB (text only) which uses * and ? and is case sensitive if not self.glb.dbref.is_sqlite3: # Escape % and _ - s = value.replace("%", "\%") - s = s.replace("_", "\_") + s = value.replace("%", "\\%") + s = s.replace("_", "\\_") # Translate * and ? 
into SQL LIKE pattern characters % and _ trans = string.maketrans("*?", "%_") match = " LIKE '" + str(s).translate(trans) + "'" From 72b4ca7e993e94f09bcf6d19fc385a2e8060c71f Mon Sep 17 00:00:00 2001 From: Nick Forrington Date: Thu, 2 Nov 2023 16:22:24 +0000 Subject: [PATCH 036/179] perf test: Remove atomics from test_loop to avoid test failures The current use of atomics can lead to test failures, as tests (such as tests/shell/record.sh) search for samples with "test_loop" as the top-most stack frame, but find frames related to the atomic operation (e.g. __aarch64_ldadd4_relax). This change simply removes the "count" variable, as it is not necessary. Fixes: 1962ab6f6e0b39e4 ("perf test workload thloop: Make count increments atomic") Reviewed-by: James Clark Signed-off-by: Nick Forrington Acked-by: Leo Yan Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231102162225.50028-1-nick.forrington@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/workloads/thloop.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/perf/tests/workloads/thloop.c b/tools/perf/tests/workloads/thloop.c index af05269c2eb8..457b29f91c3e 100644 --- a/tools/perf/tests/workloads/thloop.c +++ b/tools/perf/tests/workloads/thloop.c @@ -7,7 +7,6 @@ #include "../tests.h" static volatile sig_atomic_t done; -static volatile unsigned count; /* We want to check this symbol in perf report */ noinline void test_loop(void); @@ -19,8 +18,7 @@ static void sighandler(int sig __maybe_unused) noinline void test_loop(void) { - while (!done) - __atomic_fetch_add(&count, 1, __ATOMIC_RELAXED); + while (!done); } static void *thfunc(void *arg) From b457c526072aa8ab312517010837b06d38b9bb66 Mon Sep 17 00:00:00 2001 From: Paran Lee Date: Tue, 21 Nov 2023 07:32:19 +0900 Subject: [PATCH 037/179] perf script python: Fail check on dynamic allocation Add PyList_New() Fail check in get_field_numeric_entry() function and dynamic allocation checking for set_regs_in_dict(), python_start_script(). 
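As background, PyList_New() returns NULL when allocation fails, so
embedded C code has to check it before filling the list; the patch
itself aborts via Py_FatalError(). A standalone sketch of the same kind
of check, with made-up names and a propagate-the-error policy instead
of aborting:

  #include <Python.h>

  static PyObject *longs_to_list(const long *vals, Py_ssize_t n)
  {
          PyObject *list = PyList_New(n);

          if (!list)
                  return NULL;    /* allocation failed */

          for (Py_ssize_t i = 0; i < n; i++) {
                  PyObject *item = PyLong_FromLong(vals[i]);

                  if (!item) {
                          Py_DECREF(list);
                          return NULL;
                  }
                  /* the list steals the reference to item */
                  PyList_SET_ITEM(list, i, item);
          }
          return list;
  }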
Reviewed-by: Adrian Hunter Reviewed-by: MichelleJin Signed-off-by: Paran Lee Cc: Alexander Shishkin Cc: Austin Kim Cc: Honggyu Kim Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Li Dong Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Sean Christopherson Link: https://lore.kernel.org/r/20231120223218.9036-1-p4ranlee@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- .../util/scripting-engines/trace-event-python.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 94312741443a..860e1837ba96 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -353,6 +353,8 @@ static PyObject *get_field_numeric_entry(struct tep_event *event, if (is_array) { list = PyList_New(field->arraylen); + if (!list) + Py_FatalError("couldn't create Python list"); item_size = field->size / field->arraylen; n_items = field->arraylen; } else { @@ -754,7 +756,7 @@ static void regs_map(struct regs_dump *regs, uint64_t mask, const char *arch, ch } } -static void set_regs_in_dict(PyObject *dict, +static int set_regs_in_dict(PyObject *dict, struct perf_sample *sample, struct evsel *evsel) { @@ -770,6 +772,8 @@ static void set_regs_in_dict(PyObject *dict, */ int size = __sw_hweight64(attr->sample_regs_intr) * 28; char *bf = malloc(size); + if (!bf) + return -1; regs_map(&sample->intr_regs, attr->sample_regs_intr, arch, bf, size); @@ -781,6 +785,8 @@ static void set_regs_in_dict(PyObject *dict, pydict_set_item_string_decref(dict, "uregs", _PyUnicode_FromString(bf)); free(bf); + + return 0; } static void set_sym_in_dict(PyObject *dict, struct addr_location *al, @@ -920,7 +926,8 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample, PyLong_FromUnsignedLongLong(sample->cyc_cnt)); } - set_regs_in_dict(dict, sample, evsel); + if (set_regs_in_dict(dict, sample, evsel)) + Py_FatalError("Failed to setting regs in dict"); return dict; } @@ -1918,12 +1925,18 @@ static int python_start_script(const char *script, int argc, const char **argv, scripting_context->session = session; #if PY_MAJOR_VERSION < 3 command_line = malloc((argc + 1) * sizeof(const char *)); + if (!command_line) + return -1; + command_line[0] = script; for (i = 1; i < argc + 1; i++) command_line[i] = argv[i - 1]; PyImport_AppendInittab(name, initperf_trace_context); #else command_line = malloc((argc + 1) * sizeof(wchar_t *)); + if (!command_line) + return -1; + command_line[0] = Py_DecodeLocale(script, NULL); for (i = 1; i < argc + 1; i++) command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); From cd38d6b5fa2dfc3beb7311f0b373e3141620c76b Mon Sep 17 00:00:00 2001 From: zhaimingbing Date: Mon, 20 Nov 2023 19:23:56 +0800 Subject: [PATCH 038/179] perf script perl: Fail check on dynamic allocation Return ENOMEM when dynamic allocation failed. 
Reviewed-by: Ian Rogers Signed-off-by: zhaimingbing Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Li Dong Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Sean Christopherson Link: https://lore.kernel.org/r/20231120112356.8652-1-zhaimingbing@cmss.chinamobile.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/scripting-engines/trace-event-perl.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 603091317bed..b072ac5d3bc2 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -490,6 +490,9 @@ static int perl_start_script(const char *script, int argc, const char **argv, scripting_context->session = session; command_line = malloc((argc + 2) * sizeof(const char *)); + if (!command_line) + return -ENOMEM; + command_line[0] = ""; command_line[1] = script; for (i = 2; i < argc + 2; i++) From 697579629f850d8f863147351e12e74c50114a8a Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 7 Nov 2023 10:40:20 -0800 Subject: [PATCH 039/179] perf test: Basic branch counter support Add a basic test for the branch counter feature. The test verifies that - The new filter can be successfully applied on the supported platforms. - The counter value can be outputted via the perf report -D Signed-off-by: Kan Liang Tested-by: Ian Rogers Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Tinghao Zhang Link: https://lore.kernel.org/r/20231107184020.1497571-1-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record.sh | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh index 29443b8e8876..c74412c68e65 100755 --- a/tools/perf/tests/shell/record.sh +++ b/tools/perf/tests/shell/record.sh @@ -12,6 +12,9 @@ err=0 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) testprog="perf test -w thloop" testsym="test_loop" +cpu_pmu_dir="/sys/bus/event_source/devices/cpu*" +br_cntr_file="/caps/branch_counter_nr" +br_cntr_output="branch stack counters" cleanup() { rm -rf "${perfdata}" @@ -155,10 +158,37 @@ test_workload() { echo "Basic target workload test [Success]" } +test_branch_counter() { + echo "Basic branch counter test" + # Check if the branch counter feature is supported + for dir in $cpu_pmu_dir + do + if [ ! -e "$dir$br_cntr_file" ] + then + echo "branch counter feature not supported on all core PMUs ($dir) [Skipped]" + return + fi + done + if ! perf record -o "${perfdata}" -j any,counter ${testprog} 2> /dev/null + then + echo "Basic branch counter test [Failed record]" + err=1 + return + fi + if ! 
perf report -i "${perfdata}" -D -q | grep -q "$br_cntr_output" + then + echo "Basic branch record test [Failed missing output]" + err=1 + return + fi + echo "Basic branch counter test [Success]" +} + test_per_thread test_register_capture test_system_wide test_workload +test_branch_counter cleanup exit $err From 2dbba30fd69b604802a9535b74bddb5bcca23793 Mon Sep 17 00:00:00 2001 From: James Clark Date: Fri, 1 Sep 2023 14:37:15 +0100 Subject: [PATCH 040/179] perf cs-etm: Bump minimum OpenCSD version to ensure a bugfix is present Since commit d927ef5004ef ("perf cs-etm: Add exception level consistency check"), the exception that was added to Perf will be triggered unless the following bugfix from OpenCSD is present: - _Version 1.2.1_: - __Bugfix__: ETM4x / ETE - output of context elements to client can in some circumstances be delayed until after subsequent atoms have been processed leading to incorrect memory decode access via the client callbacks. Fixed to flush context elements immediately they are committed. Rather than remove the assert and silently fail, just increase the minimum version requirement to avoid hard to debug issues and regressions. Reviewed-by: Ian Rogers Signed-off-by: James Clark Tested-by: Leo Yan Cc: John Garry Cc: Mike Leach Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20230901133716.677499-1-james.clark@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/build/feature/test-libopencsd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/build/feature/test-libopencsd.c b/tools/build/feature/test-libopencsd.c index eb6303ff446e..4cfcef9da3e4 100644 --- a/tools/build/feature/test-libopencsd.c +++ b/tools/build/feature/test-libopencsd.c @@ -4,9 +4,9 @@ /* * Check OpenCSD library version is sufficient to provide required features */ -#define OCSD_MIN_VER ((1 << 16) | (1 << 8) | (1)) +#define OCSD_MIN_VER ((1 << 16) | (2 << 8) | (1)) #if !defined(OCSD_VER_NUM) || (OCSD_VER_NUM < OCSD_MIN_VER) -#error "OpenCSD >= 1.1.1 is required" +#error "OpenCSD >= 1.2.1 is required" #endif int main(void) From 26218331f49c858d52e60418cf3cef4c3fa6cf2e Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Sat, 14 Oct 2023 15:45:12 +0800 Subject: [PATCH 041/179] perf auxtrace: Add 'T' itrace option for timestamp trace An AUX trace can contain timestamp, but in some situations, the hardware trace module (e.g. Arm CoreSight) cannot decide the traced timestamp is the same source with CPU's time, thus the decoder can not use the timestamp trace for samples. This patch introduces 'T' itrace option. If users know the platforms they are working on have the same time counter with CPUs, users can use this new option to tell a decoder for using timestamp trace as kernel time. 
Signed-off-by: Leo Yan Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Suzuki Poulouse Cc: Will Deacon Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231014074513.1668000-2-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/itrace.txt | 1 + tools/perf/util/auxtrace.c | 3 +++ tools/perf/util/auxtrace.h | 3 +++ 3 files changed, 7 insertions(+) diff --git a/tools/perf/Documentation/itrace.txt b/tools/perf/Documentation/itrace.txt index a97f95825b14..19cc179be9a7 100644 --- a/tools/perf/Documentation/itrace.txt +++ b/tools/perf/Documentation/itrace.txt @@ -25,6 +25,7 @@ q quicker (less detailed) decoding A approximate IPC Z prefer to ignore timestamps (so-called "timeless" decoding) + T use the timestamp trace as kernel time The default is all events i.e. the same as --itrace=iybxwpe, except for perf script where it is --itrace=ce diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index a0368202a746..f528c4364d23 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -1638,6 +1638,9 @@ int itrace_do_parse_synth_opts(struct itrace_synth_opts *synth_opts, case 'Z': synth_opts->timeless_decoding = true; break; + case 'T': + synth_opts->use_timestamp = true; + break; case ' ': case ',': break; diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index 29eb82dff574..55702215a82d 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -99,6 +99,7 @@ enum itrace_period_type { * @remote_access: whether to synthesize remote access events * @mem: whether to synthesize memory events * @timeless_decoding: prefer "timeless" decoding i.e. ignore timestamps + * @use_timestamp: use the timestamp trace as kernel time * @vm_time_correlation: perform VM Time Correlation * @vm_tm_corr_dry_run: VM Time Correlation dry-run * @vm_tm_corr_args: VM Time Correlation implementation-specific arguments @@ -146,6 +147,7 @@ struct itrace_synth_opts { bool remote_access; bool mem; bool timeless_decoding; + bool use_timestamp; bool vm_time_correlation; bool vm_tm_corr_dry_run; char *vm_tm_corr_args; @@ -678,6 +680,7 @@ bool auxtrace__evsel_is_auxtrace(struct perf_session *session, " q: quicker (less detailed) decoding\n" \ " A: approximate IPC\n" \ " Z: prefer to ignore timestamps (so-called \"timeless\" decoding)\n" \ +" T: use the timestamp trace as kernel time\n" \ " PERIOD[ns|us|ms|i|t]: specify period to sample stream\n" \ " concatenate multiple options. Default is iybxwpe or cewp\n" From a4271827e609d30b7255aa7e4c453a8f3fe36a7b Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Sat, 14 Oct 2023 15:45:13 +0800 Subject: [PATCH 042/179] perf cs-etm: Enable itrace option 'T' Prior to Armv8.4, the feature FEAT_TRF is not supported by Arm CPUs. Consequently, the sysfs node 'ts_source' will not be set as 1 by the CoreSight ETM driver. On the other hand, the perf tool relies on the 'ts_source' node to determine whether the kernel timestamp is traced. Since the 'ts_source' is not set for Arm CPUs prior to Armv8.4, platforms in this case cannot utilize the traced timestamp as the kernel time. This patch enables the 'T' itrace option, which forcibly utilizes the traced timestamp as the kernel time. 
If users are aware that their working platform's Arm CoreSight shares the same counter with the kernel time, they can specify 'T' option to decode the traced timestamp as the kernel time. An usage example is: # perf record -e cs_etm// -- test_program # perf script --itrace=i10ibT # perf report --itrace=i10ibT Reviewed-by: James Clark Signed-off-by: Leo Yan Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Suzuki Poulouse Cc: Will Deacon Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231014074513.1668000-3-leo.yan@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/cs-etm.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index a9873d14c632..d65d7485886c 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -3346,12 +3346,27 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event, etm->metadata = metadata; etm->auxtrace_type = auxtrace_info->type; - /* Use virtual timestamps if all ETMs report ts_source = 1 */ - etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu); + if (etm->synth_opts.use_timestamp) + /* + * Prior to Armv8.4, Arm CPUs don't support FEAT_TRF feature, + * therefore the decoder cannot know if the timestamp trace is + * same with the kernel time. + * + * If a user has knowledge for the working platform and can + * specify itrace option 'T' to tell decoder to forcely use the + * traced timestamp as the kernel time. + */ + etm->has_virtual_ts = true; + else + /* Use virtual timestamps if all ETMs report ts_source = 1 */ + etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu); if (!etm->has_virtual_ts) ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n" - "The time field of the samples will not be set accurately.\n\n"); + "The time field of the samples will not be set accurately.\n" + "For Arm CPUs prior to Armv8.4 or without support FEAT_TRF,\n" + "you can specify the itrace option 'T' for timestamp decoding\n" + "if the Coresight timestamp on the platform is same with the kernel time.\n\n"); etm->auxtrace.process_event = cs_etm__process_event; etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event; From a24d9d9dc096fc0d0bd85302c9a4fe4fe3b1107b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 22 Nov 2023 20:29:22 -0800 Subject: [PATCH 043/179] perf parse-events: Make legacy events lower priority than sysfs/JSON The perf tool has previously made legacy events the priority so with or without a PMU the legacy event would be opened: $ perf stat -e cpu-cycles,cpu/cpu-cycles/ true Using CPUID GenuineIntel-6-8D-1 intel_pt default config: tsc,mtc,mtc_period=3,psb_period=3,pt,branch Attempting to add event pmu 'cpu' with 'cpu-cycles,' that may result in non-fatal errors After aliases, add event pmu 'cpu' with 'cpu-cycles,' that may result in non-fatal errors Control descriptor is not initialized ------------------------------------------------------------ perf_event_attr: type 0 (PERF_TYPE_HARDWARE) size 136 config 0 (PERF_COUNT_HW_CPU_CYCLES) sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ sys_perf_event_open: pid 833967 cpu -1 group_fd -1 flags 0x8 = 3 
------------------------------------------------------------ perf_event_attr: type 0 (PERF_TYPE_HARDWARE) size 136 config 0 (PERF_COUNT_HW_CPU_CYCLES) sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ ... Fixes to make hybrid/BIG.little PMUs behave correctly, ie as core PMUs capable of opening legacy events on each, removing hard coded "cpu_core" and "cpu_atom" Intel PMU names, etc. caused a behavioral difference on Apple/ARM due to latent issues in the PMU driver reported in: https://lore.kernel.org/lkml/08f1f185-e259-4014-9ca4-6411d5c1bc65@marcan.st/ As part of that report Mark Rutland requested that legacy events not be higher in priority when a PMU is specified reversing what has until this change been perf's default behavior. With this change the above becomes: $ perf stat -e cpu-cycles,cpu/cpu-cycles/ true Using CPUID GenuineIntel-6-8D-1 Attempt to add: cpu/cpu-cycles=0/ ..after resolving event: cpu/event=0x3c/ Control descriptor is not initialized ------------------------------------------------------------ perf_event_attr: type 0 (PERF_TYPE_HARDWARE) size 136 config 0 (PERF_COUNT_HW_CPU_CYCLES) sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ sys_perf_event_open: pid 827628 cpu -1 group_fd -1 flags 0x8 = 3 ------------------------------------------------------------ perf_event_attr: type 4 (PERF_TYPE_RAW) size 136 config 0x3c sample_type IDENTIFIER read_format TOTAL_TIME_ENABLED|TOTAL_TIME_RUNNING disabled 1 inherit 1 enable_on_exec 1 exclude_guest 1 ------------------------------------------------------------ ... So the second event has become a raw event as /sys/devices/cpu/events/cpu-cycles exists. A fix was necessary to config_term_pmu in parse-events.c as check_alias expansion needs to happen after config_term_pmu, and config_term_pmu may need calling a second time because of this. config_term_pmu is updated to not use the legacy event when the PMU has such a named event (either from JSON or sysfs). The bulk of this change is updating all of the parse-events test expectations so that if a sysfs/JSON event exists for a PMU the test doesn't fail - a further sign, if it were needed, that the legacy event priority was a known and tested behavior of the perf tool. 
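Within perf, the "does this PMU name the event" question is answered by
helpers such as perf_pmu__have_event(), as used by the new assert_hw()
test helper below. Outside of perf, the same check can be sketched
against sysfs, mirroring the /sys/devices/cpu/events/cpu-cycles lookup
mentioned above; the helper name below is illustrative only:

  #include <limits.h>
  #include <stdbool.h>
  #include <stdio.h>
  #include <unistd.h>

  /* True if the PMU exposes a sysfs-named event, which now takes
   * priority over the legacy PERF_TYPE_HARDWARE encoding when the
   * PMU is spelled out on the command line. */
  static bool pmu_has_named_event(const char *pmu, const char *event)
  {
          char path[PATH_MAX];

          snprintf(path, sizeof(path),
                   "/sys/bus/event_source/devices/%s/events/%s",
                   pmu, event);
          return access(path, R_OK) == 0;
  }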
Reported-by: Hector Martin Signed-off-by: Ian Rogers Tested-by: Hector Martin Tested-by: Marc Zyngier Acked-by: Mark Rutland Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231123042922.834425-1-irogers@google.com [ Initialize the 'alias_rewrote_terms' variable to false to address a clang warning ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/parse-events.c | 256 +++++++++++++++++++++++--------- tools/perf/util/parse-events.c | 52 +++++-- tools/perf/util/pmu.c | 8 +- tools/perf/util/pmu.h | 3 +- 4 files changed, 231 insertions(+), 88 deletions(-) diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c index e52f45c7c3d1..fbdf710d5eea 100644 --- a/tools/perf/tests/parse-events.c +++ b/tools/perf/tests/parse-events.c @@ -162,6 +162,22 @@ static int test__checkevent_numeric(struct evlist *evlist) return TEST_OK; } + +static int assert_hw(struct perf_evsel *evsel, enum perf_hw_id id, const char *name) +{ + struct perf_pmu *pmu; + + if (evsel->attr.type == PERF_TYPE_HARDWARE) { + TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, id)); + return 0; + } + pmu = perf_pmus__find_by_type(evsel->attr.type); + + TEST_ASSERT_VAL("unexpected PMU type", pmu); + TEST_ASSERT_VAL("PMU missing event", perf_pmu__have_event(pmu, name)); + return 0; +} + static int test__checkevent_symbolic_name(struct evlist *evlist) { struct perf_evsel *evsel; @@ -169,10 +185,12 @@ static int test__checkevent_symbolic_name(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); perf_evlist__for_each_evsel(&evlist->core, evsel) { - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); - TEST_ASSERT_VAL("wrong config", - test_perf_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + int ret = assert_hw(evsel, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + + if (ret) + return ret; } + return TEST_OK; } @@ -183,8 +201,10 @@ static int test__checkevent_symbolic_name_config(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of entries", 0 != evlist->core.nr_entries); perf_evlist__for_each_evsel(&evlist->core, evsel) { - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); - TEST_ASSERT_VAL("wrong config", test_perf_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + int ret = assert_hw(evsel, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (ret) + return ret; /* * The period value gets configured within evlist__config, * while this test executes only parse events method. @@ -861,10 +881,14 @@ static int test__group1(struct evlist *evlist) evlist__nr_groups(evlist) == num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* instructions:k */ evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -878,8 +902,10 @@ static int test__group1(struct evlist *evlist) /* cycles:upp */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -907,6 +933,8 @@ static int test__group2(struct evlist *evlist) TEST_ASSERT_VAL("wrong number of groups", 1 == evlist__nr_groups(evlist)); evlist__for_each_entry(evlist, evsel) { + int ret; + if (evsel->core.attr.type == PERF_TYPE_SOFTWARE) { /* faults + :ku modifier */ leader = evsel; @@ -939,8 +967,10 @@ static int test__group2(struct evlist *evlist) continue; } /* cycles:k */ - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -957,6 +987,7 @@ static int test__group2(struct evlist *evlist) static int test__group3(struct evlist *evlist __maybe_unused) { struct evsel *evsel, *group1_leader = NULL, *group2_leader = NULL; + int ret; TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries == (3 * perf_pmus__num_core_pmus() + 2)); @@ -1045,8 +1076,10 @@ static int test__group3(struct evlist *evlist __maybe_unused) continue; } /* instructions:u */ - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1070,10 +1103,14 @@ static int test__group4(struct evlist *evlist __maybe_unused) num_core_entries() == evlist__nr_groups(evlist)); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles:u + p */ evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1089,8 +1126,10 @@ static int test__group4(struct evlist *evlist __maybe_unused) /* instructions:kp + p */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1108,6 +1147,7 @@ static int test__group4(struct evlist *evlist __maybe_unused) static int test__group5(struct evlist *evlist __maybe_unused) { struct evsel *evsel = NULL, *leader; + int ret; TEST_ASSERT_VAL("wrong number of entries", evlist->core.nr_entries == (5 * num_core_entries())); @@ -1117,8 +1157,10 @@ static int test__group5(struct evlist *evlist __maybe_unused) for (int i = 0; i < num_core_entries(); i++) { /* cycles + G */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1133,8 +1175,10 @@ static int test__group5(struct evlist *evlist __maybe_unused) /* instructions + G */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1148,8 +1192,10 @@ static int test__group5(struct evlist *evlist __maybe_unused) for (int i = 0; i < num_core_entries(); i++) { /* cycles:G */ evsel = leader = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1164,8 +1210,10 @@ static int test__group5(struct evlist *evlist __maybe_unused) /* instructions:G */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", 
PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1178,8 +1226,10 @@ static int test__group5(struct evlist *evlist __maybe_unused) for (int i = 0; i < num_core_entries(); i++) { /* cycles */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1201,10 +1251,14 @@ static int test__group_gh1(struct evlist *evlist) evlist__nr_groups(evlist) == num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles + :H group modifier */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1218,8 +1272,10 @@ static int test__group_gh1(struct evlist *evlist) /* cache-misses:G + :H group modifier */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1242,10 +1298,14 @@ static int test__group_gh2(struct evlist *evlist) evlist__nr_groups(evlist) == num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles + :G group modifier */ evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1259,8 +1319,10 @@ static int test__group_gh2(struct evlist *evlist) /* cache-misses:H + :G group modifier */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1283,10 +1345,14 @@ static int test__group_gh3(struct evlist *evlist) evlist__nr_groups(evlist) == num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles:G + :u group modifier */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1300,8 +1366,10 @@ static int test__group_gh3(struct evlist *evlist) /* cache-misses:H + :u group modifier */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1324,10 +1392,14 @@ static int test__group_gh4(struct evlist *evlist) evlist__nr_groups(evlist) == num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles:G + :uG group modifier */ evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1341,8 +1413,10 @@ static int test__group_gh4(struct evlist *evlist) /* cache-misses:H + :uG group modifier */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1363,10 +1437,14 @@ static int test__leader_sample1(struct evlist *evlist) evlist->core.nr_entries == (3 * num_core_entries())); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles - sampling group leader */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1379,8 +1457,10 @@ static int test__leader_sample1(struct evlist *evlist) /* cache-misses - not sampling */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1392,8 +1472,10 @@ static int test__leader_sample1(struct evlist *evlist) /* branch-misses - not sampling */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->core.attr.exclude_hv); @@ -1415,10 +1497,14 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused) evlist->core.nr_entries == (2 * num_core_entries())); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* instructions - sampling group leader */ evsel = leader = (i == 0 ? 
evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_INSTRUCTIONS, "instructions"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1431,8 +1517,10 @@ static int test__leader_sample2(struct evlist *evlist __maybe_unused) /* branch-misses - not sampling */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->core.attr.exclude_hv); @@ -1472,10 +1560,14 @@ static int test__pinned_group(struct evlist *evlist) evlist->core.nr_entries == (3 * num_core_entries())); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles - group leader */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); /* TODO: The group modifier is not copied to the split group leader. */ @@ -1484,13 +1576,18 @@ static int test__pinned_group(struct evlist *evlist) /* cache-misses - can not be pinned, but will go on with the leader */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); /* branch-misses - ditto */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong pinned", !evsel->core.attr.pinned); } return TEST_OK; @@ -1517,10 +1614,14 @@ static int test__exclusive_group(struct evlist *evlist) evlist->core.nr_entries == 3 * num_core_entries()); for (int i = 0; i < num_core_entries(); i++) { + int ret; + /* cycles - group leader */ evsel = leader = (i == 0 ? evlist__first(evlist) : evsel__next(evsel)); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); /* TODO: The group modifier is not copied to the split group leader. 
*/ @@ -1529,13 +1630,18 @@ static int test__exclusive_group(struct evlist *evlist) /* cache-misses - can not be pinned, but will go on with the leader */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CACHE_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_CACHE_MISSES, "cache-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); /* branch-misses - ditto */ evsel = evsel__next(evsel); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_MISSES)); + ret = assert_hw(&evsel->core, PERF_COUNT_HW_BRANCH_MISSES, "branch-misses"); + if (ret) + return ret; + TEST_ASSERT_VAL("wrong exclusive", !evsel->core.attr.exclusive); } return TEST_OK; @@ -1677,9 +1783,11 @@ static int test__checkevent_raw_pmu(struct evlist *evlist) static int test__sym_event_slash(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); + int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (ret) + return ret; - TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); return TEST_OK; } @@ -1687,9 +1795,11 @@ static int test__sym_event_slash(struct evlist *evlist) static int test__sym_event_dc(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); + int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (ret) + return ret; - TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); TEST_ASSERT_VAL("wrong exclude_user", evsel->core.attr.exclude_user); return TEST_OK; } @@ -1697,9 +1807,11 @@ static int test__sym_event_dc(struct evlist *evlist) static int test__term_equal_term(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); + int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (ret) + return ret; - TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "name") == 0); return TEST_OK; } @@ -1707,9 +1819,11 @@ static int test__term_equal_term(struct evlist *evlist) static int test__term_equal_legacy(struct evlist *evlist) { struct evsel *evsel = evlist__first(evlist); + int ret = assert_hw(&evsel->core, PERF_COUNT_HW_CPU_CYCLES, "cycles"); + + if (ret) + return ret; - TEST_ASSERT_VAL("wrong type", evsel->core.attr.type == PERF_TYPE_HARDWARE); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); TEST_ASSERT_VAL("wrong name setting", strcmp(evsel->name, "l1d") == 0); return TEST_OK; } diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index aa2f5c6fc7fc..66eabcea4242 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -976,7 +976,7 @@ static int config_term_pmu(struct perf_event_attr *attr, struct parse_events_error *err) { if (term->type_term == PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE) { - const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); + struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); if (!pmu) { char *err_str; @@ -986,15 +986,23 @@ static int config_term_pmu(struct 
perf_event_attr *attr, err_str, /*help=*/NULL); return -EINVAL; } - if (perf_pmu__supports_legacy_cache(pmu)) { + /* + * Rewrite the PMU event to a legacy cache one unless the PMU + * doesn't support legacy cache events or the event is present + * within the PMU. + */ + if (perf_pmu__supports_legacy_cache(pmu) && + !perf_pmu__have_event(pmu, term->config)) { attr->type = PERF_TYPE_HW_CACHE; return parse_events__decode_legacy_cache(term->config, pmu->type, &attr->config); - } else + } else { term->type_term = PARSE_EVENTS__TERM_TYPE_USER; + term->no_value = true; + } } if (term->type_term == PARSE_EVENTS__TERM_TYPE_HARDWARE) { - const struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); + struct perf_pmu *pmu = perf_pmus__find_by_type(attr->type); if (!pmu) { char *err_str; @@ -1004,10 +1012,19 @@ static int config_term_pmu(struct perf_event_attr *attr, err_str, /*help=*/NULL); return -EINVAL; } - attr->type = PERF_TYPE_HARDWARE; - attr->config = term->val.num; - if (perf_pmus__supports_extended_type()) - attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; + /* + * If the PMU has a sysfs or json event prefer it over + * legacy. ARM requires this. + */ + if (perf_pmu__have_event(pmu, term->config)) { + term->type_term = PARSE_EVENTS__TERM_TYPE_USER; + term->no_value = true; + } else { + attr->type = PERF_TYPE_HARDWARE; + attr->config = term->val.num; + if (perf_pmus__supports_extended_type()) + attr->config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; + } return 0; } if (term->type_term == PARSE_EVENTS__TERM_TYPE_USER || @@ -1381,6 +1398,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, YYLTYPE *loc = loc_; LIST_HEAD(config_terms); struct parse_events_terms parsed_terms; + bool alias_rewrote_terms = false; pmu = parse_state->fake_pmu ?: perf_pmus__find(name); @@ -1433,7 +1451,15 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, return evsel ? 0 : -ENOMEM; } - if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, &info, err)) { + /* Configure attr/terms with a known PMU, this will set hardcoded terms. */ + if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { + parse_events_terms__exit(&parsed_terms); + return -EINVAL; + } + + /* Look for event names in the terms and rewrite into format based terms. */ + if (!parse_state->fake_pmu && perf_pmu__check_alias(pmu, &parsed_terms, + &info, &alias_rewrote_terms, err)) { parse_events_terms__exit(&parsed_terms); return -EINVAL; } @@ -1447,11 +1473,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state, strbuf_release(&sb); } - /* - * Configure hardcoded terms first, no need to check - * return value when called with fail == 0 ;) - */ - if (config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { + /* Configure attr/terms again if an alias was expanded. 
*/ + if (alias_rewrote_terms && + config_attr(&attr, &parsed_terms, parse_state->error, config_term_pmu)) { parse_events_terms__exit(&parsed_terms); return -EINVAL; } diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index d3c9aa4326be..3c9609944a2f 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1494,12 +1494,14 @@ static int check_info_data(struct perf_pmu *pmu, * defined for the alias */ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms, - struct perf_pmu_info *info, struct parse_events_error *err) + struct perf_pmu_info *info, bool *rewrote_terms, + struct parse_events_error *err) { struct parse_events_term *term, *h; struct perf_pmu_alias *alias; int ret; + *rewrote_terms = false; info->per_pkg = false; /* @@ -1521,7 +1523,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_ NULL); return ret; } - + *rewrote_terms = true; ret = check_info_data(pmu, alias, info, err, term->err_term); if (ret) return ret; @@ -1615,6 +1617,8 @@ bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu) bool perf_pmu__have_event(struct perf_pmu *pmu, const char *name) { + if (!name) + return false; if (perf_pmu__find_alias(pmu, name, /*load=*/ true) != NULL) return true; if (pmu->cpu_aliases_added || !pmu->events_table) diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index d2895d415f08..424c3fee0949 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h @@ -201,7 +201,8 @@ int perf_pmu__config_terms(const struct perf_pmu *pmu, __u64 perf_pmu__format_bits(struct perf_pmu *pmu, const char *name); int perf_pmu__format_type(struct perf_pmu *pmu, const char *name); int perf_pmu__check_alias(struct perf_pmu *pmu, struct parse_events_terms *head_terms, - struct perf_pmu_info *info, struct parse_events_error *err); + struct perf_pmu_info *info, bool *rewrote_terms, + struct parse_events_error *err); int perf_pmu__find_event(struct perf_pmu *pmu, const char *event, void *state, pmu_event_callback cb); int perf_pmu__format_parse(struct perf_pmu *pmu, int dirfd, bool eager_load); From 4a18ab467820b75436ea9ddd42ee7cb10efa491c Mon Sep 17 00:00:00 2001 From: zhaimingbing Date: Fri, 24 Nov 2023 17:26:57 +0800 Subject: [PATCH 044/179] perf lock: Fix a memory leak on an error path if a strdup-ed string is NULL,the allocated memory needs freeing. Signed-off-by: zhaimingbing Acked-by: Ingo Molnar Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Jiri Olsa Cc: Li Dong Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Sean Christopherson Link: https://lore.kernel.org/r/20231124092657.10392-1-zhaimingbing@cmss.chinamobile.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-lock.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c index a3ff2f4edbaa..230461280e45 100644 --- a/tools/perf/builtin-lock.c +++ b/tools/perf/builtin-lock.c @@ -2285,8 +2285,10 @@ static int __cmd_record(int argc, const char **argv) else ev_name = strdup(contention_tracepoints[j].name); - if (!ev_name) + if (!ev_name) { + free(rec_argv); return -ENOMEM; + } rec_argv[i++] = "-e"; rec_argv[i++] = ev_name; From 581ff5b66c94a8133d1c77ed42334623fadc968c Mon Sep 17 00:00:00 2001 From: zhujun2 Date: Tue, 14 Nov 2023 22:42:55 -0800 Subject: [PATCH 045/179] perf tests coresight: Remove unused variables These variables are never referenced in the code, just remove them. 
Reviewed-by: James Clark Signed-off-by: zhujun2 Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Suzuki Poulouse Cc: coresight@lists.linaro.org Link: https://lore.kernel.org/r/20231115064255.11057-1-zhujun2@cmss.chinamobile.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c | 1 - tools/perf/tests/shell/coresight/thread_loop/thread_loop.c | 1 - .../shell/coresight/unroll_loop_thread/unroll_loop_thread.c | 1 - 3 files changed, 3 deletions(-) diff --git a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c index a7e169d1bf64..5f886cd09e6b 100644 --- a/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c +++ b/tools/perf/tests/shell/coresight/memcpy_thread/memcpy_thread.c @@ -42,7 +42,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg) int main(int argc, char **argv) { unsigned long i, len, size, thr; - pthread_t threads[256]; struct args args[256]; long long v; diff --git a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c index c0158fac7d0b..e05a559253ca 100644 --- a/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c +++ b/tools/perf/tests/shell/coresight/thread_loop/thread_loop.c @@ -57,7 +57,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg) int main(int argc, char **argv) { unsigned int i, len, thr; - pthread_t threads[256]; struct args args[256]; if (argc < 3) { diff --git a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c index 8f6d384208ed..0fc7bf1a25af 100644 --- a/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c +++ b/tools/perf/tests/shell/coresight/unroll_loop_thread/unroll_loop_thread.c @@ -51,7 +51,6 @@ static pthread_t new_thr(void *(*fn) (void *arg), void *arg) int main(int argc, char **argv) { unsigned int i, thr; - pthread_t threads[256]; struct args args[256]; if (argc < 2) { From 5ebe2f4bf0a8fe8cceb5664a7dea4c17e2cf8477 Mon Sep 17 00:00:00 2001 From: Ji Sheng Teoh Date: Wed, 22 Nov 2023 11:09:08 +0800 Subject: [PATCH 046/179] perf vendor events riscv: Add StarFive Dubhe-90 JSON file Similar to StarFive's Dubhe-80, Dubhe-90 supports raw event id 0x00 - 0x22. Reuse Dubhe-80 firmware and common json file. The raw events are enabled through PMU node of DT binding. Besides raw event, add standard RISC-V firmware events to support monitoring of firmware event. 
Example of PMU DT node: pmu { compatible = "riscv,pmu"; riscv,raw-event-to-mhpmcounters = /* Event ID 1-31 */ <0x00 0x00 0xFFFFFFFF 0xFFFFFFE0 0x00007FF8>, /* Event ID 32-33 */ <0x00 0x20 0xFFFFFFFF 0xFFFFFFFE 0x00007FF8>, /* Event ID 34 */ <0x00 0x22 0xFFFFFFFF 0xFFFFFF22 0x00007FF8>; }; 'perf stat' output: [root@user]# perf stat -a \ -e access_mmu_stlb \ -e miss_mmu_stlb \ -e access_mmu_pte_c \ -e rob_flush \ -e btb_prediction_miss \ -e itlb_miss \ -e sync_del_fetch_g \ -e icache_miss \ -e bpu_br_retire \ -e bpu_br_miss \ -e ret_ins_retire \ -e ret_ins_miss \ -- openssl speed rsa2048 Doing 2048 bits private rsa's for 10s: 39 2048 bits private RSA's in 10.03s Doing 2048 bits public rsa's for 10s: 1469 2048 bits public RSA's in 9.47s version: 3.0.10 built on: Tue Aug 1 13:47:24 2023 UTC options: bn(64,64) CPUINFO: N/A sign verify sign/s verify/s rsa 2048 bits 0.257179s 0.006447s 3.9 155.1 Performance counter stats for 'system wide': 3112882 access_mmu_stlb 10550 miss_mmu_stlb 18251 access_mmu_pte_c 274765 rob_flush 22470560 btb_prediction_miss 3035839 itlb_miss 643549060 sync_del_fetch_g 133013 icache_miss 62982796 bpu_br_retire 287548 bpu_br_miss 8935910 ret_ins_retire 8308 ret_ins_miss 20.656182600 seconds time elapsed Reviewed-by: Ley Foon Tan Signed-off-by: Ji Sheng Teoh Cc: Adrian Hunter Cc: Albert Ou Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Nikita Shubin Cc: Palmer Dabbelt Cc: Paul Walmsley Cc: Peter Zijlstra Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/20231122030908.2981502-1-jisheng.teoh@starfivetech.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/riscv/mapfile.csv | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv index ee61e26f90cd..56b03138d46a 100644 --- a/tools/perf/pmu-events/arch/riscv/mapfile.csv +++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv @@ -15,4 +15,4 @@ # #MVENDORID-MARCHID-MIMPID,Version,Filename,EventType 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core -0x67e-0x80000000db000080-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core +0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core From 1638b11ef8156c8551f5aaa5799069633593c5fe Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Thu, 23 Nov 2023 21:32:32 +0530 Subject: [PATCH 047/179] perf tools: Add perf binary dependent rule for shellcheck log in Makefile.perf Add rule in new Makefile "tests/Makefile.tests" for running shellcheck on shell test scripts. This automates below shellcheck into the build. $ for F in $(find tests/shell/ -perm -o=x -name '*.sh'); do shellcheck -S warning $F; done Condition for shellcheck is added in Makefile.perf to avoid build breakage in the absence of shellcheck binary. Update Makefile.perf to contain new rule for "SHELLCHECK_TEST" which is for making shellcheck test as a dependency on perf binary. Added "tests/Makefile.tests" to run shellcheck on shellscripts in tests/shell. The make rule "SHLLCHECK_RUN" ensures that, every time during make, shellcheck will be run only on modified files during subsequent invocations. By this, if any newly added shell scripts or fixes in existing scripts breaks coding/formatting style, it will get captured during the perf build. Example build failure by modifying probe_vfs_getname.sh in tests/shell: In tests/shell/probe_vfs_getname.sh line 8: . 
$(dirname $0)/lib/probe.sh ^-----------^ SC2046 (warning): Quote this to prevent word splitting. For more information: https://www.shellcheck.net/wiki/SC2046 -- Quote this to prevent word splitt... make[3]: *** [/root/athira/perf-tools-next/tools/perf/tests/Makefile.tests:18: tests/shell/.probe_vfs_getname.sh.shellcheck_log] Error 1 make[2]: *** [Makefile.perf:686: SHELLCHECK_TEST] Error 2 make[2]: *** Waiting for unfinished jobs.... make[1]: *** [Makefile.perf:244: sub-make] Error 2 make: *** [Makefile:70: all] Error 2 Here, like other files which gets created during compilation (ex: .builtin-bench.o.cmd or .perf.o.cmd ), create .shellcheck_log also as a hidden file. Example: tests/shell/.probe_vfs_getname.sh.shellcheck_log shellcheck is re-run if any of the script gets modified based on its dependency of this log file. After this, for testing, changed "tests/shell/trace+probe_vfs_getname.sh" to break shellcheck format. In the next make run, it is also captured: In tests/shell/probe_vfs_getname.sh line 8: . $(dirname $0)/lib/probe.sh ^-----------^ SC2046 (warning): Quote this to prevent word splitting. For more information: https://www.shellcheck.net/wiki/SC2046 -- Quote this to prevent word splitt... make[3]: *** [/root/athira/perf-tools-next/tools/perf/tests/Makefile.tests:18: tests/shell/.probe_vfs_getname.sh.shellcheck_log] Error 1 make[3]: *** Waiting for unfinished jobs.... In tests/shell/trace+probe_vfs_getname.sh line 14: . $(dirname $0)/lib/probe.sh ^-----------^ SC2046 (warning): Quote this to prevent word splitting. For more information: https://www.shellcheck.net/wiki/SC2046 -- Quote this to prevent word splitt... make[3]: *** [/root/athira/perf-tools-next/tools/perf/tests/Makefile.tests:18: tests/shell/.trace+probe_vfs_getname.sh.shellcheck_log] Error 1 make[2]: *** [Makefile.perf:686: SHELLCHECK_TEST] Error 2 make[2]: *** Waiting for unfinished jobs.... make[1]: *** [Makefile.perf:244: sub-make] Error 2 make: *** [Makefile:70: all] Error 2 Failure log can be found in the stdout of make itself. This is reported at build time. To be able to go ahead with the build or disable shellcheck even though it is known that some test is broken, add a "NO_SHELLCHECK" option. Example: make NO_SHELLCHECK=1 INSTALL libsubcmd_headers INSTALL libsymbol_headers INSTALL libapi_headers INSTALL libperf_headers INSTALL libbpf_headers LINK perf Note: This is tested on RHEL and also SLES. Use below check: "$(shell which shellcheck 2> /dev/null)" to look for presence of shellcheck binary. The approach "shell command -v" is not used here. In some of the distros(RHEL), command is available as executable file (/usr/bin/command). But in some distros(SLES), it is a shell builtin and not available as executable file. Committer testing: $ type shellcheck shellcheck is hashed (/usr/bin/shellcheck) $ rpm -qf /usr/bin/shellcheck ShellCheck-0.9.0-2.fc38.x86_64 $ $ alias m $ git diff diff --git a/tools/perf/tests/shell/probe_vfs_getname.sh b/tools/perf/tests/shell/probe_vfs_getname.sh index 554e12e83c55fd56..dbc14634678e2bf6 100755 --- a/tools/perf/tests/shell/probe_vfs_getname.sh +++ b/tools/perf/tests/shell/probe_vfs_getname.sh @@ -5,7 +5,7 @@ # Arnaldo Carvalho de Melo , 2017 # shellcheck source=lib/probe.sh -. "$(dirname $0)"/lib/probe.sh +. 
$(dirname $0)/lib/probe.sh skip_if_no_perf_probe || exit 2 alias m='rm -rf ~/libexec/perf-core/ ; make -k CORESIGHT=1 O=/tmp/build/$(basename $PWD) -C tools/perf install-bin && perf test python' $ m make: Entering directory '/home/acme/git/perf-tools-next/tools/perf' BUILD: Doing 'make -j32' parallel build INSTALL libbpf_headers In tests/shell/probe_vfs_getname.sh line 8: . $(dirname $0)/lib/probe.sh ^-----------^ SC2046 (warning): Quote this to prevent word splitting. For more information: https://www.shellcheck.net/wiki/SC2046 -- Quote this to prevent word splitt... make[3]: *** [/home/acme/git/perf-tools-next/tools/perf/tests/Makefile.tests:18: tests/shell/.probe_vfs_getname.sh.shellcheck_log] Error 1 make[2]: *** [Makefile.perf:686: SHELLCHECK_TEST] Error 2 make[2]: *** Waiting for unfinished jobs.... make[1]: *** [Makefile.perf:244: sub-make] Error 2 make: *** [Makefile:113: install-bin] Error 2 make: Leaving directory '/home/acme/git/perf-tools-next/tools/perf' $ Reviewed-by: James Clark Signed-off-by: Athira Jajeev Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Disha Goel Cc: Ian Rogers Cc: Jiri Olsa Cc: Kajol Jain Cc: Madhavan Srinivasan Cc: Namhyung Kim Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20231123160232.94253-1-atrajeev@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.perf | 21 ++++++++++++++++++++- tools/perf/tests/Makefile.tests | 22 ++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 tools/perf/tests/Makefile.tests diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index d80dcaa5a1e3..824cbc0af7d7 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -134,6 +134,8 @@ include ../scripts/utilities.mak # x86 instruction decoder - new instructions test # # Define GEN_VMLINUX_H to generate vmlinux.h from the BTF. +# +# Define NO_SHELLCHECK if you do not want to run shellcheck during build # As per kernel Makefile, avoid funny character set dependencies unexport LC_ALL @@ -671,7 +673,23 @@ $(PERF_IN): prepare FORCE $(PMU_EVENTS_IN): FORCE prepare $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events -$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) +# Runs shellcheck on perf test shell scripts + +SHELLCHECK := $(shell which shellcheck 2> /dev/null) + +ifeq ($(NO_SHELLCHECK),1) +SHELLCHECK := +endif + +ifneq ($(SHELLCHECK),) +SHELLCHECK_TEST: FORCE prepare + $(Q)$(MAKE) -f $(srctree)/tools/perf/tests/Makefile.tests +else +SHELLCHECK_TEST: + @: +endif + +$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) SHELLCHECK_TEST $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@ @@ -1134,6 +1152,7 @@ bpf-skel-clean: $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean + $(Q)$(MAKE) -f $(srctree)/tools/perf/tests/Makefile.tests clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS) $(Q)find $(or $(OUTPUT),.) 
-name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete $(Q)$(RM) $(OUTPUT).config-detected diff --git a/tools/perf/tests/Makefile.tests b/tools/perf/tests/Makefile.tests new file mode 100644 index 000000000000..fdaca5f7a946 --- /dev/null +++ b/tools/perf/tests/Makefile.tests @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# Athira Rajeev , 2023 + +PROGS := $(shell find tests/shell -perm -o=x -type f -name '*.sh') +FILE_NAME := $(notdir $(PROGS)) +FILE_NAME := $(FILE_NAME:%=.%) +LOGS := $(join $(dir $(PROGS)),$(FILE_NAME)) +LOGS := $(LOGS:%=%.shellcheck_log) + +.PHONY: all +all: SHELLCHECK_RUN + @: + +SHELLCHECK_RUN: $(LOGS) + +.%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call frecho-cmd,test)@shellcheck -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +clean: + $(eval log_files := $(shell find . -name '.*.shellcheck_log')) + @rm -rf $(log_files) From 8aa1e6e29a21f6bb99dcaa64d11e97a21f0f9dc1 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Wed, 22 Nov 2023 10:27:03 +0100 Subject: [PATCH 048/179] perf report: Remove warning on missing raw data for s390 Command # ./perf report -i /tmp/111 -D > /dev/null emits an error message when a sample for event CRYPTO_ALL in the perf.data file does not contain any raw data. This is ok. Do not trigger this warning when the sample in the perf.data files does not contain any raw data at all. Check for availability of raw data for all events and return if none is available. Output before: # ./perf report -i /tmp/111 -D > /dev/null Invalid CRYPTO_ALL raw data encountered Invalid CRYPTO_ALL raw data encountered Invalid CRYPTO_ALL raw data encountered # Output after: # ./perf report -i /tmp/111 -D > /dev/null # Fixes: b539deafbadb2fc6 ("perf report: Add s390 raw data interpretation for PAI counters") Signed-off-by: Thomas Richter Acked-by: Namhyung Kim Acked-by: Sumanth Korikkar Cc: Heiko Carstens Cc: Sven Schnelle Cc: Thomas Richter Cc: Vasily Gorbik Link: https://lore.kernel.org/r/20231122092703.3163191-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/s390-sample-raw.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c index 29a744eeb71e..53383e97ec9d 100644 --- a/tools/perf/util/s390-sample-raw.c +++ b/tools/perf/util/s390-sample-raw.c @@ -51,8 +51,6 @@ static bool s390_cpumcfdg_testctr(struct perf_sample *sample) struct cf_trailer_entry *te; struct cf_ctrset_entry *cep, ce; - if (!len) - return false; while (offset < len) { cep = (struct cf_ctrset_entry *)(buf + offset); ce.def = be16_to_cpu(cep->def); @@ -234,10 +232,9 @@ struct pai_data { /* Event number and value */ */ static bool s390_pai_all_test(struct perf_sample *sample) { - unsigned char *buf = sample->raw_data; size_t len = sample->raw_size; - if (len < 0xa || !buf) + if (len < 0xa) return false; return true; } @@ -299,6 +296,10 @@ void evlist__s390_sample_raw(struct evlist *evlist, union perf_event *event, if (!evsel) return; + /* Check for raw data in sample */ + if (!sample->raw_size || !sample->raw_data) + return; + /* Display raw data on screen */ if (evsel->core.attr.config == PERF_EVENT_CPUM_CF_DIAG) { if (!evsel->pmu) From 70df07838fc1c0acfab3325ae79014e241a88bdf Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:41 +0200 Subject: [PATCH 049/179] perf header: Fix segfault on build_mem_topology() error path Do not increase the node count unless a node has been successfully read, because it can lead to a 
segfault if an error occurs. For example, if perf exceeds the open file limit in memory_node__read(), which, on a test system, could be made to happen by setting the file limit to exactly 32: Before: $ ulimit -n 32 $ perf mem record --all-user -- sleep 1 [ perf record: Woken up 1 times to write data ] failed: can't open memory sysfs data perf: Segmentation fault Obtained 14 stack frames. perf(sighandler_dump_stack+0x48) [0x55f4b1f59558] /lib/x86_64-linux-gnu/libc.so.6(+0x42520) [0x7f4ba1c42520] /lib/x86_64-linux-gnu/libc.so.6(free+0x1e) [0x7f4ba1ca53fe] perf(+0x178ff4) [0x55f4b1f48ff4] perf(+0x179a70) [0x55f4b1f49a70] perf(+0x17ef5d) [0x55f4b1f4ef5d] perf(+0x85c0b) [0x55f4b1e55c0b] perf(cmd_record+0xe1d) [0x55f4b1e5920d] perf(cmd_mem+0xc96) [0x55f4b1e80e56] perf(+0x130460) [0x55f4b1f00460] perf(main+0x689) [0x55f4b1e427d9] /lib/x86_64-linux-gnu/libc.so.6(+0x29d90) [0x7f4ba1c29d90] /lib/x86_64-linux-gnu/libc.so.6(__libc_start_main+0x80) [0x7f4ba1c29e40] perf(_start+0x25) [0x55f4b1e42a25] Segmentation fault (core dumped) $ After: $ ulimit -n 32 $ perf mem record --all-user -- sleep 1 [ perf record: Woken up 1 times to write data ] failed: can't open memory sysfs data [ perf record: Captured and wrote 0.005 MB perf.data (11 samples) ] $ Fixes: f8e502b9d1b3b197 ("perf header: Ensure bitmaps are freed") Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: Ian Rogers Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-2-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 1c687b5789c0..08cc2febabde 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1444,7 +1444,9 @@ static int build_mem_topology(struct memory_node **nodesp, u64 *cntp) nodes = new_nodes; size += 4; } - ret = memory_node__read(&nodes[cnt++], idx); + ret = memory_node__read(&nodes[cnt], idx); + if (!ret) + cnt += 1; } out: closedir(dir); From 96ba5999e8d86138cea90422f8c00309a7eedd3b Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:42 +0200 Subject: [PATCH 050/179] perf tests lib: Add perf_has_symbol.sh Some shell tests depend on finding symbols for perf itself, and fail if perf has been stripped and no debug object is available. Add helper functions to check if perf has a needed symbol. This is preparation for amending the tests themselves to be skipped if a needed symbol is not found. The functions make use of the "Symbols" test which reads and checks symbols from a dso, perf itself by default. Note the "Symbols" test will find symbols using the same method as other perf tests, including, for example, looking in the buildid cache. An alternative would be to prevent the needed symbols from being stripped, which seems to work with gcc's externally_visible attribute, but that attribute is not supported by clang. Another alternative would be to use option -Wl,-E (which is already used when perf is built with perl support) which causes the linker to add all (global) symbols to the dynamic symbol table. Then the required symbols need only be made global in scope to avoid being strippable. However that goes beyond what is needed. 
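As a sketch of the intended usage: a shell test sources the helper relative to its own directory and bails out before doing any work if the symbol it relies on is missing. The follow-up patches in this series convert the individual tests along these lines; 'noploop' below is just the symbol the pipe test happens to need:

  shelldir=$(dirname "$0")
  # shellcheck source=lib/perf_has_symbol.sh
  . "${shelldir}"/lib/perf_has_symbol.sh

  skip_test_missing_symbol noploop

skip_test_missing_symbol exits with 2 when the symbol cannot be found, which 'perf test' reports as Skip rather than FAILED.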
Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-3-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/lib/perf_has_symbol.sh | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 tools/perf/tests/shell/lib/perf_has_symbol.sh diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh new file mode 100644 index 000000000000..5d59c32ae3e7 --- /dev/null +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh @@ -0,0 +1,21 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +perf_has_symbol() +{ + if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then + echo "perf does have symbol '$1'" + return 0 + fi + echo "perf does not have symbol '$1'" + return 1 +} + +skip_test_missing_symbol() +{ + if ! perf_has_symbol "$1" ; then + echo "perf is missing symbols - skipping test" + exit 2 + fi + return 0 +} From c9526a735082bba57da322332cbcef1bbdff5698 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:43 +0200 Subject: [PATCH 051/179] perf tests: Skip pipe test if noploop symbol is missing perf pipe recording and injection test depends on finding symbol noploop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v pipe 86: perf pipe recording and injection test : --- start --- test child forked, pid 47734 [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.000 MB - ] 47741 47741 -1 |perf [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.000 MB - ] cannot find noploop function in pipe #1 test child finished with -1 ---- end ---- perf pipe recording and injection test: FAILED! After: $ tools/perf/perf test -v pipe 86: perf pipe recording and injection test : --- start --- test child forked, pid 48996 perf does not have symbol 'noploop' perf is missing symbols - skipping test test child finished with -2 ---- end ---- perf pipe recording and injection test: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-4-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/pipe_test.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/pipe_test.sh b/tools/perf/tests/shell/pipe_test.sh index 8dd115dd35a7..a78d35d2cff0 100755 --- a/tools/perf/tests/shell/pipe_test.sh +++ b/tools/perf/tests/shell/pipe_test.sh @@ -2,10 +2,17 @@ # perf pipe recording and injection test # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + +sym="noploop" + +skip_test_missing_symbol ${sym} + data=$(mktemp /tmp/perf.data.XXXXXX) prog="perf test -w noploop" task="perf" -sym="noploop" if ! 
perf record -e task-clock:u -o - ${prog} | perf report -i - --task | grep ${task}; then echo "cannot find the test file in the perf report" From 3c489dbe69c155c86c4460491d11520cf8ec3637 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:44 +0200 Subject: [PATCH 052/179] perf tests: Skip record test if test_loop symbol is missing perf record test depends on finding symbol test_loop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Note, building with perl support adds option -Wl,-E which causes the linker to add all (global) symbols to the dynamic symbol table. So the test_loop symbol, being global, does not get stripped unless NO_LIBPERL=1 Before: $ make NO_LIBPERL=1 -C tools/perf >/dev/null 2>&1 $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'record tests' 91: perf record tests : --- start --- test child forked, pid 118750 Basic --per-thread mode test Per-thread record [Failed missing output] Register capture test Register capture test [Success] Basic --system-wide mode test System-wide record [Skipped not supported] Basic target workload test Workload record [Failed missing output] test child finished with -1 ---- end ---- perf record tests: FAILED! After: $ tools/perf/perf test -v 'record tests' 91: perf record tests : --- start --- test child forked, pid 120025 perf does not have symbol 'test_loop' perf is missing symbols - skipping test test child finished with -2 ---- end ---- perf record tests: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-5-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/record.sh b/tools/perf/tests/shell/record.sh index c74412c68e65..3d1a7759a7b2 100755 --- a/tools/perf/tests/shell/record.sh +++ b/tools/perf/tests/shell/record.sh @@ -8,10 +8,16 @@ shelldir=$(dirname "$0") # shellcheck source=lib/waiting.sh . "${shelldir}"/lib/waiting.sh +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + +testsym="test_loop" + +skip_test_missing_symbol ${testsym} + err=0 perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) testprog="perf test -w thloop" -testsym="test_loop" cpu_pmu_dir="/sys/bus/event_source/devices/cpu*" br_cntr_file="/caps/branch_counter_nr" br_cntr_output="branch stack counters" From fc1de29a8b8ad46b590b2d389b53b4ecf9758273 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:45 +0200 Subject: [PATCH 053/179] perf tests: Skip Arm64 callgraphs test if leafloop symbol is missing The test "Check Arm64 callgraphs are complete in fp mode" depends on finding symbol leafloop in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. 
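The situation can be reproduced on an arm64 system the same way as for the other tests in this series (illustrative only, output trimmed; the test selector is just a substring match):

  $ strip tools/perf/perf
  $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf`
  $ tools/perf/perf test -v 'Arm64 callgraphs'

Without this change the test fails because the leafloop frames cannot be resolved; with it, the test finishes with Skip.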
Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-6-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/test_arm_callgraph_fp.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/tests/shell/test_arm_callgraph_fp.sh b/tools/perf/tests/shell/test_arm_callgraph_fp.sh index 66dfdfdad553..e342e6c8aa50 100755 --- a/tools/perf/tests/shell/test_arm_callgraph_fp.sh +++ b/tools/perf/tests/shell/test_arm_callgraph_fp.sh @@ -2,8 +2,14 @@ # Check Arm64 callgraphs are complete in fp mode # SPDX-License-Identifier: GPL-2.0 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + lscpu | grep -q "aarch64" || exit 2 +skip_test_missing_symbol leafloop + PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) TEST_PROGRAM="perf test -w leafloop" From fcfb5a6189f55669c931dce9fec85280655c515f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:46 +0200 Subject: [PATCH 054/179] perf tests: Skip branch stack sampling test if brstack_bench symbol is missing The test "Check branch stack sampling" depends on finding symbol brstack_bench (and several others) in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'branch stack sampling' 112: Check branch stack sampling : --- start --- test child forked, pid 123741 Testing user branch stack sampling + grep -E -m1 ^brstack_bench\+[^ ]*/brstack_foo\+[^ ]*/IND_CALL/.*$ /tmp/__perf_test.program.5Dz1U/perf.script + cleanup + rm -rf /tmp/__perf_test.program.5Dz1U test child finished with -1 ---- end ---- Check branch stack sampling: FAILED! After: $ tools/perf/perf test -v 'branch stack sampling' 112: Check branch stack sampling : --- start --- test child forked, pid 125157 perf does not have symbol 'brstack_bench' perf is missing symbols - skipping test test child finished with -2 ---- end ---- Check branch stack sampling: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-7-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/test_brstack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/tests/shell/test_brstack.sh b/tools/perf/tests/shell/test_brstack.sh index 09908d71c994..5f14d0cb013f 100755 --- a/tools/perf/tests/shell/test_brstack.sh +++ b/tools/perf/tests/shell/test_brstack.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-2.0 # German Gomez , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + # skip the test if the hardware doesn't support branch stack sampling # and if the architecture doesn't support filter types: any,save_type,u if ! perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev/null 2>&1 ; then @@ -11,6 +15,8 @@ if ! 
perf record -o- --no-buildid --branch-filter any,save_type,u -- true > /dev exit 2 fi +skip_test_missing_symbol brstack_bench + TMPDIR=$(mktemp -d /tmp/__perf_test.program.XXXXX) TESTPROG="perf test -w brstack" From 3b24b15cf6fb2dbe1d009a52c9ddcb7721503d8f Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:47 +0200 Subject: [PATCH 055/179] perf tests: Make data symbol test wait for perf to start The perf data symbol test waits 1 second for perf to run and collect data, which may be too little if perf takes a long time to start up, which has been noticed on systems with many CPUs. Use existing wait_for_perf_to_start helper to wait for perf to start. Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-8-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/test_data_symbol.sh | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index 69bb6fe86c50..e50e54e94f6f 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -4,6 +4,10 @@ # SPDX-License-Identifier: GPL-2.0 # Leo Yan , 2022 +shelldir=$(dirname "$0") +# shellcheck source=lib/waiting.sh +. "${shelldir}"/lib/waiting.sh + skip_if_no_mem_event() { perf mem record -e list 2>&1 | grep -E -q 'available' && return 0 return 2 @@ -13,6 +17,7 @@ skip_if_no_mem_event || exit 2 TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX) check_result() { # The memory report format is as below: @@ -50,13 +55,15 @@ echo "Recording workload..." # specific CPU and test in per-CPU mode. is_amd=$(grep -E -c 'vendor_id.*AuthenticAMD' /proc/cpuinfo) if (($is_amd >= 1)); then - perf mem record -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM & + perf mem record -vvv -o ${PERF_DATA} -C 0 -- taskset -c 0 $TEST_PROGRAM 2>"${ERR_FILE}" & else - perf mem record --all-user -o ${PERF_DATA} -- $TEST_PROGRAM & + perf mem record -vvv --all-user -o ${PERF_DATA} -- $TEST_PROGRAM 2>"${ERR_FILE}" & fi PERFPID=$! +wait_for_perf_to_start ${PERFPID} "${ERR_FILE}" + sleep 1 kill $PERFPID From 124bf6360ad8fe9267017d10b5dd465d4af73247 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Thu, 23 Nov 2023 09:58:48 +0200 Subject: [PATCH 056/179] perf tests: Skip data symbol test if buf1 symbol is missing perf data symbol test depends on finding symbol buf1 in perf, and fails if perf has been stripped and no debug object is available. In that case, skip the test instead. Example: Before: $ strip tools/perf/perf $ tools/perf/perf buildid-cache -p `realpath tools/perf/perf` $ tools/perf/perf test -v 'data symbol' 113: Test data symbol : --- start --- test child forked, pid 125646 Recording workload... [ perf record: Woken up 3 times to write data ] [ perf record: Captured and wrote 0.577 MB /tmp/__perf_test.perf.data.Jhbdp (7794 samples) ] Cleaning up files... test child finished with -1 ---- end ---- Test data symbol: FAILED! 
After: $ tools/perf/perf test -v 'data symbol' 113: Test data symbol : --- start --- test child forked, pid 125747 perf does not have symbol 'buf1' perf is missing symbols - skipping test test child finished with -2 ---- end ---- Test data symbol: Skip Signed-off-by: Adrian Hunter Acked-by: Ian Rogers Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Leo Yan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123075848.9652-9-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/test_data_symbol.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/tests/shell/test_data_symbol.sh b/tools/perf/tests/shell/test_data_symbol.sh index e50e54e94f6f..3dfa91832aa8 100755 --- a/tools/perf/tests/shell/test_data_symbol.sh +++ b/tools/perf/tests/shell/test_data_symbol.sh @@ -8,6 +8,9 @@ shelldir=$(dirname "$0") # shellcheck source=lib/waiting.sh . "${shelldir}"/lib/waiting.sh +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + skip_if_no_mem_event() { perf mem record -e list 2>&1 | grep -E -q 'available' && return 0 return 2 @@ -15,6 +18,8 @@ skip_if_no_mem_event() { skip_if_no_mem_event || exit 2 +skip_test_missing_symbol buf1 + TEST_PROGRAM="perf test -w datasym" PERF_DATA=$(mktemp /tmp/__perf_test.perf.data.XXXXX) ERR_FILE=$(mktemp /tmp/__perf_test.stderr.XXXXX) From 19dd49c9337a482325d4dd7000e328dac11f6b5c Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 9 Nov 2023 15:27:32 -0800 Subject: [PATCH 057/179] perf vendor events: Add skx, clx, icx and spr upi bandwidth metric Add upi_data_receive_bw metric for skylakex, cascadelakex, icelakex and sapphirerapids. The metric was added to perfmon metrics in: https://github.com/intel/perfmon/pull/119 Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Caleb Biggers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Perry Taylor Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231109232732.2973015-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- .../perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json | 6 ++++++ tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json | 6 ++++++ .../pmu-events/arch/x86/sapphirerapids/spr-metrics.json | 6 ++++++ tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json | 6 ++++++ 4 files changed, 24 insertions(+) diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index 84c132af3dfa..8bc6c0707856 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -1862,6 +1862,12 @@ "MetricName": "uncore_frequency", "ScaleUnit": "1GHz" }, + { + "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)", + "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", + "MetricName": "upi_data_receive_bw", + "ScaleUnit": "1MB/s" + }, { "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)", "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", diff --git a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json index e98602c66707..71d78a7841ea 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/icx-metrics.json @@ -1846,6 +1846,12 @@ "MetricName": 
"uncore_frequency", "ScaleUnit": "1GHz" }, + { + "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)", + "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", + "MetricName": "upi_data_receive_bw", + "ScaleUnit": "1MB/s" + }, { "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)", "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json index 06c6d67cb76b..e31a4aac9f20 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/spr-metrics.json @@ -1964,6 +1964,12 @@ "MetricName": "uncore_frequency", "ScaleUnit": "1GHz" }, + { + "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)", + "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", + "MetricName": "upi_data_receive_bw", + "ScaleUnit": "1MB/s" + }, { "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)", "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index 4a8f8eeb7525..ec3aa5ef00a3 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -1806,6 +1806,12 @@ "MetricName": "uncore_frequency", "ScaleUnit": "1GHz" }, + { + "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data receive bandwidth (MB/sec)", + "MetricExpr": "UNC_UPI_RxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", + "MetricName": "upi_data_receive_bw", + "ScaleUnit": "1MB/s" + }, { "BriefDescription": "Intel(R) Ultra Path Interconnect (UPI) data transmit bandwidth (MB/sec)", "MetricExpr": "UNC_UPI_TxL_FLITS.ALL_DATA * 7.111111111111111 / 1e6 / duration_time", From 7340c6df49df1b261892d287444c255d0a378063 Mon Sep 17 00:00:00 2001 From: Inochi Amaoto Date: Wed, 22 Nov 2023 20:41:06 +0800 Subject: [PATCH 058/179] perf vendor events riscv: add T-HEAD C9xx JSON file Add JSON file of T-HEAD C9xx series events. The event idx (raw value) is summary as following: event id range | support cpu 0x01 - 0x2a | c906,c910,c920 The event ids are based on the public document of T-HEAD and cover the c900 series. These events are the max that c900 series support. Since T-HEAD let manufacturers decide whether events are usable, the final support of the perf events is determined by the pmu node of the soc dtb. 
Signed-off-by: Inochi Amaoto Tested-by: Guo Ren Acked-by: Palmer Dabbelt Cc: Adrian Hunter Cc: Albert Ou Cc: Alexander Shishkin Cc: Chen Wang Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Jisheng Zhang Cc: Mark Rutland Cc: Namhyung Kim Cc: Paul Walmsley Cc: Peter Zijlstra Cc: Wei Fu Cc: linux-riscv@lists.infradead.org Link: https://lore.kernel.org/r/IA1PR20MB495325FCF603BAA841E29281BBBAA@IA1PR20MB4953.namprd20.prod.outlook.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/riscv/mapfile.csv | 1 + .../arch/riscv/thead/c900-legacy/cache.json | 67 ++++++++++++++++ .../riscv/thead/c900-legacy/firmware.json | 68 ++++++++++++++++ .../riscv/thead/c900-legacy/instruction.json | 72 +++++++++++++++++ .../riscv/thead/c900-legacy/microarch.json | 80 +++++++++++++++++++ 5 files changed, 288 insertions(+) create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json create mode 100644 tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json diff --git a/tools/perf/pmu-events/arch/riscv/mapfile.csv b/tools/perf/pmu-events/arch/riscv/mapfile.csv index 56b03138d46a..cfc449b19810 100644 --- a/tools/perf/pmu-events/arch/riscv/mapfile.csv +++ b/tools/perf/pmu-events/arch/riscv/mapfile.csv @@ -15,4 +15,5 @@ # #MVENDORID-MARCHID-MIMPID,Version,Filename,EventType 0x489-0x8000000000000007-0x[[:xdigit:]]+,v1,sifive/u74,core +0x5b7-0x0-0x0,v1,thead/c900-legacy,core 0x67e-0x80000000db0000[89]0-0x[[:xdigit:]]+,v1,starfive/dubhe-80,core diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json new file mode 100644 index 000000000000..2b142348d635 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/cache.json @@ -0,0 +1,67 @@ +[ + { + "EventName": "L1_ICACHE_ACCESS", + "EventCode": "0x00000001", + "BriefDescription": "L1 instruction cache access" + }, + { + "EventName": "L1_ICACHE_MISS", + "EventCode": "0x00000002", + "BriefDescription": "L1 instruction cache miss" + }, + { + "EventName": "ITLB_MISS", + "EventCode": "0x00000003", + "BriefDescription": "I-UTLB miss" + }, + { + "EventName": "DTLB_MISS", + "EventCode": "0x00000004", + "BriefDescription": "D-UTLB miss" + }, + { + "EventName": "JTLB_MISS", + "EventCode": "0x00000005", + "BriefDescription": "JTLB miss" + }, + { + "EventName": "L1_DCACHE_READ_ACCESS", + "EventCode": "0x0000000c", + "BriefDescription": "L1 data cache read access" + }, + { + "EventName": "L1_DCACHE_READ_MISS", + "EventCode": "0x0000000d", + "BriefDescription": "L1 data cache read miss" + }, + { + "EventName": "L1_DCACHE_WRITE_ACCESS", + "EventCode": "0x0000000e", + "BriefDescription": "L1 data cache write access" + }, + { + "EventName": "L1_DCACHE_WRITE_MISS", + "EventCode": "0x0000000f", + "BriefDescription": "L1 data cache write miss" + }, + { + "EventName": "LL_CACHE_READ_ACCESS", + "EventCode": "0x00000010", + "BriefDescription": "LL Cache read access" + }, + { + "EventName": "LL_CACHE_READ_MISS", + "EventCode": "0x00000011", + "BriefDescription": "LL Cache read miss" + }, + { + "EventName": "LL_CACHE_WRITE_ACCESS", + "EventCode": "0x00000012", + "BriefDescription": "LL Cache write access" + }, + { + "EventName": "LL_CACHE_WRITE_MISS", + "EventCode": "0x00000013", + "BriefDescription": "LL Cache write miss" + } +] diff --git 
a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json new file mode 100644 index 000000000000..9b4a032186a7 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/firmware.json @@ -0,0 +1,68 @@ +[ + { + "ArchStdEvent": "FW_MISALIGNED_LOAD" + }, + { + "ArchStdEvent": "FW_MISALIGNED_STORE" + }, + { + "ArchStdEvent": "FW_ACCESS_LOAD" + }, + { + "ArchStdEvent": "FW_ACCESS_STORE" + }, + { + "ArchStdEvent": "FW_ILLEGAL_INSN" + }, + { + "ArchStdEvent": "FW_SET_TIMER" + }, + { + "ArchStdEvent": "FW_IPI_SENT" + }, + { + "ArchStdEvent": "FW_IPI_RECEIVED" + }, + { + "ArchStdEvent": "FW_FENCE_I_SENT" + }, + { + "ArchStdEvent": "FW_FENCE_I_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_SENT" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_SFENCE_VMA_ASID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_GVMA_VMID_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_RECEIVED" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_SENT" + }, + { + "ArchStdEvent": "FW_HFENCE_VVMA_ASID_RECEIVED" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json new file mode 100644 index 000000000000..c822b5373333 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/instruction.json @@ -0,0 +1,72 @@ +[ + { + "EventName": "INST_BRANCH_MISPREDICT", + "EventCode": "0x00000006", + "BriefDescription": "Mispredicted branch instructions" + }, + { + "EventName": "INST_BRANCH", + "EventCode": "0x00000007", + "BriefDescription": "Retired branch instructions" + }, + { + "EventName": "INST_JMP_MISPREDICT", + "EventCode": "0x00000008", + "BriefDescription": "Indirect branch mispredict" + }, + { + "EventName": "INST_JMP", + "EventCode": "0x00000009", + "BriefDescription": "Retired jmp instructions" + }, + { + "EventName": "INST_STORE", + "EventCode": "0x0000000b", + "BriefDescription": "Retired store instructions" + }, + { + "EventName": "INST_ALU", + "EventCode": "0x0000001d", + "BriefDescription": "Retired ALU instructions" + }, + { + "EventName": "INST_LDST", + "EventCode": "0x0000001e", + "BriefDescription": "Retired Load/Store instructions" + }, + { + "EventName": "INST_VECTOR", + "EventCode": "0x0000001f", + "BriefDescription": "Retired Vector instructions" + }, + { + "EventName": "INST_CSR", + "EventCode": "0x00000020", + "BriefDescription": "Retired CSR instructions" + }, + { + "EventName": "INST_SYNC", + "EventCode": "0x00000021", + "BriefDescription": "Retired sync instructions (AMO/LR/SC instructions)" + }, + { + "EventName": "INST_UNALIGNED_ACCESS", + "EventCode": "0x00000022", + "BriefDescription": "Retired Store/Load instructions with unaligned memory access" + }, + { + "EventName": "INST_ECALL", + "EventCode": "0x00000025", + "BriefDescription": "Retired ecall instructions" + }, + { + "EventName": "INST_LONG_JP", + "EventCode": "0x00000026", + "BriefDescription": "Retired long jump instructions" + }, + { + "EventName": "INST_FP", + "EventCode": "0x0000002a", + "BriefDescription": "Retired FPU instructions" + } +] diff --git a/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json 
b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json new file mode 100644 index 000000000000..0ab6f288af91 --- /dev/null +++ b/tools/perf/pmu-events/arch/riscv/thead/c900-legacy/microarch.json @@ -0,0 +1,80 @@ +[ + { + "EventName": "LSU_SPEC_FAIL", + "EventCode": "0x0000000a", + "BriefDescription": "LSU speculation fail" + }, + { + "EventName": "IDU_RF_PIPE_FAIL", + "EventCode": "0x00000014", + "BriefDescription": "Instruction decode unit launch pipeline failed in RF state" + }, + { + "EventName": "IDU_RF_REG_FAIL", + "EventCode": "0x00000015", + "BriefDescription": "Instruction decode unit launch register file fail in RF state" + }, + { + "EventName": "IDU_RF_INSTRUCTION", + "EventCode": "0x00000016", + "BriefDescription": "retired instruction count of Instruction decode unit in RF (Register File) stage" + }, + { + "EventName": "LSU_4K_STALL", + "EventCode": "0x00000017", + "BriefDescription": "LSU stall times for long distance data access (Over 4K)", + "PublicDescription": "This stall occurs when translate virtual address with page offset over 4k" + }, + { + "EventName": "LSU_OTHER_STALL", + "EventCode": "0x00000018", + "BriefDescription": "LSU stall times for other reasons (except the 4k stall)" + }, + { + "EventName": "LSU_SQ_OTHER_DIS", + "EventCode": "0x00000019", + "BriefDescription": "LSU store queue discard others" + }, + { + "EventName": "LSU_SQ_DATA_DISCARD", + "EventCode": "0x0000001a", + "BriefDescription": "LSU store queue discard data (uops)" + }, + { + "EventName": "BRANCH_DIRECTION_MISPREDICTION", + "EventCode": "0x0000001b", + "BriefDescription": "Branch misprediction in BTB" + }, + { + "EventName": "BRANCH_DIRECTION_PREDICTION", + "EventCode": "0x0000001c", + "BriefDescription": "All branch prediction in BTB", + "PublicDescription": "This event including both successful prediction and failed prediction in BTB" + }, + { + "EventName": "INTERRUPT_ACK_COUNT", + "EventCode": "0x00000023", + "BriefDescription": "acknowledged interrupt count" + }, + { + "EventName": "INTERRUPT_OFF_CYCLE", + "EventCode": "0x00000024", + "BriefDescription": "PLIC arbitration time when the interrupt is not responded", + "PublicDescription": "The arbitration time is recorded while meeting any of the following:\n- CPU is M-mode and MIE == 0\n- CPU is S-mode and delegation and SIE == 0\n" + }, + { + "EventName": "IFU_STALLED_CYCLE", + "EventCode": "0x00000027", + "BriefDescription": "Number of stall cycles of the instruction fetch unit (IFU)." + }, + { + "EventName": "IDU_STALLED_CYCLE", + "EventCode": "0x00000028", + "BriefDescription": "hpcp_backend_stall Number of stall cycles of the instruction decoding unit (IDU) and next-level pipeline unit." + }, + { + "EventName": "SYNC_STALL", + "EventCode": "0x00000029", + "BriefDescription": "Sync instruction stall cycle fence/fence.i/sync/sfence" + } +] From ffa96259ca5f02bbcecd2c45831e7632cd6d3485 Mon Sep 17 00:00:00 2001 From: James Clark Date: Mon, 13 Nov 2023 10:23:24 +0000 Subject: [PATCH 059/179] perf test: Use existing config value for objdump path There is already an existing config value for changing the objdump path, so instead of having two values that do the same thing, make 'perf test' use annotate.objdump as well. 
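For illustration (the objdump path below is hypothetical), the single remaining knob can be set with 'perf config' and now affects both annotation and the disassembly-related 'perf test' entries:

  perf config annotate.objdump=/opt/cross/bin/aarch64-linux-gnu-objdump

which is equivalent to carrying this in ~/.perfconfig:

  [annotate]
          objdump = /opt/cross/bin/aarch64-linux-gnu-objdump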
Signed-off-by: James Clark Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Fangrui Song Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Peter Zijlstra Cc: Yang Jihong Cc: Yonghong Song Link: https://lore.kernel.org/r/ZU5Cx4LTrB5q0sIG@kernel.org Link: https://lore.kernel.org/r/20231113102327.695386-1-james.clark@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-config.txt | 8 ++------ tools/perf/tests/builtin-test.c | 2 +- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 16398babd1ef..379f9d7a8ab1 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt @@ -251,7 +251,8 @@ annotate.*:: addr2line binary to use for file names and line numbers. annotate.objdump:: - objdump binary to use for disassembly and annotations. + objdump binary to use for disassembly and annotations, + including in the 'perf test' command. annotate.disassembler_style:: Use this to change the default disassembler style to some other value @@ -722,11 +723,6 @@ session-.*:: Defines new record session for daemon. The value is record's command line without the 'record' keyword. -test.*:: - - test.objdump:: - objdump binary to use for disassembly and annotations. - SEE ALSO -------- linkperf:perf[1] diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 113e92119e1d..b8c21e81a021 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -518,7 +518,7 @@ static int run_workload(const char *work, int argc, const char **argv) static int perf_test__config(const char *var, const char *value, void *data __maybe_unused) { - if (!strcmp(var, "test.objdump")) + if (!strcmp(var, "annotate.objdump")) test_objdump_path = value; return 0; From 08973307d28311505b85216d724826e2ca21759e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Wed, 11 Oct 2023 20:50:25 -0700 Subject: [PATCH 060/179] perf annotate: Check if operand has multiple regs It needs to check all possible information in an instruction. Let's add a field indicating if the operand has multiple registers. 
I'll be used to search type information like in an array access on x86 like: mov 0x10(%rax,%rbx,8), %rcx ------------- here Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231012035111.676789-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 36 ++++++++++++++++++++++++++++++++++++ tools/perf/util/annotate.h | 2 ++ 2 files changed, 38 insertions(+) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 3364edf30f50..9a828dc601c7 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -85,6 +85,8 @@ struct arch { struct { char comment_char; char skip_functions_char; + char register_char; + char memory_ref_char; } objdump; }; @@ -188,6 +190,8 @@ static struct arch architectures[] = { .insn_suffix = "bwlq", .objdump = { .comment_char = '#', + .register_char = '%', + .memory_ref_char = '(', }, }, { @@ -566,6 +570,34 @@ static struct ins_ops lock_ops = { .scnprintf = lock__scnprintf, }; +/* + * Check if the operand has more than one registers like x86 SIB addressing: + * 0x1234(%rax, %rbx, 8) + * + * But it doesn't care segment selectors like %gs:0x5678(%rcx), so just check + * the input string after 'memory_ref_char' if exists. + */ +static bool check_multi_regs(struct arch *arch, const char *op) +{ + int count = 0; + + if (arch->objdump.register_char == 0) + return false; + + if (arch->objdump.memory_ref_char) { + op = strchr(op, arch->objdump.memory_ref_char); + if (op == NULL) + return false; + } + + while ((op = strchr(op, arch->objdump.register_char)) != NULL) { + count++; + op++; + } + + return count > 1; +} + static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms __maybe_unused) { char *s = strchr(ops->raw, ','), *target, *comment, prev; @@ -593,6 +625,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy if (ops->source.raw == NULL) return -1; + ops->source.multi_regs = check_multi_regs(arch, ops->source.raw); + target = skip_spaces(++s); comment = strchr(s, arch->objdump.comment_char); @@ -613,6 +647,8 @@ static int mov__parse(struct arch *arch, struct ins_operands *ops, struct map_sy if (ops->target.raw == NULL) goto out_free_source; + ops->target.multi_regs = check_multi_regs(arch, ops->target.raw); + if (comment == NULL) return 0; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index bc8b95e8b1be..b64a2be287b3 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -39,12 +39,14 @@ struct ins_operands { s64 offset; bool offset_avail; bool outside; + bool multi_regs; } target; union { struct { char *raw; char *name; u64 addr; + bool multi_regs; } source; struct { struct ins ins; From 72108c0b9c0e004d7dfc2fc88b02c30b12711325 Mon Sep 17 00:00:00 2001 From: Yang Jihong Date: Tue, 31 Oct 2023 10:55:23 +0000 Subject: [PATCH 061/179] perf tools: Add --debug-file option to redirect debug output Currently, debug messages is output to stderr, add --debug-file option to support redirection to a specified file. Some test scenarios: # perf --list-opts --help --version --exec-path --html-path --paginate --no-pager --debugfs-dir --buildid-dir --list-cmds --list-opts --debug --debug-file # perf --debug-file No path given for --debug-file. 
Usage: perf [--version] [--help] [OPTIONS] COMMAND [ARGS] # perf --debug-file /sys/perf.log record -v true Open debug file '/sys/perf.log' failed: Permission denied Usage: perf [--version] [--help] [OPTIONS] COMMAND [ARGS] # perf --debug-file /tmp/perf.log record -v true [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.013 MB perf.data (26 samples) ] # cat /tmp/perf.log DEBUGINFOD_URLS= Using CPUID GenuineIntel-6-3E-4 nr_cblocks: 0 affinity: SYS mmap flush: 1 comp level: 0 mmap size 528384B Control descriptor is not initialized mmap size 528384B Looking at the vmlinux_path (8 entries long) Using /proc/kcore for kernel data Using /proc/kallsyms for symbols symbol:unmap_start file:(null) line:0 offset:0 return:0 lazy:(null) symbol:unmap_complete file:(null) line:0 offset:0 return:0 lazy:(null) symbol:map_start file:(null) line:0 offset:0 return:0 lazy:(null) symbol:map_complete file:(null) line:0 offset:0 return:0 lazy:(null) symbol:reloc_start file:(null) line:0 offset:0 return:0 lazy:(null) symbol:reloc_complete file:(null) line:0 offset:0 return:0 lazy:(null) symbol:init_start file:(null) line:0 offset:0 return:0 lazy:(null) symbol:init_complete file:(null) line:0 offset:0 return:0 lazy:(null) symbol:lll_lock_wait_private file:(null) line:0 offset:0 return:0 lazy:(null) symbol:lll_lock_wait file:(null) line:0 offset:0 return:0 lazy:(null) symbol:setjmp file:(null) line:0 offset:0 return:0 lazy:(null) symbol:longjmp file:(null) line:0 offset:0 return:0 lazy:(null) symbol:longjmp_target file:(null) line:0 offset:0 return:0 lazy:(null) failed to write feature HYBRID_TOPOLOGY Signed-off-by: Yang Jihong Acked-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231031105523.1472558-1-yangjihong1@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf.txt | 3 +++ tools/perf/perf.c | 30 ++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/tools/perf/Documentation/perf.txt b/tools/perf/Documentation/perf.txt index ba3df49c169d..a7cf7bc2f968 100644 --- a/tools/perf/Documentation/perf.txt +++ b/tools/perf/Documentation/perf.txt @@ -64,6 +64,9 @@ OPTIONS perf-event-open - Print perf_event_open() arguments and return value +--debug-file:: + Write debug output to a specified file. 
+ DESCRIPTION ----------- Performance counters for Linux are a new kernel-based subsystem diff --git a/tools/perf/perf.c b/tools/perf/perf.c index d3fc8090413c..921bee0a6437 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -39,6 +39,7 @@ #include static int use_pager = -1; +static FILE *debug_fp = NULL; struct cmd_struct { const char *cmd; @@ -162,6 +163,19 @@ static void commit_pager_choice(void) } } +static int set_debug_file(const char *path) +{ + debug_fp = fopen(path, "w"); + if (!debug_fp) { + fprintf(stderr, "Open debug file '%s' failed: %s\n", + path, strerror(errno)); + return -1; + } + + debug_set_file(debug_fp); + return 0; +} + struct option options[] = { OPT_ARGUMENT("help", "help"), OPT_ARGUMENT("version", "version"), @@ -174,6 +188,7 @@ struct option options[] = { OPT_ARGUMENT("list-cmds", "list-cmds"), OPT_ARGUMENT("list-opts", "list-opts"), OPT_ARGUMENT("debug", "debug"), + OPT_ARGUMENT("debug-file", "debug-file"), OPT_END() }; @@ -287,6 +302,18 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) (*argv)++; (*argc)--; + } else if (!strcmp(cmd, "--debug-file")) { + if (*argc < 2) { + fprintf(stderr, "No path given for --debug-file.\n"); + usage(perf_usage_string); + } + + if (set_debug_file((*argv)[1])) + usage(perf_usage_string); + + (*argv)++; + (*argc)--; + } else { fprintf(stderr, "Unknown option: %s\n", cmd); usage(perf_usage_string); @@ -547,5 +574,8 @@ int main(int argc, const char **argv) fprintf(stderr, "Failed to run command '%s': %s\n", cmd, str_error_r(errno, sbuf, sizeof(sbuf))); out: + if (debug_fp) + fclose(debug_fp); + return 1; } From d60469d7c0e5c4e8de10699377d2ba79004236a4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Thu, 9 Nov 2023 15:59:51 -0800 Subject: [PATCH 062/179] perf dwarf-aux: Add die_find_variable_by_addr() The die_find_variable_by_addr() is to find a variables in the given DIE using given (PC-relative) address. Global variables will have a location expression with DW_OP_addr which has an address so can simply compare it with the address. <1><143a7>: Abbrev Number: 2 (DW_TAG_variable) <143a8> DW_AT_name : loops_per_jiffy <143ac> DW_AT_type : <0x1cca> <143b0> DW_AT_external : 1 <143b0> DW_AT_decl_file : 193 <143b1> DW_AT_decl_line : 213 <143b2> DW_AT_location : 9 byte block: 3 b0 46 41 82 ff ff ff ff (DW_OP_addr: ffffffff824146b0) Note that the type-offset should be calculated from the base address of the global variable. 
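A hedged sketch of how a caller would use the new helper (the locals and the debug print are made up for illustration and assume a dwarf-aux user that already includes dwarf-aux.h and debug.h; only the signature comes from this patch):

	Dwarf_Die var_die;
	int offset;

	/* sc_die: scope DIE, pc: PC-relative code address, addr: data address */
	if (die_find_variable_by_addr(sc_die, pc, addr, &var_die, &offset) != NULL)
		pr_debug("found global: %s +%#x\n",
			 dwarf_diename(&var_die), offset);

dwarf_diename() is the usual libdw accessor; the point is that 'offset' comes back relative to the start of the matched variable, so the type-offset lookup that follows starts from the variable's base as described above.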
Signed-off-by: Namhyung Kim Acked-by: Masami Hiramatsu (Google) Cc: Adrian Hunter Cc: Andi Kleen Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231110000012.3538610-33-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 79 +++++++++++++++++++++++++++++++++++++ tools/perf/util/dwarf-aux.h | 14 +++++++ 2 files changed, 93 insertions(+) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index 652e6e7368a2..edd9e407bc74 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1250,8 +1250,12 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf) struct find_var_data { /* Target instruction address */ Dwarf_Addr pc; + /* Target memory address (for global data) */ + Dwarf_Addr addr; /* Target register */ unsigned reg; + /* Access offset, set for global data */ + int offset; }; /* Max number of registers DW_OP_regN supports */ @@ -1312,6 +1316,81 @@ Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, }; return die_find_child(sc_die, __die_find_var_reg_cb, &data, die_mem); } + +/* Only checks direct child DIEs in the given scope */ +static int __die_find_var_addr_cb(Dwarf_Die *die_mem, void *arg) +{ + struct find_var_data *data = arg; + int tag = dwarf_tag(die_mem); + ptrdiff_t off = 0; + Dwarf_Attribute attr; + Dwarf_Addr base, start, end; + Dwarf_Word size; + Dwarf_Die type_die; + Dwarf_Op *ops; + size_t nops; + + if (tag != DW_TAG_variable) + return DIE_FIND_CB_SIBLING; + + if (dwarf_attr(die_mem, DW_AT_location, &attr) == NULL) + return DIE_FIND_CB_SIBLING; + + while ((off = dwarf_getlocations(&attr, off, &base, &start, &end, &ops, &nops)) > 0) { + if (ops->atom != DW_OP_addr) + continue; + + if (data->addr < ops->number) + continue; + + if (data->addr == ops->number) { + /* Update offset relative to the start of the variable */ + data->offset = 0; + return DIE_FIND_CB_END; + } + + if (die_get_real_type(die_mem, &type_die) == NULL) + continue; + + if (dwarf_aggregate_size(&type_die, &size) < 0) + continue; + + if (data->addr >= ops->number + size) + continue; + + /* Update offset relative to the start of the variable */ + data->offset = data->addr - ops->number; + return DIE_FIND_CB_END; + } + return DIE_FIND_CB_SIBLING; +} + +/** + * die_find_variable_by_addr - Find variable located at given address + * @sc_die: a scope DIE + * @pc: the program address to find + * @addr: the data address to find + * @die_mem: a buffer to save the resulting DIE + * @offset: the offset in the resulting type + * + * Find the variable DIE located at the given address (in PC-relative mode). + * This is usually for global variables. 
+ */ +Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc, + Dwarf_Addr addr, Dwarf_Die *die_mem, + int *offset) +{ + struct find_var_data data = { + .pc = pc, + .addr = addr, + }; + Dwarf_Die *result; + + result = die_find_child(sc_die, __die_find_var_addr_cb, &data, die_mem); + if (result) + *offset = data.offset; + return result; +} #endif /* diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index b6f430730bd1..0ddf61fd3f8b 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -141,6 +141,11 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf); Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die, Dwarf_Addr pc, int reg, Dwarf_Die *die_mem); +/* Find a (global) variable located in the 'addr' */ +Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die, Dwarf_Addr pc, + Dwarf_Addr addr, Dwarf_Die *die_mem, + int *offset); + #else /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ static inline int die_get_var_range(Dwarf_Die *sp_die __maybe_unused, @@ -158,6 +163,15 @@ static inline Dwarf_Die *die_find_variable_by_reg(Dwarf_Die *sc_die __maybe_unus return NULL; } +static inline Dwarf_Die *die_find_variable_by_addr(Dwarf_Die *sc_die __maybe_unused, + Dwarf_Addr pc __maybe_unused, + Dwarf_Addr addr __maybe_unused, + Dwarf_Die *die_mem __maybe_unused, + int *offset __maybe_unused) +{ + return NULL; +} + #endif /* HAVE_DWARF_GETLOCATIONS_SUPPORT */ #endif /* _DWARF_AUX_H */ From 5940a20a186bd74efd6d0dc0b2b7c77d891895d9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 2 Nov 2023 10:56:46 -0700 Subject: [PATCH 063/179] perf mmap: Lazily initialize zstd streams to save memory when not using it Zstd streams create dictionaries that can require significant RAM, especially when there is one per-CPU. Tools like 'perf record' won't use the streams without the -z option, and so the creation of the streams is pure overhead. Switch to creating the streams on first use. Committer notes: ssize_t comes from sys/types.h, size_t from stddef.h. This worked on glibc as stdlib.h includes both, but not on musl libc. So do what 'man size_t' says and include sys/types.h and stddef.h instead of stdlib.h Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231102175735.2272696-5-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 26 ++++++++++----- tools/perf/util/compress.h | 7 +++-- tools/perf/util/mmap.c | 5 ++- tools/perf/util/mmap.h | 1 - tools/perf/util/zstd.c | 63 +++++++++++++++++++------------------ 5 files changed, 59 insertions(+), 43 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 8ec818568662..9b4f3805ca92 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -270,7 +270,7 @@ static int record__write(struct record *rec, struct mmap *map __maybe_unused, static int record__aio_enabled(struct record *rec); static int record__comp_enabled(struct record *rec); -static size_t zstd_compress(struct perf_session *session, struct mmap *map, +static ssize_t zstd_compress(struct perf_session *session, struct mmap *map, void *dst, size_t dst_size, void *src, size_t src_size); #ifdef HAVE_AIO_SUPPORT @@ -405,9 +405,13 @@ static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size */ if (record__comp_enabled(aio->rec)) { - size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, - mmap__mmap_len(map) - aio->size, - buf, size); + ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size, + mmap__mmap_len(map) - aio->size, + buf, size); + if (compressed < 0) + return (int)compressed; + + size = compressed; } else { memcpy(aio->data + aio->size, buf, size); } @@ -633,7 +637,13 @@ static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size) struct record *rec = to; if (record__comp_enabled(rec)) { - size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size); + ssize_t compressed = zstd_compress(rec->session, map, map->data, + mmap__mmap_len(map), bf, size); + + if (compressed < 0) + return (int)compressed; + + size = compressed; bf = map->data; } @@ -1527,10 +1537,10 @@ static size_t process_comp_header(void *record, size_t increment) return size; } -static size_t zstd_compress(struct perf_session *session, struct mmap *map, +static ssize_t zstd_compress(struct perf_session *session, struct mmap *map, void *dst, size_t dst_size, void *src, size_t src_size) { - size_t compressed; + ssize_t compressed; size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1; struct zstd_data *zstd_data = &session->zstd_data; @@ -1539,6 +1549,8 @@ static size_t zstd_compress(struct perf_session *session, struct mmap *map, compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size, max_record_size, process_comp_header); + if (compressed < 0) + return compressed; if (map && map->file) { thread->bytes_transferred += src_size; diff --git a/tools/perf/util/compress.h b/tools/perf/util/compress.h index 0cd3369af2a4..b29109cd3609 100644 --- a/tools/perf/util/compress.h +++ b/tools/perf/util/compress.h @@ -3,6 +3,8 @@ #define PERF_COMPRESS_H #include +#include +#include #ifdef HAVE_ZSTD_SUPPORT #include #endif @@ -21,6 +23,7 @@ struct zstd_data { #ifdef HAVE_ZSTD_SUPPORT ZSTD_CStream *cstream; ZSTD_DStream *dstream; + int comp_level; #endif }; @@ -29,7 +32,7 @@ struct zstd_data { int zstd_init(struct zstd_data *data, int level); int zstd_fini(struct zstd_data *data); -size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size, +ssize_t zstd_compress_stream_to_records(struct zstd_data *data, 
void *dst, size_t dst_size, void *src, size_t src_size, size_t max_record_size, size_t process_header(void *record, size_t increment)); @@ -48,7 +51,7 @@ static inline int zstd_fini(struct zstd_data *data __maybe_unused) } static inline -size_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused, +ssize_t zstd_compress_stream_to_records(struct zstd_data *data __maybe_unused, void *dst __maybe_unused, size_t dst_size __maybe_unused, void *src __maybe_unused, size_t src_size __maybe_unused, size_t max_record_size __maybe_unused, diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index 49093b21ee2d..122ee198a86e 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c @@ -295,15 +295,14 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu map->core.flush = mp->flush; - map->comp_level = mp->comp_level; #ifndef PYTHON_PERF - if (zstd_init(&map->zstd_data, map->comp_level)) { + if (zstd_init(&map->zstd_data, mp->comp_level)) { pr_debug2("failed to init mmap compressor, error %d\n", errno); return -1; } #endif - if (map->comp_level && !perf_mmap__aio_enabled(map)) { + if (mp->comp_level && !perf_mmap__aio_enabled(map)) { map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0); if (map->data == MAP_FAILED) { diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index f944c3cd5efa..0df6e1621c7e 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -39,7 +39,6 @@ struct mmap { #endif struct mmap_cpu_mask affinity_mask; void *data; - int comp_level; struct perf_data_file *file; struct zstd_data zstd_data; }; diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c index 48dd2b018c47..57027e0ac7b6 100644 --- a/tools/perf/util/zstd.c +++ b/tools/perf/util/zstd.c @@ -7,35 +7,9 @@ int zstd_init(struct zstd_data *data, int level) { - size_t ret; - - data->dstream = ZSTD_createDStream(); - if (data->dstream == NULL) { - pr_err("Couldn't create decompression stream.\n"); - return -1; - } - - ret = ZSTD_initDStream(data->dstream); - if (ZSTD_isError(ret)) { - pr_err("Failed to initialize decompression stream: %s\n", ZSTD_getErrorName(ret)); - return -1; - } - - if (!level) - return 0; - - data->cstream = ZSTD_createCStream(); - if (data->cstream == NULL) { - pr_err("Couldn't create compression stream.\n"); - return -1; - } - - ret = ZSTD_initCStream(data->cstream, level); - if (ZSTD_isError(ret)) { - pr_err("Failed to initialize compression stream: %s\n", ZSTD_getErrorName(ret)); - return -1; - } - + data->comp_level = level; + data->dstream = NULL; + data->cstream = NULL; return 0; } @@ -54,7 +28,7 @@ int zstd_fini(struct zstd_data *data) return 0; } -size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size, +ssize_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t dst_size, void *src, size_t src_size, size_t max_record_size, size_t process_header(void *record, size_t increment)) { @@ -63,6 +37,21 @@ size_t zstd_compress_stream_to_records(struct zstd_data *data, void *dst, size_t ZSTD_outBuffer output; void *record; + if (!data->cstream) { + data->cstream = ZSTD_createCStream(); + if (data->cstream == NULL) { + pr_err("Couldn't create compression stream.\n"); + return -1; + } + + ret = ZSTD_initCStream(data->cstream, data->comp_level); + if (ZSTD_isError(ret)) { + pr_err("Failed to initialize compression stream: %s\n", + ZSTD_getErrorName(ret)); + return -1; + } + } + while (input.pos < input.size) { record = dst; 
size = process_header(record, 0); @@ -96,6 +85,20 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size ZSTD_inBuffer input = { src, src_size, 0 }; ZSTD_outBuffer output = { dst, dst_size, 0 }; + if (!data->dstream) { + data->dstream = ZSTD_createDStream(); + if (data->dstream == NULL) { + pr_err("Couldn't create decompression stream.\n"); + return 0; + } + + ret = ZSTD_initDStream(data->dstream); + if (ZSTD_isError(ret)) { + pr_err("Failed to initialize decompression stream: %s\n", + ZSTD_getErrorName(ret)); + return 0; + } + } while (input.pos < input.size) { ret = ZSTD_decompressStream(data->dstream, &output, &input); if (ZSTD_isError(ret)) { From a472ee42e6f60c8714e2306385687922afcba8c4 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 29 Nov 2023 12:47:17 -0300 Subject: [PATCH 064/179] perf test sigtrap: Generalize the BTF routine to reuse it in this test Move the part that loads the BTF info to a "btf__available()" that will lazy load the BTF info so that if we need it for some other test, which we will in the following cset, we can reuse it. At some point this will move from this specific 'perf test' entry to be used in other parts of perf, do it when needed. Cc: Adrian Hunter Cc: Clark Williams Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kate Carcia Cc: Namhyung Kim Cc: Thomas Gleixner Link: https://lore.kernel.org/r/20231129154718.326330-2-acme@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/sigtrap.c | 60 +++++++++++++++++++++++++------------- 1 file changed, 40 insertions(+), 20 deletions(-) diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c index 1de7478ec189..a1bc7c776254 100644 --- a/tools/perf/tests/sigtrap.c +++ b/tools/perf/tests/sigtrap.c @@ -57,36 +57,51 @@ static struct perf_event_attr make_event_attr(void) #ifdef HAVE_BPF_SKEL #include +static struct btf *btf; + +static bool btf__available(void) +{ + if (btf == NULL) + btf = btf__load_vmlinux_btf(); + + return btf != NULL; +} + +static void btf__exit(void) +{ + btf__free(btf); + btf = NULL; +} + +static const struct btf_member *__btf_type__find_member_by_name(int type_id, const char *member_name) +{ + const struct btf_type *t = btf__type_by_id(btf, type_id); + const struct btf_member *m; + int i; + + for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { + const char *current_member_name = btf__name_by_offset(btf, m->name_off); + if (!strcmp(current_member_name, member_name)) + return m; + } + + return NULL; +} + static bool attr_has_sigtrap(void) { - bool ret = false; - struct btf *btf; - const struct btf_type *t; - const struct btf_member *m; - const char *name; - int i, id; + int id; - btf = btf__load_vmlinux_btf(); - if (btf == NULL) { + if (!btf__available()) { /* should be an old kernel */ return false; } id = btf__find_by_name_kind(btf, "perf_event_attr", BTF_KIND_STRUCT); if (id < 0) - goto out; + return false; - t = btf__type_by_id(btf, id); - for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { - name = btf__name_by_offset(btf, m->name_off); - if (!strcmp(name, "sigtrap")) { - ret = true; - break; - } - } -out: - btf__free(btf); - return ret; + return __btf_type__find_member_by_name(id, "sigtrap") != NULL; } #else /* !HAVE_BPF_SKEL */ static bool attr_has_sigtrap(void) @@ -109,6 +124,10 @@ static bool attr_has_sigtrap(void) return ret; } + +static void btf__exit(void) +{ +} #endif /* HAVE_BPF_SKEL */ static void @@ -221,6 +240,7 @@ static int test__sigtrap(struct test_suite *test __maybe_unused, int 
subtest __m sigaction(SIGTRAP, &oldact, NULL); out: pthread_barrier_destroy(&barrier); + btf__exit(); return ret; } From 650e0bde43f35bb675e87e30f679a57cfa22e0e5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 29 Nov 2023 12:47:18 -0300 Subject: [PATCH 065/179] perf tests sigtrap: Skip if running on a kernel with sleepable spinlocks There are issues as reported that need some more investigation on the RT kernel front, till that is addressed, skip this test. This test is already skipped for multiple hardware architectures where the tested kernel feature is not supported. Acked-by: Marco Elver Cc: Adrian Hunter Cc: Clark Williams Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Juri Lelli Cc: Kate Carcia Cc: Marco Elver Cc: Mike Galbraith Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Link: https://lore.kernel.org/all/e368f2c848d77fbc8d259f44e2055fe469c219cf.camel@gmx.de/ Link: https://lore.kernel.org/r/20231129154718.326330-3-acme@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/sigtrap.c | 46 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 44 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/sigtrap.c b/tools/perf/tests/sigtrap.c index a1bc7c776254..e6fd934b027a 100644 --- a/tools/perf/tests/sigtrap.c +++ b/tools/perf/tests/sigtrap.c @@ -103,6 +103,34 @@ static bool attr_has_sigtrap(void) return __btf_type__find_member_by_name(id, "sigtrap") != NULL; } + +static bool kernel_with_sleepable_spinlocks(void) +{ + const struct btf_member *member; + const struct btf_type *type; + const char *type_name; + int id; + + if (!btf__available()) + return false; + + id = btf__find_by_name_kind(btf, "spinlock", BTF_KIND_STRUCT); + if (id < 0) + return false; + + // Only RT has a "lock" member for "struct spinlock" + member = __btf_type__find_member_by_name(id, "lock"); + if (member == NULL) + return false; + + // But check its type as well + type = btf__type_by_id(btf, member->type); + if (!type || !btf_is_struct(type)) + return false; + + type_name = btf__name_by_offset(btf, type->name_off); + return type_name && !strcmp(type_name, "rt_mutex_base"); +} #else /* !HAVE_BPF_SKEL */ static bool attr_has_sigtrap(void) { @@ -125,6 +153,11 @@ static bool attr_has_sigtrap(void) return ret; } +static bool kernel_with_sleepable_spinlocks(void) +{ + return false; +} + static void btf__exit(void) { } @@ -166,7 +199,7 @@ static int run_test_threads(pthread_t *threads, pthread_barrier_t *barrier) static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrier) { - int ret; + int ret, expected_sigtraps; ctx.iterate_on = 3000; @@ -175,7 +208,16 @@ static int run_stress_test(int fd, pthread_t *threads, pthread_barrier_t *barrie ret = run_test_threads(threads, barrier); TEST_ASSERT_EQUAL("disable failed", ioctl(fd, PERF_EVENT_IOC_DISABLE, 0), 0); - TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, NUM_THREADS * ctx.iterate_on); + expected_sigtraps = NUM_THREADS * ctx.iterate_on; + + if (ctx.signal_count < expected_sigtraps && kernel_with_sleepable_spinlocks()) { + pr_debug("Expected %d sigtraps, got %d, running on a kernel with sleepable spinlocks.\n", + expected_sigtraps, ctx.signal_count); + pr_debug("See https://lore.kernel.org/all/e368f2c848d77fbc8d259f44e2055fe469c219cf.camel@gmx.de/\n"); + return TEST_SKIP; + } else + TEST_ASSERT_EQUAL("unexpected sigtraps", ctx.signal_count, expected_sigtraps); + TEST_ASSERT_EQUAL("missing signals or incorrectly delivered", ctx.tids_want_signal, 0); TEST_ASSERT_VAL("unexpected si_addr", 
ctx.first_siginfo.si_addr == &ctx.iterate_on); #if 0 /* FIXME: enable when libc's signal.h has si_perf_{type,data} */ From 72a2a0a494ec9aefbca4ad64f46b8e3370809993 Mon Sep 17 00:00:00 2001 From: Likhitha Korrapati Date: Sun, 26 Nov 2023 02:09:14 -0500 Subject: [PATCH 066/179] perf test record+probe_libc_inet_pton: Fix call chain match on powerpc The perf test "probe libc's inet_pton & backtrace it with ping" fails on powerpc as below: # perf test -v "probe libc's inet_pton & backtrace it with ping" 85: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 96028 ping 96056 [002] 127271.101961: probe_libc:inet_pton: (7fffa1779a60) 7fffa1779a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffa172a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) FAIL: expected backtrace entry "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/glibc-hwcaps/power10/libc.so.6\)$" got "7fffa172a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6)" test child finished with -1 ---- end ---- probe libc's inet_pton & backtrace it with ping: FAILED! This test installs a probe on libc's inet_pton function, which will use uprobes and then uses perf trace on a ping to localhost. It gets 3 levels deep backtrace and checks whether it is what we expected or not. The test started failing from RHEL 9.4 where as it works in previous distro version (RHEL 9.2). Test expects gaih_inet function to be part of backtrace. But in the glibc version (2.34-86) which is part of distro where it fails, this function is missing and hence the test is failing. From nm and ping command output we can confirm that gaih_inet function is not present in the expected backtrace for glibc version glibc-2.34-86 [root@xxx perf]# nm /usr/lib64/glibc-hwcaps/power10/libc.so.6 | grep gaih_inet 00000000001273e0 t gaih_inet_serv 00000000001cd8d8 r gaih_inet_typeproto [root@xxx perf]# perf script -i /tmp/perf.data.6E8 ping 104048 [000] 128582.508976: probe_libc:inet_pton: (7fff83779a60) 7fff83779a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fff8372a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 11dc73534 [unknown] (/usr/bin/ping) 7fff8362a8c4 __libc_start_call_main+0x84 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) FAIL: expected backtrace entry "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\(/usr/lib64/glibc-hwcaps/power10/libc.so.6\)$" got "7fff9d52a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6)" With version glibc-2.34-60 gaih_inet function is present as part of the expected backtrace. So we cannot just remove the gaih_inet function from the backtrace. [root@xxx perf]# nm /usr/lib64/glibc-hwcaps/power10/libc.so.6 | grep gaih_inet 0000000000130490 t gaih_inet.constprop.0 000000000012e830 t gaih_inet_serv 00000000001d45e4 r gaih_inet_typeproto [root@xxx perf]# ./perf script -i /tmp/perf.data.b6S ping 67906 [000] 22699.591699: probe_libc:inet_pton_3: (7fffbdd80820) 7fffbdd80820 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffbdd31160 gaih_inet.constprop.0+0xcd0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fffbdd31c7c getaddrinfo+0x14c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 1140d3558 [unknown] (/usr/bin/ping) This patch solves this issue by doing a conditional skip. If there is a gaih_inet function present in the libc then it will be added to the expected backtrace else the function will be skipped from being added to the expected backtrace. 
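As a quick local check of which case applies (illustrative only, with $libc standing in for the library path the test resolves, e.g. /usr/lib64/libc.so.6):

  if nm "$libc" | grep -F -q gaih_inet.; then
          echo "gaih_inet present: it stays in the expected backtrace"
  else
          echo "gaih_inet not in this libc: the entry is skipped"
  fi

This is the same conditional the test now applies before building the expected backtrace, as shown in the patch below.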
Output with the patch [root@xxx perf]# ./perf test -v "probe libc's inet_pton & backtrace it with ping" 83: probe libc's inet_pton & backtrace it with ping : --- start --- test child forked, pid 102662 ping 102692 [000] 127935.549973: probe_libc:inet_pton: (7fff93379a60) 7fff93379a60 __GI___inet_pton+0x0 (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 7fff9332a73c getaddrinfo+0x121c (/usr/lib64/glibc-hwcaps/power10/libc.so.6) 11ef03534 [unknown] (/usr/bin/ping) test child finished with 0 ---- end ---- probe libc's inet_pton & backtrace it with ping: Ok Reported-by: Disha Goel Reviewed-by: Athira Jajeev Reviewed-by: Ian Rogers Signed-off-by: Likhitha Korrapati Tested-by: Disha Goel Cc: Adrian Hunter Cc: Disha Goel Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: Madhavan Srinivasan Cc: Namhyung Kim Cc: linuxppc-dev@lists.ozlabs.org Link: https://lore.kernel.org/r/20231126070914.175332-1-likhitha@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/record+probe_libc_inet_pton.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh index eebeea6bdc76..72c65570db37 100755 --- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh @@ -45,7 +45,10 @@ trace_libc_inet_pton_backtrace() { ;; ppc64|ppc64le) eventattr='max-stack=4' - echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + # Add gaih_inet to expected backtrace only if it is part of libc. + if nm $libc | grep -F -q gaih_inet.; then + echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected + fi echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected echo ".*(\+0x[[:xdigit:]]+|\[unknown\])[[:space:]]\(.*/bin/ping.*\)$" >> $expected ;; From af76b2dec0984a079d8497bfa37d29a9b55932e1 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 30 Nov 2023 14:11:45 -0300 Subject: [PATCH 067/179] libapi: Add missing linux/types.h header to get the __u64 type on io.h There are functions using __u64, so we need to have the linux/types.h header otherwise we'll break when its not included before api/io.h. Fixes: e95770af4c4a280f ("tools api: Add a lightweight buffered reading api") Reviewed-by: Ian Rogers Cc: Adrian Hunter Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZWjDPL+IzPPsuC3X@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/api/io.h | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h index a77b74c5fb65..2a7fe9758813 100644 --- a/tools/lib/api/io.h +++ b/tools/lib/api/io.h @@ -12,6 +12,7 @@ #include #include #include +#include struct io { /* File descriptor being read/ */ From 366efbff58092fac48421fa34018bb34c326088e Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:14 -0800 Subject: [PATCH 068/179] libperf: Lazily allocate/size mmap event copy The event copy in the mmap is used to have storage to read an event. Not all users of mmaps read the events, such as perf record. The amount of buffer was also statically set to PERF_SAMPLE_MAX_SIZE rather than the amount necessary from the header's event size. Switch to a model where the event_copy is reallocated if too small to the event's size. This adds the potential for the event to move, so if a copy of the event pointer were stored it could be broken. All the current users do: while(event = perf_mmap__read_event()) { ... 
} and so they would be broken due to the event being overwritten if they had stored the pointer. Manual inspection and address sanitizer testing also shows the event pointer not being stored. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-3-irogers@google.com [ Replace two lines with equivalent zfree(&map->event_copy) ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/include/internal/mmap.h | 3 ++- tools/lib/perf/mmap.c | 20 +++++++++++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/tools/lib/perf/include/internal/mmap.h b/tools/lib/perf/include/internal/mmap.h index 5a062af8e9d8..5f08cab61ece 100644 --- a/tools/lib/perf/include/internal/mmap.h +++ b/tools/lib/perf/include/internal/mmap.h @@ -33,7 +33,8 @@ struct perf_mmap { bool overwrite; u64 flush; libperf_unmap_cb_t unmap_cb; - char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); + void *event_copy; + size_t event_copy_sz; struct perf_mmap *next; }; diff --git a/tools/lib/perf/mmap.c b/tools/lib/perf/mmap.c index 2184814b37dd..0c903c2372c9 100644 --- a/tools/lib/perf/mmap.c +++ b/tools/lib/perf/mmap.c @@ -19,6 +19,7 @@ void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev, bool overwrite, libperf_unmap_cb_t unmap_cb) { + /* Assume fields were zero initialized. */ map->fd = -1; map->overwrite = overwrite; map->unmap_cb = unmap_cb; @@ -51,13 +52,18 @@ int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp, void perf_mmap__munmap(struct perf_mmap *map) { - if (map && map->base != NULL) { + if (!map) + return; + + zfree(&map->event_copy); + map->event_copy_sz = 0; + if (map->base) { munmap(map->base, perf_mmap__mmap_len(map)); map->base = NULL; map->fd = -1; refcount_set(&map->refcnt, 0); } - if (map && map->unmap_cb) + if (map->unmap_cb) map->unmap_cb(map); } @@ -223,9 +229,17 @@ static union perf_event *perf_mmap__read(struct perf_mmap *map, */ if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) { unsigned int offset = *startp; - unsigned int len = min(sizeof(*event), size), cpy; + unsigned int len = size, cpy; void *dst = map->event_copy; + if (size > map->event_copy_sz) { + dst = realloc(map->event_copy, size); + if (!dst) + return NULL; + map->event_copy = dst; + map->event_copy_sz = size; + } + do { cpy = min(map->mask + 1 - (offset & map->mask), len); memcpy(dst, &data[offset & map->mask], cpy); From b6a15269cee255dc5fe75c249c701e3956d892cb Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:16 -0800 Subject: [PATCH 069/179] tools api fs: Switch filename__read_str to use io.h filename__read_str() has its own string reading code that allocates memory before reading into it. The memory allocated is sized at BUFSIZ that is 8kb. Most strings are short and so most of this 8kb is wasted. 
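To make the contrast concrete, here is a minimal sketch (assuming only the io.h helpers touched by this series, inside a function with the usual api/io.h, fcntl.h and stdio.h headers; the sysfs path is just an example) of reading a short string through a small buffer instead of an up-front 8kb allocation, which is essentially where the refactor described next ends up:

	struct io io;
	char bf[128];
	char *line = NULL;
	size_t len = 0;

	io.fd = open("/sys/devices/system/cpu/online", O_RDONLY);
	io__init(&io, io.fd, bf, sizeof(bf));
	if (io__getline(&io, &line, &len) > 0)
		printf("%s", line);	/* line already ends in '\n' */
	free(line);
	close(io.fd);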
Refactor io__getline(), as io__getdelim(), so that the newline character can be configurable and ignored in the case of filename__read_str(). Code like build_caches_for_cpu() in perf's header.c will read many strings and hold them in a data structure, in this case multiple strings per cache level per CPU. Using io.h's io__getline() avoids the wasted memory as strings are temporarily read into a buffer on the stack before being copied to a buffer that grows 128 bytes at a time and is never sized larger than the string. For a 16 hyperthread system the memory consumption of "perf record true" is reduced by 180kb, primarily through saving memory when reading the cache information. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-5-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/api/fs/fs.c | 56 +++++++++++-------------------------------- tools/lib/api/io.h | 11 ++++++--- 2 files changed, 22 insertions(+), 45 deletions(-) diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 5cb0eeec2c8a..004f2af5504b 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -16,6 +16,7 @@ #include #include "fs.h" +#include "../io.h" #include "debug-internal.h" #define _STR(x) #x @@ -344,53 +345,24 @@ int filename__read_ull(const char *filename, unsigned long long *value) return filename__read_ull_base(filename, value, 0); } -#define STRERR_BUFSIZE 128 /* For the buffer size of strerror_r */ - int filename__read_str(const char *filename, char **buf, size_t *sizep) { - size_t size = 0, alloc_size = 0; - void *bf = NULL, *nbf; - int fd, n, err = 0; - char sbuf[STRERR_BUFSIZE]; + struct io io; + char bf[128]; + int err; - fd = open(filename, O_RDONLY); - if (fd < 0) + io.fd = open(filename, O_RDONLY); + if (io.fd < 0) return -errno; - - do { - if (size == alloc_size) { - alloc_size += BUFSIZ; - nbf = realloc(bf, alloc_size); - if (!nbf) { - err = -ENOMEM; - break; - } - - bf = nbf; - } - - n = read(fd, bf + size, alloc_size - size); - if (n < 0) { - if (size) { - pr_warn("read failed %d: %s\n", errno, - strerror_r(errno, sbuf, sizeof(sbuf))); - err = 0; - } else - err = -errno; - - break; - } - - size += n; - } while (n > 0); - - if (!err) { - *sizep = size; - *buf = bf; + io__init(&io, io.fd, bf, sizeof(bf)); + *buf = NULL; + err = io__getdelim(&io, buf, sizep, /*delim=*/-1); + if (err < 0) { + free(*buf); + *buf = NULL; } else - free(bf); - - close(fd); + err = 0; + close(io.fd); return err; } diff --git a/tools/lib/api/io.h b/tools/lib/api/io.h index 2a7fe9758813..84adf8102018 100644 --- a/tools/lib/api/io.h +++ b/tools/lib/api/io.h @@ -141,8 +141,8 @@ static inline int io__get_dec(struct io *io, __u64 *dec) } } -/* Read up to and including the first newline following the pattern of getline. 
*/ -static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out) +/* Read up to and including the first delim. */ +static inline ssize_t io__getdelim(struct io *io, char **line_out, size_t *line_len_out, int delim) { char buf[128]; int buf_pos = 0; @@ -152,7 +152,7 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l /* TODO: reuse previously allocated memory. */ free(*line_out); - while (ch != '\n') { + while (ch != delim) { ch = io__get_char(io); if (ch < 0) @@ -185,4 +185,9 @@ static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_l return -ENOMEM; } +static inline ssize_t io__getline(struct io *io, char **line_out, size_t *line_len_out) +{ + return io__getdelim(io, line_out, line_len_out, /*delim=*/'\n'); +} + #endif /* __API_IO__ */ From f8846a1a3c54a53f1c30836a2b7143cfa5da223d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:17 -0800 Subject: [PATCH 070/179] tools api fs: Avoid reading whole file for a 1 byte bool sysfs__read_bool() used the first byte from a fully read file into a string. It then looked at the first byte's value. Avoid doing this and just read the first byte. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-6-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/api/fs/fs.c | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 004f2af5504b..337fde770e45 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -447,15 +447,22 @@ int sysfs__read_str(const char *entry, char **buf, size_t *sizep) int sysfs__read_bool(const char *entry, bool *value) { - char *buf; - size_t size; - int ret; + struct io io; + char bf[16]; + int ret = 0; + char path[PATH_MAX]; + const char *sysfs = sysfs__mountpoint(); - ret = sysfs__read_str(entry, &buf, &size); - if (ret < 0) - return ret; + if (!sysfs) + return -1; - switch (buf[0]) { + snprintf(path, sizeof(path), "%s/%s", sysfs, entry); + io.fd = open(path, O_RDONLY); + if (io.fd < 0) + return -errno; + + io__init(&io, io.fd, bf, sizeof(bf)); + switch (io__get_char(&io)) { case '1': case 'y': case 'Y': @@ -469,8 +476,7 @@ int sysfs__read_bool(const char *entry, bool *value) default: ret = -1; } - - free(buf); + close(io.fd); return ret; } From 072b6ad7cac6a868e56dec5f48a2e67d9ab8cb6e Mon Sep 17 00:00:00 2001 From: Nick Forrington Date: Thu, 2 Nov 2023 16:11:16 +0000 Subject: [PATCH 071/179] perf docs: Fix man page formatting for 'perf lock' This makes "CONTENTION" a top level section (rather than a subsection of "INFO"). 
Fixes: 79079f21f50a501f ("perf lock: Add -k and -F options to 'contention' subcommand") Reviewed-by: James Clark Signed-off-by: Nick Forrington Tested-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231102161117.49533-1-nick.forrington@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-lock.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-lock.txt b/tools/perf/Documentation/perf-lock.txt index 503abcba1438..f5938d616d75 100644 --- a/tools/perf/Documentation/perf-lock.txt +++ b/tools/perf/Documentation/perf-lock.txt @@ -119,7 +119,7 @@ INFO OPTIONS CONTENTION OPTIONS --------------- +------------------ -k:: --key=:: From 556bed5c6d4167f9ffb5c8648cdd3c8e39aefec7 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 30 Nov 2023 18:46:36 -0300 Subject: [PATCH 072/179] perf beauty: Don't use 'find ... -printf' as it isn't available in busybox MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Namhyung reported: I'm seeing a build error on my Alpine linux image which uses busybox + musl libc: In file included from trace/beauty/arch_errno_names.c:1, from builtin-trace.c:899: /build/trace/beauty/generated/arch_errno_name_array.c: In function 'arch_syscalls__strerrno': /build/trace/beauty/generated/arch_errno_name_array.c:142:49: error: unused parameter 'arch' [-Werror=unused-parameter] 142 | const char *arch_syscalls__strerrno(const char *arch, int err) It looks like busybox find command doesn't have -printf option find: unrecognized: -printf , Yesterday 9:16 PM , BusyBox v1.36.1 (2023-07-27 17:12:24 UTC) multi-call binary. Usage: find [-HL] [PATH]... [OPTIONS] [ACTIONS] Search for files and perform actions on them. First failed action stops processing of current file. Defaults: PATH is current directory, action is '-print' So just remove it and pipe find's entry to a basename loop to produce the same result. Then use an alternative loop that relies on the shell to avoid needless forks and execs. The discussion about it generated the impetus to stop doing strcmps to find the right table at each errno to string translation but instead do this just once and then use a function pointer to the right arch specific table. Suggested-by: David Laight Reported-by: Namhyung Kim Cc: Adrian Hunter Cc: Hendrik Brueckner Cc: Ian Rogers Cc: Jiri Olsa Cc: Michael Petlan Cc: Thomas Richter Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/arch_errno_names.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index cc09dcaa891e..b6e0767b4b34 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -76,7 +76,9 @@ EoHEADER # Create list of architectures that have a specific errno.h. 
archlist="" -for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | sort -r); do +for f in $toolsdir/arch/*/include/uapi/asm/errno.h; do + d=${f%/include/uapi/asm/errno.h} + arch="${d##*/}" test -f $toolsdir/arch/$arch/include/uapi/asm/errno.h && archlist="$archlist $arch" done From 54373b5d53c1f6aa6164ee5bea4761abb16b351c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 1 Dec 2023 14:52:00 -0300 Subject: [PATCH 073/179] perf env: Introduce perf_env__arch_strerrno() That will cache the arch specific function translating error numbers to strings. Reviewed-by: Ian Rogers Cc: Adrian Hunter Cc: David Laight Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/20231201203046.486596-2-acme@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-trace.c | 6 ++---- tools/perf/util/env.c | 12 ++++++++++++ tools/perf/util/env.h | 1 + 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index e541d0e2777a..109b8e64fe69 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -2470,9 +2470,8 @@ static int trace__fprintf_callchain(struct trace *trace, struct perf_sample *sam static const char *errno_to_name(struct evsel *evsel, int err) { struct perf_env *env = evsel__env(evsel); - const char *arch_name = perf_env__arch(env); - return arch_syscalls__strerrno(arch_name, err); + return perf_env__arch_strerrno(env, err); } static int trace__sys_exit(struct trace *trace, struct evsel *evsel, @@ -4264,12 +4263,11 @@ static size_t thread__dump_stats(struct thread_trace *ttrace, printed += fprintf(fp, " %9.3f %9.2f%%\n", max, pct); if (trace->errno_summary && stats->nr_failures) { - const char *arch_name = perf_env__arch(trace->host->env); int e; for (e = 0; e < stats->max_errno; ++e) { if (stats->errnos[e] != 0) - fprintf(fp, "\t\t\t\t%s: %d\n", arch_syscalls__strerrno(arch_name, e + 1), stats->errnos[e]); + fprintf(fp, "\t\t\t\t%s: %d\n", perf_env__arch_strerrno(trace->host->env, e + 1), stats->errnos[e]); } } } diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index cbc18b22ace5..a632f33646bb 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -3,6 +3,7 @@ #include "debug.h" #include "env.h" #include "util/header.h" +#include "linux/compiler.h" #include #include #include "cgroup.h" @@ -12,6 +13,7 @@ #include #include "pmus.h" #include "strbuf.h" +#include "trace/beauty/beauty.h" struct perf_env perf_env; @@ -453,6 +455,16 @@ const char *perf_env__arch(struct perf_env *env) return normalize_arch(arch_name); } +const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused) +{ +#if defined(HAVE_SYSCALL_TABLE_SUPPORT) && defined(HAVE_LIBTRACEEVENT) + const char *arch_name = perf_env__arch(env); + return arch_syscalls__strerrno(arch_name, err); +#else + return "!(HAVE_SYSCALL_TABLE_SUPPORT && HAVE_LIBTRACEEVENT)"; +#endif +} + const char *perf_env__cpuid(struct perf_env *env) { int status; diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 94596ff124d5..79f371879f45 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -164,6 +164,7 @@ int perf_env__read_cpu_topology_map(struct perf_env *env); void cpu_cache_level__free(struct cpu_cache_level *cache); const char *perf_env__arch(struct perf_env *env); +const char *perf_env__arch_strerrno(struct perf_env *env, int err); const char *perf_env__cpuid(struct perf_env *env); const char *perf_env__raw_arch(struct perf_env *env); 
int perf_env__nr_cpus_avail(struct perf_env *env); From 4acef67646f35e14d202d5f534c0fd68d7691b22 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 1 Dec 2023 17:07:34 -0300 Subject: [PATCH 074/179] perf env: Cache the arch specific strerrno function in perf_env__arch_strerrno() So that we don't have to go thru the series of strcmp(arch) calls for each id -> string translation. Reviewed-by: Ian Rogers Cc: Adrian Hunter Cc: David Laight Cc: Jiri Olsa Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/20231201203046.486596-3-acme@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/arch_errno_names.sh | 6 +++--- tools/perf/trace/beauty/beauty.h | 2 -- tools/perf/util/env.c | 6 ++++-- tools/perf/util/env.h | 5 +++++ 4 files changed, 12 insertions(+), 7 deletions(-) diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh index b6e0767b4b34..7df4bf5b55a3 100755 --- a/tools/perf/trace/beauty/arch_errno_names.sh +++ b/tools/perf/trace/beauty/arch_errno_names.sh @@ -57,13 +57,13 @@ create_arch_errno_table_func() archlist="$1" default="$2" - printf 'const char *arch_syscalls__strerrno(const char *arch, int err)\n' + printf 'arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch)\n' printf '{\n' for arch in $archlist; do printf '\tif (!strcmp(arch, "%s"))\n' $(arch_string "$arch") - printf '\t\treturn errno_to_name__%s(err);\n' $(arch_string "$arch") + printf '\t\treturn errno_to_name__%s;\n' $(arch_string "$arch") done - printf '\treturn errno_to_name__%s(err);\n' $(arch_string "$default") + printf '\treturn errno_to_name__%s;\n' $(arch_string "$default") printf '}\n' } diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h index 788e8f6bd90e..9feb794f5c6e 100644 --- a/tools/perf/trace/beauty/beauty.h +++ b/tools/perf/trace/beauty/beauty.h @@ -251,6 +251,4 @@ size_t open__scnprintf_flags(unsigned long flags, char *bf, size_t size, bool sh void syscall_arg__set_ret_scnprintf(struct syscall_arg *arg, size_t (*ret_scnprintf)(char *bf, size_t size, struct syscall_arg *arg)); -const char *arch_syscalls__strerrno(const char *arch, int err); - #endif /* _PERF_TRACE_BEAUTY_H */ diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index a632f33646bb..c68b7a004f29 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -458,8 +458,10 @@ const char *perf_env__arch(struct perf_env *env) const char *perf_env__arch_strerrno(struct perf_env *env __maybe_unused, int err __maybe_unused) { #if defined(HAVE_SYSCALL_TABLE_SUPPORT) && defined(HAVE_LIBTRACEEVENT) - const char *arch_name = perf_env__arch(env); - return arch_syscalls__strerrno(arch_name, err); + if (env->arch_strerrno == NULL) + env->arch_strerrno = arch_syscalls__strerrno_function(perf_env__arch(env)); + + return env->arch_strerrno ? 
env->arch_strerrno(err) : "no arch specific strerrno function"; #else return "!(HAVE_SYSCALL_TABLE_SUPPORT && HAVE_LIBTRACEEVENT)"; #endif diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index 79f371879f45..bf7e3c4c211f 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -53,6 +53,10 @@ struct pmu_caps { char *pmu_name; }; +typedef const char *(arch_syscalls__strerrno_t)(int err); + +arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch); + struct perf_env { char *hostname; char *os_release; @@ -135,6 +139,7 @@ struct perf_env { */ bool enabled; } clock; + arch_syscalls__strerrno_t *arch_strerrno; }; enum perf_compress_type { From 28b01743ca752cea5ab182297d8b912b22f2a2d1 Mon Sep 17 00:00:00 2001 From: Veronika Molnarova Date: Fri, 1 Dec 2023 20:46:17 +0100 Subject: [PATCH 075/179] perf test record user-regs: Fix mask for vg register The 'vg' register for arm64 shows up in --user_regs as available when masking the variable AT_HWCAP with 1 << 22 returns '1' as done in perf_regs.c. However, in subtests for support of SVE, the check for the 'vg' register is done by masking the variable AT_HWCAP with the value 0x200000 which is equals to 1 << 21 instead of 1 << 22. This results in inconsistencies on certain systems where the test expects that the 'vg' register is not operational when it is, and vice-versa. During the testing on a machine that the test expected not to have the 'vg' register available, 'perf record' with the option --user-regs showed records for the 'vg' register together with all of the others, which means that the mask for the subtest of perf_event_attr is off by one. Change the value of the mask from 0x200000 to 0x400000 to correct it. Fixes: 9440ebdc333dd12e ("perf test arm64: Add attr tests for new VG register") Reviewed-by: Leo Yan Signed-off-by: Veronika Molnarova Cc: James Clark Cc: Michael Petlan Link: https://lore.kernel.org/r/20231201194617.13012-1-vmolnaro@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 | 2 +- tools/perf/tests/attr/test-record-user-regs-sve-aarch64 | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 index fbb065842880..bed765450ca9 100644 --- a/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 +++ b/tools/perf/tests/attr/test-record-user-regs-no-sve-aarch64 @@ -6,4 +6,4 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1 ret = 129 test_ret = true arch = aarch64 -auxv = auxv["AT_HWCAP"] & 0x200000 == 0 +auxv = auxv["AT_HWCAP"] & 0x400000 == 0 diff --git a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 index c598c803221d..a65113cd7311 100644 --- a/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 +++ b/tools/perf/tests/attr/test-record-user-regs-sve-aarch64 @@ -6,7 +6,7 @@ args = --no-bpf-event --user-regs=vg kill >/dev/null 2>&1 ret = 1 test_ret = true arch = aarch64 -auxv = auxv["AT_HWCAP"] & 0x200000 == 0x200000 +auxv = auxv["AT_HWCAP"] & 0x400000 == 0x400000 kernel_since = 6.1 [event:base-record] From 10a149e4b4a9187940adbfff0f216ccb5a15aa41 Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Thu, 30 Nov 2023 18:15:49 -0800 Subject: [PATCH 076/179] perf vendor events arm64 AmpereOne: Rename BPU_FLUSH_MEM_FAULT to GPC_FLUSH_MEM_FAULT The documentation wrongly called the event as BPU_FLUSH_MEM_FAULT and now has been 
fixed. Correct the name in the perf tool as well. Fixes: a9650b7f6fc09d16 ("perf vendor events arm64: Add AmpereOne core PMU events") Reviewed-by: Ian Rogers Signed-off-by: Ilkka Koskinen Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ilkka Koskinen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Leo Yan Cc: linux-arm-kernel@lists.infradead.org Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Will Deacon Link: https://lore.kernel.org/r/20231201021550.1109196-3-ilkka@os.amperecomputing.com Signed-off-by: Arnaldo Carvalho de Melo --- .../pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json index 88b23b85e33c..879ff21e0b17 100644 --- a/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereone/core-imp-def.json @@ -110,7 +110,7 @@ { "PublicDescription": "Flushes due to memory hazards", "EventCode": "0x121", - "EventName": "BPU_FLUSH_MEM_FAULT", + "EventName": "GPC_FLUSH_MEM_FAULT", "BriefDescription": "Flushes due to memory hazards" }, { From 16438b652b464ef7d0a877d31e93ab54338f6b0a Mon Sep 17 00:00:00 2001 From: Ilkka Koskinen Date: Thu, 30 Nov 2023 18:15:50 -0800 Subject: [PATCH 077/179] perf vendor events arm64 AmpereOneX: Add core PMU events and metrics Add JSON files for AmpereOneX core PMU events and metrics. Reviewed-by: Ian Rogers Signed-off-by: Ilkka Koskinen Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231201021550.1109196-4-ilkka@os.amperecomputing.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/arm64/ampere/ampereonex/branch.json | 125 +++++ .../arch/arm64/ampere/ampereonex/bus.json | 20 + .../arch/arm64/ampere/ampereonex/cache.json | 206 ++++++++ .../arm64/ampere/ampereonex/core-imp-def.json | 464 ++++++++++++++++++ .../arm64/ampere/ampereonex/exception.json | 47 ++ .../arm64/ampere/ampereonex/instruction.json | 128 +++++ .../arm64/ampere/ampereonex/intrinsic.json | 14 + .../arch/arm64/ampere/ampereonex/memory.json | 41 ++ .../arch/arm64/ampere/ampereonex/metrics.json | 442 +++++++++++++++++ .../arch/arm64/ampere/ampereonex/mmu.json | 170 +++++++ .../arm64/ampere/ampereonex/pipeline.json | 41 ++ .../arch/arm64/ampere/ampereonex/spe.json | 14 + tools/perf/pmu-events/arch/arm64/mapfile.csv | 1 + 13 files changed, 1713 insertions(+) create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json create mode 100644 
tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json create mode 100644 tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json new file mode 100644 index 000000000000..a632755fc086 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/branch.json @@ -0,0 +1,125 @@ +[ + { + "ArchStdEvent": "BR_IMMED_SPEC" + }, + { + "ArchStdEvent": "BR_RETURN_SPEC" + }, + { + "ArchStdEvent": "BR_INDIRECT_SPEC" + }, + { + "ArchStdEvent": "BR_MIS_PRED" + }, + { + "ArchStdEvent": "BR_PRED" + }, + { + "PublicDescription": "Instruction architecturally executed, branch not taken", + "EventCode": "0x8107", + "EventName": "BR_SKIP_RETIRED", + "BriefDescription": "Instruction architecturally executed, branch not taken" + }, + { + "PublicDescription": "Instruction architecturally executed, immediate branch taken", + "EventCode": "0x8108", + "EventName": "BR_IMMED_TAKEN_RETIRED", + "BriefDescription": "Instruction architecturally executed, immediate branch taken" + }, + { + "PublicDescription": "Instruction architecturally executed, indirect branch excluding return retired", + "EventCode": "0x810c", + "EventName": "BR_INDNR_TAKEN_RETIRED", + "BriefDescription": "Instruction architecturally executed, indirect branch excluding return retired" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted immediate branch", + "EventCode": "0x8110", + "EventName": "BR_IMMED_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted immediate branch" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted immediate branch", + "EventCode": "0x8111", + "EventName": "BR_IMMED_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted immediate branch" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted indirect branch", + "EventCode": "0x8112", + "EventName": "BR_IND_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted indirect branch" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch", + "EventCode": "0x8113", + "EventName": "BR_IND_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted indirect branch" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted procedure return", + "EventCode": "0x8114", + "EventName": "BR_RETURN_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted procedure return" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted procedure return", + "EventCode": "0x8115", + "EventName": "BR_RETURN_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted procedure return" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted indirect branch excluding return", + "EventCode": "0x8116", + "EventName": "BR_INDNR_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted indirect branch excluding return" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted indirect branch excluding return", + "EventCode": "0x8117", + "EventName": "BR_INDNR_MIS_PRED_RETIRED", + "BriefDescription": "Instruction 
architecturally executed, mispredicted indirect branch excluding return" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted branch, taken", + "EventCode": "0x8118", + "EventName": "BR_TAKEN_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted branch, taken" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted branch, taken", + "EventCode": "0x8119", + "EventName": "BR_TAKEN_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted branch, taken" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted branch, not taken", + "EventCode": "0x811a", + "EventName": "BR_SKIP_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted branch, not taken" + }, + { + "PublicDescription": "Instruction architecturally executed, mispredicted branch, not taken", + "EventCode": "0x811b", + "EventName": "BR_SKIP_MIS_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, mispredicted branch, not taken" + }, + { + "PublicDescription": "Instruction architecturally executed, predicted branch", + "EventCode": "0x811c", + "EventName": "BR_PRED_RETIRED", + "BriefDescription": "Instruction architecturally executed, predicted branch" + }, + { + "PublicDescription": "Instruction architecturally executed, indirect branch", + "EventCode": "0x811d", + "EventName": "BR_IND_RETIRED", + "BriefDescription": "Instruction architecturally executed, indirect branch" + }, + { + "PublicDescription": "Branch Record captured.", + "EventCode": "0x811f", + "EventName": "BRB_FILTRATE", + "BriefDescription": "Branch Record captured." + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json new file mode 100644 index 000000000000..2aeb9907831d --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/bus.json @@ -0,0 +1,20 @@ +[ + { + "ArchStdEvent": "CPU_CYCLES" + }, + { + "ArchStdEvent": "BUS_CYCLES" + }, + { + "ArchStdEvent": "BUS_ACCESS_RD" + }, + { + "ArchStdEvent": "BUS_ACCESS_WR" + }, + { + "ArchStdEvent": "BUS_ACCESS" + }, + { + "ArchStdEvent": "CNT_CYCLES" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json new file mode 100644 index 000000000000..c50d8e930b05 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/cache.json @@ -0,0 +1,206 @@ +[ + { + "ArchStdEvent": "L1D_CACHE_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_WR" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_RD" + }, + { + "ArchStdEvent": "L1D_CACHE_INVAL" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL_RD" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_RD" + }, + { + "ArchStdEvent": "L2D_CACHE_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL_RD" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL_WR" + }, + { + "ArchStdEvent": "L2D_CACHE_WB_VICTIM" + }, + { + "ArchStdEvent": "L2D_CACHE_WB_CLEAN" + }, + { + "ArchStdEvent": "L2D_CACHE_INVAL" + }, + { + "ArchStdEvent": "L1I_CACHE_REFILL" + }, + { + "ArchStdEvent": "L1I_TLB_REFILL" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL" + }, + { + "ArchStdEvent": "L1D_CACHE" + }, + { + "ArchStdEvent": "L1D_TLB_REFILL" + }, + { + "ArchStdEvent": "L1I_CACHE" + }, + { + "ArchStdEvent": "L2D_CACHE" + }, + { + "ArchStdEvent": "L2D_CACHE_REFILL" + }, + { + "ArchStdEvent": "L2D_CACHE_WB" + }, + { + 
"ArchStdEvent": "L1D_TLB" + }, + { + "ArchStdEvent": "L1I_TLB" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL" + }, + { + "ArchStdEvent": "L2I_TLB_REFILL" + }, + { + "ArchStdEvent": "L2D_TLB" + }, + { + "ArchStdEvent": "L2I_TLB" + }, + { + "ArchStdEvent": "DTLB_WALK" + }, + { + "ArchStdEvent": "ITLB_WALK" + }, + { + "ArchStdEvent": "L1D_CACHE_REFILL_WR" + }, + { + "ArchStdEvent": "L1D_CACHE_LMISS_RD" + }, + { + "ArchStdEvent": "L1I_CACHE_LMISS" + }, + { + "ArchStdEvent": "L2D_CACHE_LMISS_RD" + }, + { + "PublicDescription": "Level 1 data or unified cache demand access", + "EventCode": "0x8140", + "EventName": "L1D_CACHE_RW", + "BriefDescription": "Level 1 data or unified cache demand access" + }, + { + "PublicDescription": "Level 1 data or unified cache preload or prefetch", + "EventCode": "0x8142", + "EventName": "L1D_CACHE_PRFM", + "BriefDescription": "Level 1 data or unified cache preload or prefetch" + }, + { + "PublicDescription": "Level 1 data or unified cache refill, preload or prefetch", + "EventCode": "0x8146", + "EventName": "L1D_CACHE_REFILL_PRFM", + "BriefDescription": "Level 1 data or unified cache refill, preload or prefetch" + }, + { + "ArchStdEvent": "L1D_TLB_RD" + }, + { + "ArchStdEvent": "L1D_TLB_WR" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL_RD" + }, + { + "ArchStdEvent": "L2D_TLB_REFILL_WR" + }, + { + "ArchStdEvent": "L2D_TLB_RD" + }, + { + "ArchStdEvent": "L2D_TLB_WR" + }, + { + "PublicDescription": "L1D TLB miss", + "EventCode": "0xD600", + "EventName": "L1D_TLB_MISS", + "BriefDescription": "L1D TLB miss" + }, + { + "PublicDescription": "Level 1 prefetcher, load prefetch requests generated", + "EventCode": "0xd606", + "EventName": "L1_PREFETCH_LD_GEN", + "BriefDescription": "Level 1 prefetcher, load prefetch requests generated" + }, + { + "PublicDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache", + "EventCode": "0xd607", + "EventName": "L1_PREFETCH_LD_FILL", + "BriefDescription": "Level 1 prefetcher, load prefetch fills into the level 1 cache" + }, + { + "PublicDescription": "Level 1 prefetcher, load prefetch to level 2 generated", + "EventCode": "0xd608", + "EventName": "L1_PREFETCH_L2_REQ", + "BriefDescription": "Level 1 prefetcher, load prefetch to level 2 generated" + }, + { + "PublicDescription": "L1 prefetcher, distance was reset", + "EventCode": "0xd609", + "EventName": "L1_PREFETCH_DIST_RST", + "BriefDescription": "L1 prefetcher, distance was reset" + }, + { + "PublicDescription": "L1 prefetcher, distance was increased", + "EventCode": "0xd60a", + "EventName": "L1_PREFETCH_DIST_INC", + "BriefDescription": "L1 prefetcher, distance was increased" + }, + { + "PublicDescription": "Level 1 prefetcher, table entry is trained", + "EventCode": "0xd60b", + "EventName": "L1_PREFETCH_ENTRY_TRAINED", + "BriefDescription": "Level 1 prefetcher, table entry is trained" + }, + { + "PublicDescription": "L1 data cache refill - Read or Write", + "EventCode": "0xd60e", + "EventName": "L1D_CACHE_REFILL_RW", + "BriefDescription": "L1 data cache refill - Read or Write" + }, + { + "PublicDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills", + "EventCode": "0xD701", + "EventName": "L2C_INST_REFILL", + "BriefDescription": "Level 2 cache refill from instruction-side miss, including IMMU refills" + }, + { + "PublicDescription": "Level 2 cache refill from data-side miss, including DMMU refills", + "EventCode": "0xD702", + "EventName": "L2C_DATA_REFILL", + "BriefDescription": "Level 2 cache refill from data-side miss, including DMMU 
refills" + }, + { + "PublicDescription": "Level 2 cache prefetcher, load prefetch requests generated", + "EventCode": "0xD703", + "EventName": "L2_PREFETCH_REQ", + "BriefDescription": "Level 2 cache prefetcher, load prefetch requests generated" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json new file mode 100644 index 000000000000..eb5a2208d260 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/core-imp-def.json @@ -0,0 +1,464 @@ +[ + { + "PublicDescription": "Level 2 prefetch requests, refilled to L2 cache", + "EventCode": "0x10A", + "EventName": "L2_PREFETCH_REFILL", + "BriefDescription": "Level 2 prefetch requests, refilled to L2 cache" + }, + { + "PublicDescription": "Level 2 prefetch requests, late", + "EventCode": "0x10B", + "EventName": "L2_PREFETCH_UPGRADE", + "BriefDescription": "Level 2 prefetch requests, late" + }, + { + "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB", + "EventCode": "0x110", + "EventName": "BPU_HIT_BTB", + "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB" + }, + { + "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB", + "EventCode": "0x111", + "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB", + "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB" + }, + { + "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor", + "EventCode": "0x112", + "EventName": "BPU_HIT_INDIRECT_PREDICTOR", + "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor" + }, + { + "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor", + "EventCode": "0x113", + "EventName": "BPU_HIT_RSB", + "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor" + }, + { + "PublicDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB", + "EventCode": "0x114", + "EventName": "BPU_UNCONDITIONAL_BRANCH_MISS_BTB", + "BriefDescription": "Predictable unconditional branch speculatively executed that did not hit any level of BTB" + }, + { + "PublicDescription": "Predictable branch speculatively executed, unpredicted", + "EventCode": "0x115", + "EventName": "BPU_BRANCH_NO_HIT", + "BriefDescription": "Predictable branch speculatively executed, unpredicted" + }, + { + "PublicDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict", + "EventCode": "0x116", + "EventName": "BPU_HIT_BTB_AND_MISPREDICT", + "BriefDescription": "Predictable branch speculatively executed that hit any level of BTB that mispredict" + }, + { + "PublicDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict", + "EventCode": "0x117", + "EventName": "BPU_CONDITIONAL_BRANCH_HIT_BTB_AND_MISPREDICT", + "BriefDescription": "Predictable conditional branch speculatively executed that hit any level of BTB that (direction) mispredict" + }, + { + "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict", + 
"EventCode": "0x118", + "EventName": "BPU_INDIRECT_BRANCH_HIT_BTB_AND_MISPREDICT", + "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the indirect predictor that mispredict" + }, + { + "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict", + "EventCode": "0x119", + "EventName": "BPU_HIT_RSB_AND_MISPREDICT", + "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the return predictor that mispredict" + }, + { + "PublicDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict", + "EventCode": "0x11a", + "EventName": "BPU_MISS_RSB_AND_MISPREDICT", + "BriefDescription": "Predictable taken branch speculatively executed that hit any level of BTB that access the overflow/underflow return predictor that mispredict" + }, + { + "PublicDescription": "Predictable branch speculatively executed, unpredicted, that mispredict", + "EventCode": "0x11b", + "EventName": "BPU_NO_PREDICTION_MISPREDICT", + "BriefDescription": "Predictable branch speculatively executed, unpredicted, that mispredict" + }, + { + "PublicDescription": "Preditable branch update the BTB region buffer entry", + "EventCode": "0x11c", + "EventName": "BPU_BTB_UPDATE", + "BriefDescription": "Preditable branch update the BTB region buffer entry" + }, + { + "PublicDescription": "Count predict pipe stalls due to speculative return address predictor full", + "EventCode": "0x11d", + "EventName": "BPU_RSB_FULL_STALL", + "BriefDescription": "Count predict pipe stalls due to speculative return address predictor full" + }, + { + "PublicDescription": "Macro-ops speculatively decoded", + "EventCode": "0x11f", + "EventName": "ICF_INST_SPEC_DECODE", + "BriefDescription": "Macro-ops speculatively decoded" + }, + { + "PublicDescription": "Flushes", + "EventCode": "0x120", + "EventName": "GPC_FLUSH", + "BriefDescription": "Flushes" + }, + { + "PublicDescription": "Flushes due to memory hazards", + "EventCode": "0x121", + "EventName": "GPC_FLUSH_MEM_FAULT", + "BriefDescription": "Flushes due to memory hazards" + }, + { + "PublicDescription": "ETM extout bit 0", + "EventCode": "0x141", + "EventName": "MSC_ETM_EXTOUT0", + "BriefDescription": "ETM extout bit 0" + }, + { + "PublicDescription": "ETM extout bit 1", + "EventCode": "0x142", + "EventName": "MSC_ETM_EXTOUT1", + "BriefDescription": "ETM extout bit 1" + }, + { + "PublicDescription": "ETM extout bit 2", + "EventCode": "0x143", + "EventName": "MSC_ETM_EXTOUT2", + "BriefDescription": "ETM extout bit 2" + }, + { + "PublicDescription": "ETM extout bit 3", + "EventCode": "0x144", + "EventName": "MSC_ETM_EXTOUT3", + "BriefDescription": "ETM extout bit 3" + }, + { + "PublicDescription": "Bus request sn", + "EventCode": "0x156", + "EventName": "L2C_SNOOP", + "BriefDescription": "Bus request sn" + }, + { + "PublicDescription": "L2 TXDAT LCRD blocked", + "EventCode": "0x169", + "EventName": "L2C_DAT_CRD_STALL", + "BriefDescription": "L2 TXDAT LCRD blocked" + }, + { + "PublicDescription": "L2 TXRSP LCRD blocked", + "EventCode": "0x16a", + "EventName": "L2C_RSP_CRD_STALL", + "BriefDescription": "L2 TXRSP LCRD blocked" + }, + { + "PublicDescription": "L2 TXREQ LCRD blocked", + "EventCode": "0x16b", + "EventName": "L2C_REQ_CRD_STALL", + "BriefDescription": "L2 TXREQ LCRD blocked" + }, + { + "PublicDescription": "Early 
mispredict", + "EventCode": "0xD100", + "EventName": "ICF_EARLY_MIS_PRED", + "BriefDescription": "Early mispredict" + }, + { + "PublicDescription": "FEQ full cycles", + "EventCode": "0xD101", + "EventName": "ICF_FEQ_FULL", + "BriefDescription": "FEQ full cycles" + }, + { + "PublicDescription": "Instruction FIFO Full", + "EventCode": "0xD102", + "EventName": "ICF_INST_FIFO_FULL", + "BriefDescription": "Instruction FIFO Full" + }, + { + "PublicDescription": "L1I TLB miss", + "EventCode": "0xD103", + "EventName": "L1I_TLB_MISS", + "BriefDescription": "L1I TLB miss" + }, + { + "PublicDescription": "ICF sent 0 instructions to IDR this cycle", + "EventCode": "0xD104", + "EventName": "ICF_STALL", + "BriefDescription": "ICF sent 0 instructions to IDR this cycle" + }, + { + "PublicDescription": "PC FIFO Full", + "EventCode": "0xD105", + "EventName": "ICF_PC_FIFO_FULL", + "BriefDescription": "PC FIFO Full" + }, + { + "PublicDescription": "Stall due to BOB ID", + "EventCode": "0xD200", + "EventName": "IDR_STALL_BOB_ID", + "BriefDescription": "Stall due to BOB ID" + }, + { + "PublicDescription": "Dispatch stall due to LOB entries", + "EventCode": "0xD201", + "EventName": "IDR_STALL_LOB_ID", + "BriefDescription": "Dispatch stall due to LOB entries" + }, + { + "PublicDescription": "Dispatch stall due to SOB entries", + "EventCode": "0xD202", + "EventName": "IDR_STALL_SOB_ID", + "BriefDescription": "Dispatch stall due to SOB entries" + }, + { + "PublicDescription": "Dispatch stall due to IXU scheduler entries", + "EventCode": "0xD203", + "EventName": "IDR_STALL_IXU_SCHED", + "BriefDescription": "Dispatch stall due to IXU scheduler entries" + }, + { + "PublicDescription": "Dispatch stall due to FSU scheduler entries", + "EventCode": "0xD204", + "EventName": "IDR_STALL_FSU_SCHED", + "BriefDescription": "Dispatch stall due to FSU scheduler entries" + }, + { + "PublicDescription": "Dispatch stall due to ROB entries", + "EventCode": "0xD205", + "EventName": "IDR_STALL_ROB_ID", + "BriefDescription": "Dispatch stall due to ROB entries" + }, + { + "PublicDescription": "Dispatch stall due to flush", + "EventCode": "0xD206", + "EventName": "IDR_STALL_FLUSH", + "BriefDescription": "Dispatch stall due to flush" + }, + { + "PublicDescription": "Dispatch stall due to WFI", + "EventCode": "0xD207", + "EventName": "IDR_STALL_WFI", + "BriefDescription": "Dispatch stall due to WFI" + }, + { + "PublicDescription": "Number of SWOB drains triggered by timeout", + "EventCode": "0xD208", + "EventName": "IDR_STALL_SWOB_TIMEOUT", + "BriefDescription": "Number of SWOB drains triggered by timeout" + }, + { + "PublicDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain", + "EventCode": "0xD209", + "EventName": "IDR_STALL_SWOB_RAW", + "BriefDescription": "Number of SWOB drains triggered by system register or special-purpose register read-after-write or specific special-purpose register writes that cause SWOB drain" + }, + { + "PublicDescription": "Number of SWOB drains triggered by system register write when SWOB full", + "EventCode": "0xD20A", + "EventName": "IDR_STALL_SWOB_FULL", + "BriefDescription": "Number of SWOB drains triggered by system register write when SWOB full" + }, + { + "PublicDescription": "Dispatch stall due to L1 instruction cache miss", + "EventCode": "0xD20B", + "EventName": "STALL_FRONTEND_CACHE", + "BriefDescription": "Dispatch stall due to L1 instruction cache miss" + }, + { + 
"PublicDescription": "Dispatch stall due to L1 data cache miss", + "EventCode": "0xD20D", + "EventName": "STALL_BACKEND_CACHE", + "BriefDescription": "Dispatch stall due to L1 data cache miss" + }, + { + "PublicDescription": "Dispatch stall due to lack of any core resource", + "EventCode": "0xD20F", + "EventName": "STALL_BACKEND_RESOURCE", + "BriefDescription": "Dispatch stall due to lack of any core resource" + }, + { + "PublicDescription": "Instructions issued by the scheduler", + "EventCode": "0xD300", + "EventName": "IXU_NUM_UOPS_ISSUED", + "BriefDescription": "Instructions issued by the scheduler" + }, + { + "PublicDescription": "Any uop issued was canceled for any reason", + "EventCode": "0xD301", + "EventName": "IXU_ISSUE_CANCEL", + "BriefDescription": "Any uop issued was canceled for any reason" + }, + { + "PublicDescription": "A load wakeup to the scheduler has been canceled", + "EventCode": "0xD302", + "EventName": "IXU_LOAD_CANCEL", + "BriefDescription": "A load wakeup to the scheduler has been canceled" + }, + { + "PublicDescription": "The scheduler had to cancel one slow Uop due to resource conflict", + "EventCode": "0xD303", + "EventName": "IXU_SLOW_CANCEL", + "BriefDescription": "The scheduler had to cancel one slow Uop due to resource conflict" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXA", + "EventCode": "0xD304", + "EventName": "IXU_IXA_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXA" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXA Par 0", + "EventCode": "0xD305", + "EventName": "IXU_IXA_PAR0_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXA Par 0" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXA Par 1", + "EventCode": "0xD306", + "EventName": "IXU_IXA_PAR1_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXA Par 1" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXB", + "EventCode": "0xD307", + "EventName": "IXU_IXB_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXB" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXB Par 0", + "EventCode": "0xD308", + "EventName": "IXU_IXB_PAR0_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXB Par 0" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXB Par 1", + "EventCode": "0xD309", + "EventName": "IXU_IXB_PAR1_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXB Par 1" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXC", + "EventCode": "0xD30A", + "EventName": "IXU_IXC_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXC" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXC Par 0", + "EventCode": "0xD30B", + "EventName": "IXU_IXC_PAR0_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXC Par 0" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXC Par 1", + "EventCode": "0xD30C", + "EventName": "IXU_IXC_PAR1_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXC Par 1" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXD", + "EventCode": "0xD30D", + "EventName": "IXU_IXD_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXD" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXD Par 0", + "EventCode": "0xD30E", + "EventName": "IXU_IXD_PAR0_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXD Par 0" + }, + { + "PublicDescription": "Uops issued by the scheduler on IXD Par 
1", + "EventCode": "0xD30F", + "EventName": "IXU_IXD_PAR1_ISSUED", + "BriefDescription": "Uops issued by the scheduler on IXD Par 1" + }, + { + "PublicDescription": "Uops issued by the FSU scheduler", + "EventCode": "0xD400", + "EventName": "FSU_ISSUED", + "BriefDescription": "Uops issued by the FSU scheduler" + }, + { + "PublicDescription": "Uops issued by the scheduler on FSX", + "EventCode": "0xD401", + "EventName": "FSU_FSX_ISSUED", + "BriefDescription": "Uops issued by the scheduler on FSX" + }, + { + "PublicDescription": "Uops issued by the scheduler on FSY", + "EventCode": "0xD402", + "EventName": "FSU_FSY_ISSUED", + "BriefDescription": "Uops issued by the scheduler on FSY" + }, + { + "PublicDescription": "Uops issued by the scheduler on FSZ", + "EventCode": "0xD403", + "EventName": "FSU_FSZ_ISSUED", + "BriefDescription": "Uops issued by the scheduler on FSZ" + }, + { + "PublicDescription": "Uops canceled (load cancels)", + "EventCode": "0xD404", + "EventName": "FSU_CANCEL", + "BriefDescription": "Uops canceled (load cancels)" + }, + { + "PublicDescription": "Count scheduler stalls due to divide/sqrt", + "EventCode": "0xD405", + "EventName": "FSU_DIV_SQRT_STALL", + "BriefDescription": "Count scheduler stalls due to divide/sqrt" + }, + { + "PublicDescription": "Number of SWOB drains", + "EventCode": "0xD500", + "EventName": "GPC_SWOB_DRAIN", + "BriefDescription": "Number of SWOB drains" + }, + { + "PublicDescription": "GPC detected a Breakpoint instruction match", + "EventCode": "0xD501", + "EventName": "BREAKPOINT_MATCH", + "BriefDescription": "GPC detected a Breakpoint instruction match" + }, + { + "PublicDescription": "Core progress monitor triggered", + "EventCode": "0xd502", + "EventName": "GPC_CPM_TRIGGER", + "BriefDescription": "Core progress monitor triggered" + }, + { + "PublicDescription": "Fill buffer full", + "EventCode": "0xD601", + "EventName": "OFB_FULL", + "BriefDescription": "Fill buffer full" + }, + { + "PublicDescription": "Load satisified from store forwarded data", + "EventCode": "0xD605", + "EventName": "LD_FROM_ST_FWD", + "BriefDescription": "Load satisified from store forwarded data" + }, + { + "PublicDescription": "Store retirement pipe stall", + "EventCode": "0xD60C", + "EventName": "LSU_ST_RETIRE_STALL", + "BriefDescription": "Store retirement pipe stall" + }, + { + "PublicDescription": "LSU detected a Watchpoint data match", + "EventCode": "0xD60D", + "EventName": "WATCHPOINT_MATCH", + "BriefDescription": "LSU detected a Watchpoint data match" + }, + { + "PublicDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature", + "EventCode": "0xda00", + "EventName": "MSC_ETM_COMMIT_STALL", + "BriefDescription": "Counts cycles that MSC is telling GPC to stall commit due to ETM ISTALL feature" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json new file mode 100644 index 000000000000..bd59ba7b74e4 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/exception.json @@ -0,0 +1,47 @@ +[ + { + "ArchStdEvent": "EXC_UNDEF" + }, + { + "ArchStdEvent": "EXC_SVC" + }, + { + "ArchStdEvent": "EXC_PABORT" + }, + { + "ArchStdEvent": "EXC_DABORT" + }, + { + "ArchStdEvent": "EXC_IRQ" + }, + { + "ArchStdEvent": "EXC_FIQ" + }, + { + "ArchStdEvent": "EXC_HVC" + }, + { + "ArchStdEvent": "EXC_TRAP_PABORT" + }, + { + "ArchStdEvent": "EXC_TRAP_DABORT" + }, + { + "ArchStdEvent": "EXC_TRAP_OTHER" + }, + { + "ArchStdEvent": 
"EXC_TRAP_IRQ" + }, + { + "ArchStdEvent": "EXC_TRAP_FIQ" + }, + { + "ArchStdEvent": "EXC_TAKEN" + }, + { + "ArchStdEvent": "EXC_RETURN" + }, + { + "ArchStdEvent": "EXC_SMC" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json new file mode 100644 index 000000000000..a6a20f541e33 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/instruction.json @@ -0,0 +1,128 @@ +[ + { + "ArchStdEvent": "SW_INCR" + }, + { + "ArchStdEvent": "ST_RETIRED" + }, + { + "ArchStdEvent": "LD_SPEC" + }, + { + "ArchStdEvent": "ST_SPEC" + }, + { + "ArchStdEvent": "LDST_SPEC" + }, + { + "ArchStdEvent": "DP_SPEC" + }, + { + "ArchStdEvent": "ASE_SPEC" + }, + { + "ArchStdEvent": "VFP_SPEC" + }, + { + "ArchStdEvent": "PC_WRITE_SPEC" + }, + { + "ArchStdEvent": "BR_IMMED_RETIRED" + }, + { + "ArchStdEvent": "BR_RETURN_RETIRED" + }, + { + "ArchStdEvent": "CRYPTO_SPEC" + }, + { + "ArchStdEvent": "ISB_SPEC" + }, + { + "ArchStdEvent": "DSB_SPEC" + }, + { + "ArchStdEvent": "DMB_SPEC" + }, + { + "ArchStdEvent": "RC_LD_SPEC" + }, + { + "ArchStdEvent": "RC_ST_SPEC" + }, + { + "ArchStdEvent": "INST_RETIRED" + }, + { + "ArchStdEvent": "CID_WRITE_RETIRED" + }, + { + "ArchStdEvent": "PC_WRITE_RETIRED" + }, + { + "ArchStdEvent": "INST_SPEC" + }, + { + "ArchStdEvent": "TTBR_WRITE_RETIRED" + }, + { + "ArchStdEvent": "BR_RETIRED" + }, + { + "ArchStdEvent": "BR_MIS_PRED_RETIRED" + }, + { + "ArchStdEvent": "OP_RETIRED" + }, + { + "ArchStdEvent": "OP_SPEC" + }, + { + "PublicDescription": "Operation speculatively executed - ASE Scalar", + "EventCode": "0xd210", + "EventName": "ASE_SCALAR_SPEC", + "BriefDescription": "Operation speculatively executed - ASE Scalar" + }, + { + "PublicDescription": "Operation speculatively executed - ASE Vector", + "EventCode": "0xd211", + "EventName": "ASE_VECTOR_SPEC", + "BriefDescription": "Operation speculatively executed - ASE Vector" + }, + { + "PublicDescription": "Barrier speculatively executed, CSDB", + "EventCode": "0x7f", + "EventName": "CSDB_SPEC", + "BriefDescription": "Barrier speculatively executed, CSDB" + }, + { + "PublicDescription": "Prefetch sent to L2.", + "EventCode": "0xd106", + "EventName": "ICF_PREFETCH_DISPATCH", + "BriefDescription": "Prefetch sent to L2." + }, + { + "PublicDescription": "Prefetch response received but was dropped since we don't support inflight upgrades.", + "EventCode": "0xd107", + "EventName": "ICF_PREFETCH_DROPPED_NO_UPGRADE", + "BriefDescription": "Prefetch response received but was dropped since we don't support inflight upgrades." + }, + { + "PublicDescription": "Prefetch request missed TLB.", + "EventCode": "0xd108", + "EventName": "ICF_PREFETCH_DROPPED_TLB_MISS", + "BriefDescription": "Prefetch request missed TLB." + }, + { + "PublicDescription": "Prefetch request dropped since duplicate was found in TLB.", + "EventCode": "0xd109", + "EventName": "ICF_PREFETCH_DROPPED_DUPLICATE", + "BriefDescription": "Prefetch request dropped since duplicate was found in TLB." + }, + { + "PublicDescription": "Prefetch request dropped since it was found in cache.", + "EventCode": "0xd10a", + "EventName": "ICF_PREFETCH_DROPPED_CACHE_HIT", + "BriefDescription": "Prefetch request dropped since it was found in cache." 
+ } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json new file mode 100644 index 000000000000..7ecffb989ae0 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/intrinsic.json @@ -0,0 +1,14 @@ +[ + { + "ArchStdEvent": "LDREX_SPEC" + }, + { + "ArchStdEvent": "STREX_PASS_SPEC" + }, + { + "ArchStdEvent": "STREX_FAIL_SPEC" + }, + { + "ArchStdEvent": "STREX_SPEC" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json new file mode 100644 index 000000000000..a211d94aacde --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/memory.json @@ -0,0 +1,41 @@ +[ + { + "ArchStdEvent": "LD_RETIRED" + }, + { + "ArchStdEvent": "MEM_ACCESS_RD" + }, + { + "ArchStdEvent": "MEM_ACCESS_WR" + }, + { + "ArchStdEvent": "LD_ALIGN_LAT" + }, + { + "ArchStdEvent": "ST_ALIGN_LAT" + }, + { + "ArchStdEvent": "MEM_ACCESS" + }, + { + "ArchStdEvent": "MEMORY_ERROR" + }, + { + "ArchStdEvent": "LDST_ALIGN_LAT" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED_RD" + }, + { + "ArchStdEvent": "MEM_ACCESS_CHECKED_WR" + }, + { + "PublicDescription": "Flushes due to memory hazards", + "EventCode": "0x121", + "EventName": "BPU_FLUSH_MEM_FAULT", + "BriefDescription": "Flushes due to memory hazards" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json new file mode 100644 index 000000000000..c5d1d22bd034 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/metrics.json @@ -0,0 +1,442 @@ +[ + { + "MetricName": "branch_miss_pred_rate", + "MetricExpr": "BR_MIS_PRED / BR_PRED", + "BriefDescription": "Branch predictor misprediction rate. May not count branches that are never resolved because they are in the misprediction shadow of an earlier branch", + "MetricGroup": "branch", + "ScaleUnit": "100%" + }, + { + "MetricName": "bus_utilization", + "MetricExpr": "BUS_ACCESS / (BUS_CYCLES * 1)", + "BriefDescription": "Core-to-uncore bus utilization", + "MetricGroup": "Bus", + "ScaleUnit": "100percent of bus cycles" + }, + { + "MetricName": "l1d_cache_miss_ratio", + "MetricExpr": "L1D_CACHE_REFILL / L1D_CACHE", + "BriefDescription": "This metric measures the ratio of level 1 data cache accesses missed to the total number of level 1 data cache accesses. This gives an indication of the effectiveness of the level 1 data cache.", + "MetricGroup": "Miss_Ratio;L1D_Cache_Effectiveness", + "ScaleUnit": "1per cache access" + }, + { + "MetricName": "l1i_cache_miss_ratio", + "MetricExpr": "L1I_CACHE_REFILL / L1I_CACHE", + "BriefDescription": "This metric measures the ratio of level 1 instruction cache accesses missed to the total number of level 1 instruction cache accesses. 
This gives an indication of the effectiveness of the level 1 instruction cache.", + "MetricGroup": "Miss_Ratio;L1I_Cache_Effectiveness", + "ScaleUnit": "1per cache access" + }, + { + "MetricName": "Miss_Ratio;l1d_cache_read_miss", + "MetricExpr": "L1D_CACHE_LMISS_RD / L1D_CACHE_RD", + "BriefDescription": "L1D cache read miss rate", + "MetricGroup": "Cache", + "ScaleUnit": "1per cache read access" + }, + { + "MetricName": "l2_cache_miss_ratio", + "MetricExpr": "L2D_CACHE_REFILL / L2D_CACHE", + "BriefDescription": "This metric measures the ratio of level 2 cache accesses missed to the total number of level 2 cache accesses. This gives an indication of the effectiveness of the level 2 cache, which is a unified cache that stores both data and instruction. Note that cache accesses in this cache are either data memory access or instruction fetch as this is a unified cache.", + "MetricGroup": "Miss_Ratio;L2_Cache_Effectiveness", + "ScaleUnit": "1per cache access" + }, + { + "MetricName": "l1i_cache_read_miss_rate", + "MetricExpr": "L1I_CACHE_LMISS / L1I_CACHE", + "BriefDescription": "L1I cache read miss rate", + "MetricGroup": "Cache", + "ScaleUnit": "1per cache access" + }, + { + "MetricName": "l2d_cache_read_miss_rate", + "MetricExpr": "L2D_CACHE_LMISS_RD / L2D_CACHE_RD", + "BriefDescription": "L2 cache read miss rate", + "MetricGroup": "Cache", + "ScaleUnit": "1per cache read access" + }, + { + "MetricName": "l1d_cache_miss_mpki", + "MetricExpr": "(L1D_CACHE_LMISS_RD * 1e3) / INST_RETIRED", + "BriefDescription": "Misses per thousand instructions (data)", + "MetricGroup": "Cache", + "ScaleUnit": "1MPKI" + }, + { + "MetricName": "l1i_cache_miss_mpki", + "MetricExpr": "(L1I_CACHE_LMISS * 1e3) / INST_RETIRED", + "BriefDescription": "Misses per thousand instructions (instruction)", + "MetricGroup": "Cache", + "ScaleUnit": "1MPKI" + }, + { + "MetricName": "simd_percentage", + "MetricExpr": "ASE_SPEC / INST_SPEC", + "BriefDescription": "This metric measures advanced SIMD operations as a percentage of total operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "crypto_percentage", + "MetricExpr": "CRYPTO_SPEC / INST_SPEC", + "BriefDescription": "This metric measures crypto operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "gflops", + "MetricExpr": "VFP_SPEC / (duration_time * 1e9)", + "BriefDescription": "Giga-floating point operations per second", + "MetricGroup": "InstructionMix" + }, + { + "MetricName": "integer_dp_percentage", + "MetricExpr": "DP_SPEC / INST_SPEC", + "BriefDescription": "This metric measures scalar integer operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "ipc", + "MetricExpr": "INST_RETIRED / CPU_CYCLES", + "BriefDescription": "This metric measures the number of instructions retired per cycle.", + "MetricGroup": "General", + "ScaleUnit": "1per cycle" + }, + { + "MetricName": "load_percentage", + "MetricExpr": "LD_SPEC / INST_SPEC", + "BriefDescription": "This metric measures load operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "load_store_spec_rate", + "MetricExpr": "LDST_SPEC / INST_SPEC", + "BriefDescription": "The rate of load or store 
instructions speculatively executed to overall instructions speclatively executed", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "retired_mips", + "MetricExpr": "INST_RETIRED / (duration_time * 1e6)", + "BriefDescription": "Millions of instructions per second", + "MetricGroup": "InstructionMix" + }, + { + "MetricName": "spec_utilization_mips", + "MetricExpr": "INST_SPEC / (duration_time * 1e6)", + "BriefDescription": "Millions of instructions per second", + "MetricGroup": "PEutilization" + }, + { + "MetricName": "pc_write_spec_rate", + "MetricExpr": "PC_WRITE_SPEC / INST_SPEC", + "BriefDescription": "The rate of software change of the PC speculatively executed to overall instructions speclatively executed", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "store_percentage", + "MetricExpr": "ST_SPEC / INST_SPEC", + "BriefDescription": "This metric measures store operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "scalar_fp_percentage", + "MetricExpr": "VFP_SPEC / INST_SPEC", + "BriefDescription": "This metric measures scalar floating point operations as a percentage of operations speculatively executed.", + "MetricGroup": "Operation_Mix", + "ScaleUnit": "100percent of operations" + }, + { + "MetricName": "retired_rate", + "MetricExpr": "OP_RETIRED / OP_SPEC", + "BriefDescription": "Of all the micro-operations issued, what percentage are retired(committed)", + "MetricGroup": "General", + "ScaleUnit": "100%" + }, + { + "MetricName": "wasted", + "MetricExpr": "1 - (OP_RETIRED / (CPU_CYCLES * #slots))", + "BriefDescription": "Of all the micro-operations issued, what proportion are lost", + "MetricGroup": "General", + "ScaleUnit": "100%" + }, + { + "MetricName": "wasted_rate", + "MetricExpr": "1 - OP_RETIRED / OP_SPEC", + "BriefDescription": "Of all the micro-operations issued, what percentage are not retired(committed)", + "MetricGroup": "General", + "ScaleUnit": "100%" + }, + { + "MetricName": "stall_backend_cache_rate", + "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES", + "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and cache miss", + "MetricGroup": "Stall", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_backend_resource_rate", + "MetricExpr": "STALL_BACKEND_RESOURCE / CPU_CYCLES", + "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and resource full", + "MetricGroup": "Stall", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_backend_tlb_rate", + "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES", + "BriefDescription": "Proportion of cycles stalled and no operations issued to backend and TLB miss", + "MetricGroup": "Stall", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_frontend_cache_rate", + "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES", + "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and cache miss", + "MetricGroup": "Stall", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_frontend_tlb_rate", + "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES", + "BriefDescription": "Proportion of cycles stalled and no ops delivered from frontend and TLB miss", + "MetricGroup": "Stall", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "dtlb_walk_ratio", + "MetricExpr": 
"DTLB_WALK / L1D_TLB", + "BriefDescription": "This metric measures the ratio of data TLB Walks to the total number of data TLB accesses. This gives an indication of the effectiveness of the data TLB accesses.", + "MetricGroup": "Miss_Ratio;DTLB_Effectiveness", + "ScaleUnit": "1per TLB access" + }, + { + "MetricName": "itlb_walk_ratio", + "MetricExpr": "ITLB_WALK / L1I_TLB", + "BriefDescription": "This metric measures the ratio of instruction TLB Walks to the total number of instruction TLB accesses. This gives an indication of the effectiveness of the instruction TLB accesses.", + "MetricGroup": "Miss_Ratio;ITLB_Effectiveness", + "ScaleUnit": "1per TLB access" + }, + { + "ArchStdEvent": "backend_bound" + }, + { + "ArchStdEvent": "frontend_bound", + "MetricExpr": "100 - (retired_fraction + slots_lost_misspeculation_fraction + backend_bound)" + }, + { + "MetricName": "slots_lost_misspeculation_fraction", + "MetricExpr": "(OP_SPEC - OP_RETIRED) / (CPU_CYCLES * #slots)", + "BriefDescription": "Fraction of slots lost due to misspeculation", + "DefaultMetricgroupName": "TopdownL1", + "MetricGroup": "Default;TopdownL1", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "retired_fraction", + "MetricExpr": "OP_RETIRED / (CPU_CYCLES * #slots)", + "BriefDescription": "Fraction of slots retiring, useful work", + "DefaultMetricgroupName": "TopdownL1", + "MetricGroup": "Default;TopdownL1", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "backend_core", + "MetricExpr": "(backend_bound / 100) - backend_memory", + "BriefDescription": "Fraction of slots the CPU was stalled due to backend non-memory subsystem issues", + "MetricGroup": "TopdownL2", + "ScaleUnit": "100%" + }, + { + "MetricName": "backend_memory", + "MetricExpr": "(STALL_BACKEND_TLB + STALL_BACKEND_CACHE) / CPU_CYCLES", + "BriefDescription": "Fraction of slots the CPU was stalled due to backend memory subsystem issues (cache/tlb miss)", + "MetricGroup": "TopdownL2", + "ScaleUnit": "100%" + }, + { + "MetricName": "branch_mispredict", + "MetricExpr": "(BR_MIS_PRED_RETIRED / GPC_FLUSH) * slots_lost_misspeculation_fraction", + "BriefDescription": "Fraction of slots lost due to branch misprediciton", + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" + }, + { + "MetricName": "frontend_bandwidth", + "MetricExpr": "frontend_bound - frontend_latency", + "BriefDescription": "Fraction of slots the CPU did not dispatch at full bandwidth - able to dispatch partial slots only (1, 2, or 3 uops)", + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" + }, + { + "MetricName": "frontend_latency", + "MetricExpr": "(STALL_FRONTEND - ((STALL_SLOT_FRONTEND - ((frontend_bound / 100) * CPU_CYCLES * #slots)) / #slots)) / CPU_CYCLES", + "BriefDescription": "Fraction of slots the CPU was stalled due to frontend latency issues (cache/tlb miss); nothing to dispatch", + "MetricGroup": "TopdownL2", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "other_miss_pred", + "MetricExpr": "slots_lost_misspeculation_fraction - branch_mispredict", + "BriefDescription": "Fraction of slots lost due to other/non-branch misprediction misspeculation", + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" + }, + { + "MetricName": "pipe_utilization", + "MetricExpr": "100 * ((IXU_NUM_UOPS_ISSUED + FSU_ISSUED) / (CPU_CYCLES * 6))", + "BriefDescription": "Fraction of execute slots utilized", + "MetricGroup": "TopdownL2", + "ScaleUnit": "1percent of slots" + }, + { + "MetricName": "d_cache_l2_miss_rate", + "MetricExpr": 
"STALL_BACKEND_MEM / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to data L2 cache miss", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "d_cache_miss_rate", + "MetricExpr": "STALL_BACKEND_CACHE / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to data cache miss", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "d_tlb_miss_rate", + "MetricExpr": "STALL_BACKEND_TLB / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to data TLB miss", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "fsu_pipe_utilization", + "MetricExpr": "FSU_ISSUED / (CPU_CYCLES * 2)", + "BriefDescription": "Fraction of FSU execute slots utilized", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "i_cache_miss_rate", + "MetricExpr": "STALL_FRONTEND_CACHE / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction cache miss", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "i_tlb_miss_rate", + "MetricExpr": "STALL_FRONTEND_TLB / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to instruction TLB miss", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "ixu_pipe_utilization", + "MetricExpr": "IXU_NUM_UOPS_ISSUED / (CPU_CYCLES * #slots)", + "BriefDescription": "Fraction of IXU execute slots utilized", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "stall_recovery_rate", + "MetricExpr": "IDR_STALL_FLUSH / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled due to flush recovery", + "MetricGroup": "TopdownL3", + "ScaleUnit": "100percent of slots" + }, + { + "MetricName": "stall_fsu_sched_rate", + "MetricExpr": "IDR_STALL_FSU_SCHED / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled and FSU was full", + "MetricGroup": "TopdownL4", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_ixu_sched_rate", + "MetricExpr": "IDR_STALL_IXU_SCHED / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled and IXU was full", + "MetricGroup": "TopdownL4", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_lob_id_rate", + "MetricExpr": "IDR_STALL_LOB_ID / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled and LOB was full", + "MetricGroup": "TopdownL4", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_rob_id_rate", + "MetricExpr": "IDR_STALL_ROB_ID / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled and ROB was full", + "MetricGroup": "TopdownL4", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "stall_sob_id_rate", + "MetricExpr": "IDR_STALL_SOB_ID / CPU_CYCLES", + "BriefDescription": "Fraction of cycles the CPU was stalled and SOB was full", + "MetricGroup": "TopdownL4", + "ScaleUnit": "100percent of cycles" + }, + { + "MetricName": "l1d_cache_access_demand", + "MetricExpr": "L1D_CACHE_RW / L1D_CACHE", + "BriefDescription": "L1D cache access - demand", + "MetricGroup": "Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "l1d_cache_access_prefetces", + "MetricExpr": "L1D_CACHE_PRFM / L1D_CACHE", + "BriefDescription": "L1D cache access - prefetch", + "MetricGroup": 
"Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "l1d_cache_demand_misses", + "MetricExpr": "L1D_CACHE_REFILL_RW / L1D_CACHE", + "BriefDescription": "L1D cache demand misses", + "MetricGroup": "Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "l1d_cache_demand_misses_read", + "MetricExpr": "L1D_CACHE_REFILL_RD / L1D_CACHE", + "BriefDescription": "L1D cache demand misses - read", + "MetricGroup": "Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "l1d_cache_demand_misses_write", + "MetricExpr": "L1D_CACHE_REFILL_WR / L1D_CACHE", + "BriefDescription": "L1D cache demand misses - write", + "MetricGroup": "Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "l1d_cache_prefetch_misses", + "MetricExpr": "L1D_CACHE_REFILL_PRFM / L1D_CACHE", + "BriefDescription": "L1D cache prefetch misses", + "MetricGroup": "Cache", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "ase_scalar_mix", + "MetricExpr": "ASE_SCALAR_SPEC / OP_SPEC", + "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) scalar operations", + "MetricGroup": "Instructions", + "ScaleUnit": "100percent of cache acceses" + }, + { + "MetricName": "ase_vector_mix", + "MetricExpr": "ASE_VECTOR_SPEC / OP_SPEC", + "BriefDescription": "Proportion of advanced SIMD data processing operations (excluding DP_SPEC/LD_SPEC) vector operations", + "MetricGroup": "Instructions", + "ScaleUnit": "100percent of cache acceses" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json new file mode 100644 index 000000000000..66d83b680651 --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/mmu.json @@ -0,0 +1,170 @@ +[ + { + "PublicDescription": "Level 2 data translation buffer allocation", + "EventCode": "0xD800", + "EventName": "MMU_D_OTB_ALLOC", + "BriefDescription": "Level 2 data translation buffer allocation" + }, + { + "PublicDescription": "Data TLB translation cache hit on S1L2 walk cache entry", + "EventCode": "0xd801", + "EventName": "MMU_D_TRANS_CACHE_HIT_S1L2_WALK", + "BriefDescription": "Data TLB translation cache hit on S1L2 walk cache entry" + }, + { + "PublicDescription": "Data TLB translation cache hit on S1L1 walk cache entry", + "EventCode": "0xd802", + "EventName": "MMU_D_TRANS_CACHE_HIT_S1L1_WALK", + "BriefDescription": "Data TLB translation cache hit on S1L1 walk cache entry" + }, + { + "PublicDescription": "Data TLB translation cache hit on S1L0 walk cache entry", + "EventCode": "0xd803", + "EventName": "MMU_D_TRANS_CACHE_HIT_S1L0_WALK", + "BriefDescription": "Data TLB translation cache hit on S1L0 walk cache entry" + }, + { + "PublicDescription": "Data TLB translation cache hit on S2L2 walk cache entry", + "EventCode": "0xd804", + "EventName": "MMU_D_TRANS_CACHE_HIT_S2L2_WALK", + "BriefDescription": "Data TLB translation cache hit on S2L2 walk cache entry" + }, + { + "PublicDescrition": "Data TLB translation cache hit on S2L1 walk cache entry", + "EventCode": "0xd805", + "EventName": "MMU_D_TRANS_CACHE_HIT_S2L1_WALK", + "BriefDescription": "Data TLB translation cache hit on S2L1 walk cache entry" + }, + { + "PublicDescrition": "Data TLB translation cache hit on S2L0 walk cache entry", + "EventCode": "0xd806", + "EventName": "MMU_D_TRANS_CACHE_HIT_S2L0_WALK", + "BriefDescription": "Data TLB translation cache hit on S2L0 walk cache entry" + }, + { + 
"PublicDescrition": "Data-side S1 page walk cache lookup", + "EventCode": "0xd807", + "EventName": "MMU_D_S1_WALK_CACHE_LOOKUP", + "BriefDescription": "Data-side S1 page walk cache lookup" + }, + { + "PublicDescrition": "Data-side S1 page walk cache refill", + "EventCode": "0xd808", + "EventName": "MMU_D_S1_WALK_CACHE_REFILL", + "BriefDescription": "Data-side S1 page walk cache refill" + }, + { + "PublicDescrition": "Data-side S2 page walk cache lookup", + "EventCode": "0xd809", + "EventName": "MMU_D_S2_WALK_CACHE_LOOKUP", + "BriefDescription": "Data-side S2 page walk cache lookup" + }, + { + "PublicDescrition": "Data-side S2 page walk cache refill", + "EventCode": "0xd80a", + "EventName": "MMU_D_S2_WALK_CACHE_REFILL", + "BriefDescription": "Data-side S2 page walk cache refill" + }, + { + "PublicDescription": "Data-side S1 table walk fault", + "EventCode": "0xD80B", + "EventName": "MMU_D_S1_WALK_FAULT", + "BriefDescription": "Data-side S1 table walk fault" + }, + { + "PublicDescription": "Data-side S2 table walk fault", + "EventCode": "0xD80C", + "EventName": "MMU_D_S2_WALK_FAULT", + "BriefDescription": "Data-side S2 table walk fault" + }, + { + "PublicDescription": "Data-side table walk steps or descriptor fetches", + "EventCode": "0xD80D", + "EventName": "MMU_D_WALK_STEPS", + "BriefDescription": "Data-side table walk steps or descriptor fetches" + }, + { + "PublicDescription": "Level 2 instruction translation buffer allocation", + "EventCode": "0xD900", + "EventName": "MMU_I_OTB_ALLOC", + "BriefDescription": "Level 2 instruction translation buffer allocation" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S1L2 walk cache entry", + "EventCode": "0xd901", + "EventName": "MMU_I_TRANS_CACHE_HIT_S1L2_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S1L2 walk cache entry" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S1L1 walk cache entry", + "EventCode": "0xd902", + "EventName": "MMU_I_TRANS_CACHE_HIT_S1L1_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S1L1 walk cache entry" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S1L0 walk cache entry", + "EventCode": "0xd903", + "EventName": "MMU_I_TRANS_CACHE_HIT_S1L0_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S1L0 walk cache entry" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S2L2 walk cache entry", + "EventCode": "0xd904", + "EventName": "MMU_I_TRANS_CACHE_HIT_S2L2_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S2L2 walk cache entry" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S2L1 walk cache entry", + "EventCode": "0xd905", + "EventName": "MMU_I_TRANS_CACHE_HIT_S2L1_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S2L1 walk cache entry" + }, + { + "PublicDescrition": "Instruction TLB translation cache hit on S2L0 walk cache entry", + "EventCode": "0xd906", + "EventName": "MMU_I_TRANS_CACHE_HIT_S2L0_WALK", + "BriefDescription": "Instruction TLB translation cache hit on S2L0 walk cache entry" + }, + { + "PublicDescrition": "Instruction-side S1 page walk cache lookup", + "EventCode": "0xd907", + "EventName": "MMU_I_S1_WALK_CACHE_LOOKUP", + "BriefDescription": "Instruction-side S1 page walk cache lookup" + }, + { + "PublicDescrition": "Instruction-side S1 page walk cache refill", + "EventCode": "0xd908", + "EventName": "MMU_I_S1_WALK_CACHE_REFILL", + "BriefDescription": "Instruction-side S1 page 
walk cache refill" + }, + { + "PublicDescrition": "Instruction-side S2 page walk cache lookup", + "EventCode": "0xd909", + "EventName": "MMU_I_S2_WALK_CACHE_LOOKUP", + "BriefDescription": "Instruction-side S2 page walk cache lookup" + }, + { + "PublicDescrition": "Instruction-side S2 page walk cache refill", + "EventCode": "0xd90a", + "EventName": "MMU_I_S2_WALK_CACHE_REFILL", + "BriefDescription": "Instruction-side S2 page walk cache refill" + }, + { + "PublicDescription": "Instruction-side S1 table walk fault", + "EventCode": "0xD90B", + "EventName": "MMU_I_S1_WALK_FAULT", + "BriefDescription": "Instruction-side S1 table walk fault" + }, + { + "PublicDescription": "Instruction-side S2 table walk fault", + "EventCode": "0xD90C", + "EventName": "MMU_I_S2_WALK_FAULT", + "BriefDescription": "Instruction-side S2 table walk fault" + }, + { + "PublicDescription": "Instruction-side table walk steps or descriptor fetches", + "EventCode": "0xD90D", + "EventName": "MMU_I_WALK_STEPS", + "BriefDescription": "Instruction-side table walk steps or descriptor fetches" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json new file mode 100644 index 000000000000..2fb2d1f183fc --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/pipeline.json @@ -0,0 +1,41 @@ +[ + { + "ArchStdEvent": "STALL_FRONTEND", + "Errata": "Errata AC03_CPU_29", + "BriefDescription": "Impacted by errata, use metrics instead -" + }, + { + "ArchStdEvent": "STALL_BACKEND" + }, + { + "ArchStdEvent": "STALL", + "Errata": "Errata AC03_CPU_29", + "BriefDescription": "Impacted by errata, use metrics instead -" + }, + { + "ArchStdEvent": "STALL_SLOT_BACKEND" + }, + { + "ArchStdEvent": "STALL_SLOT_FRONTEND", + "Errata": "Errata AC03_CPU_29", + "BriefDescription": "Impacted by errata, use metrics instead -" + }, + { + "ArchStdEvent": "STALL_SLOT" + }, + { + "ArchStdEvent": "STALL_BACKEND_MEM" + }, + { + "PublicDescription": "Frontend stall cycles, TLB", + "EventCode": "0x815c", + "EventName": "STALL_FRONTEND_TLB", + "BriefDescription": "Frontend stall cycles, TLB" + }, + { + "PublicDescription": "Backend stall cycles, TLB", + "EventCode": "0x8167", + "EventName": "STALL_BACKEND_TLB", + "BriefDescription": "Backend stall cycles, TLB" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json new file mode 100644 index 000000000000..20f2165c85fe --- /dev/null +++ b/tools/perf/pmu-events/arch/arm64/ampere/ampereonex/spe.json @@ -0,0 +1,14 @@ +[ + { + "ArchStdEvent": "SAMPLE_POP" + }, + { + "ArchStdEvent": "SAMPLE_FEED" + }, + { + "ArchStdEvent": "SAMPLE_FILTRATE" + }, + { + "ArchStdEvent": "SAMPLE_COLLISION" + } +] diff --git a/tools/perf/pmu-events/arch/arm64/mapfile.csv b/tools/perf/pmu-events/arch/arm64/mapfile.csv index 5b58db5032c1..f4d1ca4d1493 100644 --- a/tools/perf/pmu-events/arch/arm64/mapfile.csv +++ b/tools/perf/pmu-events/arch/arm64/mapfile.csv @@ -42,3 +42,4 @@ 0x00000000480fd010,v1,hisilicon/hip08,core 0x00000000500f0000,v1,ampere/emag,core 0x00000000c00fac30,v1,ampere/ampereone,core +0x00000000c00fac40,v1,ampere/ampereonex,core From b809fc656e763296f227b9b31e8f225e5977a8af Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 29 Nov 2023 13:34:25 -0800 Subject: [PATCH 078/179] perf build: Shellcheck support for OUTPUT directory Migrate Makefile.tests to Build so that variables like rule_mkdir are defined via Makefile.build (needed so the 
output directory can be created). This requires SHELLCHECK being exported and the clean rule tweaking to remove the files in find. Change find "-perm -o=x" as it was failing on my Debian based Linux kernel tree, switch to using "-executable". Adding a filename prefix of "." to the shellcheck log files is a pain and error prone in make, remove this prefix and just add the shellcheck log files to .gitignore. Fix the command echo so that running the test is displayed. Fixes: 1638b11ef8156c85 ("perf tools: Add perf binary dependent rule for shellcheck log in Makefile.perf") Reviewed-by: Athira Jajeev Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231129213428.2227448-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/.gitignore | 3 +++ tools/perf/Makefile.perf | 30 ++++++++++-------------------- tools/perf/tests/Build | 14 ++++++++++++++ tools/perf/tests/Makefile.tests | 22 ---------------------- 4 files changed, 27 insertions(+), 42 deletions(-) delete mode 100644 tools/perf/tests/Makefile.tests diff --git a/tools/perf/.gitignore b/tools/perf/.gitignore index ee5c14f3b8b1..f5b81d439387 100644 --- a/tools/perf/.gitignore +++ b/tools/perf/.gitignore @@ -39,6 +39,9 @@ trace/beauty/generated/ pmu-events/pmu-events.c pmu-events/jevents pmu-events/metric_test.log +tests/shell/*.shellcheck_log +tests/shell/coresight/*.shellcheck_log +tests/shell/lib/*.shellcheck_log feature/ libapi/ libbpf/ diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 824cbc0af7d7..1ab2a908f240 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -229,8 +229,15 @@ else force_fixdep := $(config) endif +# Runs shellcheck on perf test shell scripts +ifeq ($(NO_SHELLCHECK),1) + SHELLCHECK := +else + SHELLCHECK := $(shell which shellcheck 2> /dev/null) +endif + export srctree OUTPUT RM CC CXX LD AR CFLAGS CXXFLAGS V BISON FLEX AWK -export HOSTCC HOSTLD HOSTAR HOSTCFLAGS +export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK include $(srctree)/tools/build/Makefile.include @@ -673,23 +680,7 @@ $(PERF_IN): prepare FORCE $(PMU_EVENTS_IN): FORCE prepare $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=pmu-events obj=pmu-events -# Runs shellcheck on perf test shell scripts - -SHELLCHECK := $(shell which shellcheck 2> /dev/null) - -ifeq ($(NO_SHELLCHECK),1) -SHELLCHECK := -endif - -ifneq ($(SHELLCHECK),) -SHELLCHECK_TEST: FORCE prepare - $(Q)$(MAKE) -f $(srctree)/tools/perf/tests/Makefile.tests -else -SHELLCHECK_TEST: - @: -endif - -$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) SHELLCHECK_TEST +$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(PMU_EVENTS_IN) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) \ $(PERF_IN) $(PMU_EVENTS_IN) $(LIBS) -o $@ @@ -1152,9 +1143,8 @@ bpf-skel-clean: $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean - $(Q)$(MAKE) -f $(srctree)/tools/perf/tests/Makefile.tests clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS) - $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete + $(Q)find $(or $(OUTPUT),.) 
-name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete -o -name '*.shellcheck_log' -delete $(Q)$(RM) $(OUTPUT).config-detected $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32 $(OUTPUT)$(LIBJVMTI).so $(call QUIET_CLEAN, core-gen) $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \ diff --git a/tools/perf/tests/Build b/tools/perf/tests/Build index 2b45ffa462a6..53ba9c3e20e0 100644 --- a/tools/perf/tests/Build +++ b/tools/perf/tests/Build @@ -77,3 +77,17 @@ CFLAGS_python-use.o += -DPYTHONPATH="BUILD_STR($(OUTPUT)python)" -DPYTHON="BUI CFLAGS_dwarf-unwind.o += -fno-optimize-sibling-calls perf-y += workloads/ + +ifdef SHELLCHECK + SHELL_TESTS := $(shell find tests/shell -executable -type f -name '*.sh') + TEST_LOGS := $(SHELL_TESTS:tests/shell/%=shell/%.shellcheck_log) +else + SHELL_TESTS := + TEST_LOGS := +endif + +$(OUTPUT)%.shellcheck_log: % + $(call rule_mkdir) + $(Q)$(call echo-cmd,test)shellcheck -a -S warning "$<" > $@ || (cat $@ && rm $@ && false) + +perf-y += $(TEST_LOGS) diff --git a/tools/perf/tests/Makefile.tests b/tools/perf/tests/Makefile.tests deleted file mode 100644 index fdaca5f7a946..000000000000 --- a/tools/perf/tests/Makefile.tests +++ /dev/null @@ -1,22 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# Athira Rajeev , 2023 - -PROGS := $(shell find tests/shell -perm -o=x -type f -name '*.sh') -FILE_NAME := $(notdir $(PROGS)) -FILE_NAME := $(FILE_NAME:%=.%) -LOGS := $(join $(dir $(PROGS)),$(FILE_NAME)) -LOGS := $(LOGS:%=%.shellcheck_log) - -.PHONY: all -all: SHELLCHECK_RUN - @: - -SHELLCHECK_RUN: $(LOGS) - -.%.shellcheck_log: % - $(call rule_mkdir) - $(Q)$(call frecho-cmd,test)@shellcheck -S warning "$<" > $@ || (cat $@ && rm $@ && false) - -clean: - $(eval log_files := $(shell find . -name '.*.shellcheck_log')) - @rm -rf $(log_files) From 8226e4a3b35f525d11934484ee5979399ff8b7dc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 29 Nov 2023 13:34:27 -0800 Subject: [PATCH 079/179] perf test: Use common python setup library Avoid replicated logic by having a common library to set the PYTHON environment variable. Reviewed-by: Athira Jajeev Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231129213428.2227448-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/lib/setup_python.sh | 16 ++++++++++++++++ tools/perf/tests/shell/stat+json_output.sh | 16 +++------------- tools/perf/tests/shell/stat_metrics_values.sh | 14 ++++---------- .../tests/shell/test_perf_data_converter_json.sh | 13 +++---------- 4 files changed, 26 insertions(+), 33 deletions(-) create mode 100644 tools/perf/tests/shell/lib/setup_python.sh diff --git a/tools/perf/tests/shell/lib/setup_python.sh b/tools/perf/tests/shell/lib/setup_python.sh new file mode 100644 index 000000000000..c2fce1793538 --- /dev/null +++ b/tools/perf/tests/shell/lib/setup_python.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +if [ "x$PYTHON" = "x" ] +then + python3 --version >/dev/null 2>&1 && PYTHON=python3 +fi +if [ "x$PYTHON" = "x" ] +then + python --version >/dev/null 2>&1 && PYTHON=python +fi +if [ "x$PYTHON" = "x" ] +then + echo Skipping test, python not detected please set environment variable PYTHON. 
+ exit 2 +fi diff --git a/tools/perf/tests/shell/stat+json_output.sh b/tools/perf/tests/shell/stat+json_output.sh index 196e22672c50..3bc900533a5d 100755 --- a/tools/perf/tests/shell/stat+json_output.sh +++ b/tools/perf/tests/shell/stat+json_output.sh @@ -8,20 +8,10 @@ set -e skip_test=0 +shelldir=$(dirname "$0") +# shellcheck source=lib/setup_python.sh +. "${shelldir}"/lib/setup_python.sh pythonchecker=$(dirname $0)/lib/perf_json_output_lint.py -if [ "x$PYTHON" == "x" ] -then - if which python3 > /dev/null - then - PYTHON=python3 - elif which python > /dev/null - then - PYTHON=python - else - echo Skipping test, python not detected please set environment variable PYTHON. - exit 2 - fi -fi stat_output=$(mktemp /tmp/__perf_test.stat_output.json.XXXXX) diff --git a/tools/perf/tests/shell/stat_metrics_values.sh b/tools/perf/tests/shell/stat_metrics_values.sh index ad94c936de7e..7ca172599aa6 100755 --- a/tools/perf/tests/shell/stat_metrics_values.sh +++ b/tools/perf/tests/shell/stat_metrics_values.sh @@ -1,16 +1,10 @@ #!/bin/bash # perf metrics value validation # SPDX-License-Identifier: GPL-2.0 -if [ "x$PYTHON" == "x" ] -then - if which python3 > /dev/null - then - PYTHON=python3 - else - echo Skipping test, python3 not detected please set environment variable PYTHON. - exit 2 - fi -fi + +shelldir=$(dirname "$0") +# shellcheck source=lib/setup_python.sh +. "${shelldir}"/lib/setup_python.sh grep -q GenuineIntel /proc/cpuinfo || { echo Skipping non-Intel; exit 2; } diff --git a/tools/perf/tests/shell/test_perf_data_converter_json.sh b/tools/perf/tests/shell/test_perf_data_converter_json.sh index 6ded58f98f55..c4f1b59d116f 100755 --- a/tools/perf/tests/shell/test_perf_data_converter_json.sh +++ b/tools/perf/tests/shell/test_perf_data_converter_json.sh @@ -6,16 +6,9 @@ set -e err=0 -if [ "$PYTHON" = "" ] ; then - if which python3 > /dev/null ; then - PYTHON=python3 - elif which python > /dev/null ; then - PYTHON=python - else - echo Skipping test, python not detected please set environment variable PYTHON. - exit 2 - fi -fi +shelldir=$(dirname "$0") +# shellcheck source=lib/setup_python.sh +. "${shelldir}"/lib/setup_python.sh perfdata=$(mktemp /tmp/__perf_test.perf.data.XXXXX) result=$(mktemp /tmp/__perf_test.output.json.XXXXX) From 7d723ef83b8070ab2304df22ed952dc627385406 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 29 Nov 2023 13:34:28 -0800 Subject: [PATCH 080/179] perf test: Add basic 'perf list --json" test Test that JSON output produces valid JSON. Reviewed-by: Athira Jajeev Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Jajeev Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231129213428.2227448-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/list.sh | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100755 tools/perf/tests/shell/list.sh diff --git a/tools/perf/tests/shell/list.sh b/tools/perf/tests/shell/list.sh new file mode 100755 index 000000000000..22b004f2b23e --- /dev/null +++ b/tools/perf/tests/shell/list.sh @@ -0,0 +1,19 @@ +#!/bin/sh +# perf list tests +# SPDX-License-Identifier: GPL-2.0 + +set -e +err=0 + +shelldir=$(dirname "$0") +# shellcheck source=lib/setup_python.sh +. 
"${shelldir}"/lib/setup_python.sh + +test_list_json() { + echo "Json output test" + perf list -j | $PYTHON -m json.tool + echo "Json output test [Success]" +} + +test_list_json +exit $err From 9eef41014fe01287dae79fe208b9b433b13040bb Mon Sep 17 00:00:00 2001 From: Athira Rajeev Date: Thu, 23 Nov 2023 21:31:10 +0530 Subject: [PATCH 081/179] perf vendor events powerpc: Update datasource event name to fix duplicate events Running "perf list" on powerpc fails with segfault as below: $ ./perf list Segmentation fault (core dumped) $ This happens because of duplicate events in the JSON list. The powerpc JSON event list contains some event with same event name, but different event code. They are: - PM_INST_FROM_L3MISS (Present in datasource and frontend) - PM_MRK_DATA_FROM_L2MISS (Present in datasource and marked) - PM_MRK_INST_FROM_L3MISS (Present in datasource and marked) - PM_MRK_DATA_FROM_L3MISS (Present in datasource and marked) pmu_events_table__num_events() uses the value from table_pmu->num_entries which includes duplicate events as well. This causes issue during "perf list" and results in a segmentation fault. Since both event codes are valid, append _DSRC to the Data Source events (datasource.json), so that they would have a unique name. Also add PM_DATA_FROM_L2MISS_DSRC and PM_DATA_FROM_L3MISS_DSRC events. With the fix, 'perf list' works as expected. Fixes: fc143580753348c6 ("perf vendor events power10: Update JSON/events") Signed-off-by: Athira Jajeev Tested-by: Disha Goel Cc: Adrian Hunter Cc: Disha Goel Cc: Ian Rogers Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: linuxppc-dev@lists.ozlabs.org Cc: Madhavan Srinivasan Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231123160110.94090-1-atrajeev@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/powerpc/power10/datasource.json | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json index 6b0356f2d301..0eeaaf1a95b8 100644 --- a/tools/perf/pmu-events/arch/powerpc/power10/datasource.json +++ b/tools/perf/pmu-events/arch/powerpc/power10/datasource.json @@ -99,6 +99,11 @@ "EventName": "PM_INST_FROM_L2MISS", "BriefDescription": "The processor's instruction cache was reloaded from a source beyond the local core's L2 due to a demand miss." }, + { + "EventCode": "0x0003C0000000C040", + "EventName": "PM_DATA_FROM_L2MISS_DSRC", + "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss." + }, { "EventCode": "0x000380000010C040", "EventName": "PM_INST_FROM_L2MISS_ALL", @@ -161,9 +166,14 @@ }, { "EventCode": "0x000780000000C040", - "EventName": "PM_INST_FROM_L3MISS", + "EventName": "PM_INST_FROM_L3MISS_DSRC", "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss." }, + { + "EventCode": "0x0007C0000000C040", + "EventName": "PM_DATA_FROM_L3MISS_DSRC", + "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss." 
+ }, { "EventCode": "0x000780000010C040", "EventName": "PM_INST_FROM_L3MISS_ALL", @@ -981,7 +991,7 @@ }, { "EventCode": "0x0003C0000000C142", - "EventName": "PM_MRK_DATA_FROM_L2MISS", + "EventName": "PM_MRK_DATA_FROM_L2MISS_DSRC", "BriefDescription": "The processor's L1 data cache was reloaded from a source beyond the local core's L2 due to a demand miss for a marked instruction." }, { @@ -1046,12 +1056,12 @@ }, { "EventCode": "0x000780000000C142", - "EventName": "PM_MRK_INST_FROM_L3MISS", + "EventName": "PM_MRK_INST_FROM_L3MISS_DSRC", "BriefDescription": "The processor's instruction cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction." }, { "EventCode": "0x0007C0000000C142", - "EventName": "PM_MRK_DATA_FROM_L3MISS", + "EventName": "PM_MRK_DATA_FROM_L3MISS_DSRC", "BriefDescription": "The processor's L1 data cache was reloaded from beyond the local core's L3 due to a demand miss for a marked instruction." }, { From a4320085a6c694326dd8db46f563d52d1a826f07 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 28 Nov 2023 12:39:40 -0800 Subject: [PATCH 082/179] perf mem: Fix error on hybrid related to availability of mem event in a PMU The below error can be triggered on a hybrid machine. $ perf mem record -t load sleep 1 event syntax error: 'breakpoint/mem-loads,ldlat=30/P' \___ Bad event or PMU Unable to find PMU or event on a PMU of 'breakpoint' In the perf_mem_events__record_args(), the current perf never checks the availability of a mem event on a given PMU. All the PMUs will be added to the perf mem event list. Perf errors out for the unsupported PMU. Extend perf_mem_event__supported() and take a PMU into account. Check the mem event for each PMU before adding it to the perf mem event list. Optimize the perf_mem_events__init() a little bit. The function is to check whether the mem events are supported in the system. It doesn't need to scan all PMUs. Just return with the first supported PMU is good enough. Fixes: 5752c20f3787c9bc ("perf mem: Scan all PMUs instead of just core ones") Reported-by: Ammy Yi Signed-off-by: Kan Liang Tested-by: Ammy Yi Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Link: https://lore.kernel.org/r/20231128203940.3964287-1-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/mem-events.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 954b235e12e5..3a2e3687878c 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c @@ -100,11 +100,14 @@ int perf_mem_events__parse(const char *str) return -1; } -static bool perf_mem_event__supported(const char *mnt, char *sysfs_name) +static bool perf_mem_event__supported(const char *mnt, struct perf_pmu *pmu, + struct perf_mem_event *e) { + char sysfs_name[100]; char path[PATH_MAX]; struct stat st; + scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name); scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name); return !stat(path, &st); } @@ -120,7 +123,6 @@ int perf_mem_events__init(void) for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) { struct perf_mem_event *e = perf_mem_events__ptr(j); - char sysfs_name[100]; struct perf_pmu *pmu = NULL; /* @@ -136,12 +138,12 @@ int perf_mem_events__init(void) * of core PMU. 
*/ while ((pmu = perf_pmus__scan(pmu)) != NULL) { - scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, pmu->name); - e->supported |= perf_mem_event__supported(mnt, sysfs_name); + e->supported |= perf_mem_event__supported(mnt, pmu, e); + if (e->supported) { + found = true; + break; + } } - - if (e->supported) - found = true; } return found ? 0 : -ENOENT; @@ -167,13 +169,10 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e, int idx) { const char *mnt = sysfs__mount(); - char sysfs_name[100]; struct perf_pmu *pmu = NULL; while ((pmu = perf_pmus__scan(pmu)) != NULL) { - scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name, - pmu->name); - if (!perf_mem_event__supported(mnt, sysfs_name)) { + if (!perf_mem_event__supported(mnt, pmu, e)) { pr_err("failed: event '%s' not supported\n", perf_mem_events__name(idx, pmu->name)); } @@ -183,6 +182,7 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e, int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, char **rec_tmp, int *tmp_nr) { + const char *mnt = sysfs__mount(); int i = *argv_nr, k = 0; struct perf_mem_event *e; @@ -211,6 +211,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, while ((pmu = perf_pmus__scan(pmu)) != NULL) { const char *s = perf_mem_events__name(j, pmu->name); + if (!perf_mem_event__supported(mnt, pmu, e)) + continue; + rec_argv[i++] = "-e"; if (s) { char *copy = strdup(s); From 144081ef78c36e255c08b74da86ef68e6cf0a221 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 20 Nov 2023 11:04:08 -0800 Subject: [PATCH 083/179] perf test: Add basic 'perf diff' test There are some old bug reports on perf diff crashing: https://rhaas.blogspot.com/2012/06/perf-good-bad-ugly.html Happening across them I was prompted to add two very basic tests that will give some 'perf diff' coverage. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Jajeev Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231120190408.281826-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/diff.sh | 101 +++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100755 tools/perf/tests/shell/diff.sh diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh new file mode 100755 index 000000000000..213185763688 --- /dev/null +++ b/tools/perf/tests/shell/diff.sh @@ -0,0 +1,101 @@ +#!/bin/sh +# perf diff tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +err=0 +perfdata1=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +perfdata2=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +perfdata3=$(mktemp /tmp/__perf_test.perf.data.XXXXX) +testprog="perf test -w thloop" +testsym="test_loop" + +cleanup() { + rm -rf "${perfdata1}" + rm -rf "${perfdata1}".old + rm -rf "${perfdata2}" + rm -rf "${perfdata2}".old + rm -rf "${perfdata3}" + rm -rf "${perfdata3}".old + + trap - EXIT TERM INT +} + +trap_cleanup() { + cleanup + exit 1 +} +trap trap_cleanup EXIT TERM INT + +make_data() { + file="$1" + if ! perf record -o "${file}" ${testprog} 2> /dev/null + then + echo "Workload record [Failed record]" + echo 1 + return + fi + if ! 
perf report -i "${file}" -q | grep -q "${testsym}" + then + echo "Workload record [Failed missing output]" + echo 1 + return + fi + echo 0 +} + +test_two_files() { + echo "Basic two file diff test" + err=$(make_data "${perfdata1}") + if [ $err != 0 ] + then + return + fi + err=$(make_data "${perfdata2}") + if [ $err != 0 ] + then + return + fi + + if ! perf diff "${perfdata1}" "${perfdata2}" | grep -q "${testsym}" + then + echo "Basic two file diff test [Failed diff]" + err=1 + return + fi + echo "Basic two file diff test [Success]" +} + +test_three_files() { + echo "Basic three file diff test" + err=$(make_data "${perfdata1}") + if [ $err != 0 ] + then + return + fi + err=$(make_data "${perfdata2}") + if [ $err != 0 ] + then + return + fi + err=$(make_data "${perfdata3}") + if [ $err != 0 ] + then + return + fi + + if ! perf diff "${perfdata1}" "${perfdata2}" "${perfdata3}" | grep -q "${testsym}" + then + echo "Basic three file diff test [Failed diff]" + err=1 + return + fi + echo "Basic three file diff test [Success]" +} + +test_two_files +test_three_files + +cleanup +exit $err From 018b04248543f49d677011d4615f243a39a155a8 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Fri, 30 Jun 2023 09:00:29 +0100 Subject: [PATCH 084/179] perf bench sched-seccomp-notify: Fix spelling mistake "synchronious" -> "synchronous" There is a spelling mistake in an option description. Fix it. Signed-off-by: Colin Ian King Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: kernel-janitors@vger.kernel.org Link: https://lore.kernel.org/r/20230630080029.15614-1-colin.i.king@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/bench/sched-seccomp-notify.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/bench/sched-seccomp-notify.c b/tools/perf/bench/sched-seccomp-notify.c index a01c40131493..269c1f4a6852 100644 --- a/tools/perf/bench/sched-seccomp-notify.c +++ b/tools/perf/bench/sched-seccomp-notify.c @@ -32,7 +32,7 @@ static bool sync_mode; static const struct option options[] = { OPT_U64('l', "loop", &loops, "Specify number of loops"), OPT_BOOLEAN('s', "sync-mode", &sync_mode, - "Enable the synchronious mode for seccomp notifications"), + "Enable the synchronous mode for seccomp notifications"), OPT_END() }; From eb2eac0c7b6180332ced94d2979f3d3c7d22aaff Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 20 Nov 2023 16:04:20 -0800 Subject: [PATCH 085/179] perf evsel: Fallback to "task-clock" when not system wide When the "cycles" event isn't available evsel will fallback to the "cpu-clock" software event. "task-clock" is similar to "cpu-clock" but only runs when the process is running. Falling back to "cpu-clock" when not system wide leads to confusion, by falling back to "task-clock" it is hoped the confusion is less. Pass the target to determine if "task-clock" is more appropriate. Update a nearby comment and debug string for the change. 
Signed-off-by: Ian Rogers Tested-by: Athira Rajeev Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Ajay Kaher Cc: Alexander Shishkin Cc: Alexey Makhalov Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Yang Jihong Link: https://lore.kernel.org/r/20231121000420.368075-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 2 +- tools/perf/builtin-stat.c | 2 +- tools/perf/builtin-top.c | 2 +- tools/perf/util/evsel.c | 18 ++++++++++-------- tools/perf/util/evsel.h | 3 ++- 5 files changed, 15 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 9b4f3805ca92..db814eadc16b 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -1360,7 +1360,7 @@ static int record__open(struct record *rec) evlist__for_each_entry(evlist, pos) { try_again: if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) { - if (evsel__fallback(pos, errno, msg, sizeof(msg))) { + if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); goto try_again; diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a3af805a1d57..d8e5d6f7a87a 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -653,7 +653,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) if ((evsel__leader(counter) != counter) || !(counter->core.leader->nr_members > 1)) return COUNTER_SKIP; - } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { + } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); return COUNTER_RETRY; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ea8c7eca5eee..1e42bd1c7d5a 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1044,7 +1044,7 @@ static int perf_top__start_counters(struct perf_top *top) perf_top_overwrite_fallback(top, counter)) goto try_again; - if (evsel__fallback(counter, errno, msg, sizeof(msg))) { + if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); goto try_again; diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a5da74e3a517..532f34d9fcb5 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2853,7 +2853,8 @@ u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const #endif -bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) +bool evsel__fallback(struct evsel *evsel, struct target *target, int err, + char *msg, size_t msgsize) { int paranoid; @@ -2861,18 +2862,19 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize) evsel->core.attr.type == PERF_TYPE_HARDWARE && evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) { /* - * If it's cycles then fall back to hrtimer based - * cpu-clock-tick sw counter, which is always available even if - * no PMU support. + * If it's cycles then fall back to hrtimer based cpu-clock sw + * counter, which is always available even if no PMU support. * * PPC returns ENXIO until 2.6.37 (behavior changed with commit * b0a873e). 
*/ - scnprintf(msg, msgsize, "%s", -"The cycles event is not supported, trying to fall back to cpu-clock-ticks"); - evsel->core.attr.type = PERF_TYPE_SOFTWARE; - evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK; + evsel->core.attr.config = target__has_cpu(target) + ? PERF_COUNT_SW_CPU_CLOCK + : PERF_COUNT_SW_TASK_CLOCK; + scnprintf(msg, msgsize, + "The cycles event is not supported, trying to fall back to %s", + target__has_cpu(target) ? "cpu-clock" : "task-clock"); zfree(&evsel->name); return true; diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index f19ac9f027ef..efbb6e848287 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -460,7 +460,8 @@ static inline bool evsel__is_clock(const struct evsel *evsel) evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK); } -bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize); +bool evsel__fallback(struct evsel *evsel, struct target *target, int err, + char *msg, size_t msgsize); int evsel__open_strerror(struct evsel *evsel, struct target *target, int err, char *msg, size_t size); From 030ac3cad28992ae9099a857848861053273cc8f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:20 -0800 Subject: [PATCH 086/179] perf record: Be lazier in allocating lost samples buffer Wait until a lost sample occurs to allocate the lost samples buffer, often the buffer isn't necessary. This saves a 64kb allocation and 5.3kb of peak memory consumption. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-9-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index db814eadc16b..eb5a398ddb1d 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -1924,21 +1924,13 @@ static void __record__save_lost_samples(struct record *rec, struct evsel *evsel, static void record__read_lost_samples(struct record *rec) { struct perf_session *session = rec->session; - struct perf_record_lost_samples *lost; + struct perf_record_lost_samples *lost = NULL; struct evsel *evsel; /* there was an error during record__open */ if (session->evlist == NULL) return; - lost = zalloc(PERF_SAMPLE_MAX_SIZE); - if (lost == NULL) { - pr_debug("Memory allocation failed\n"); - return; - } - - lost->header.type = PERF_RECORD_LOST_SAMPLES; - evlist__for_each_entry(session->evlist, evsel) { struct xyarray *xy = evsel->core.sample_id; u64 lost_count; @@ -1961,6 +1953,14 @@ static void record__read_lost_samples(struct record *rec) } if (count.lost) { + if (!lost) { + lost = zalloc(PERF_SAMPLE_MAX_SIZE); + if (!lost) { + pr_debug("Memory allocation failed\n"); + return; + } + lost->header.type = PERF_RECORD_LOST_SAMPLES; + } __record__save_lost_samples(rec, evsel, lost, x, y, count.lost, 0); } @@ -1968,9 +1968,18 @@ static void record__read_lost_samples(struct record *rec) } lost_count = perf_bpf_filter__lost_count(evsel); - if (lost_count) + if (lost_count) { + if (!lost) { + lost = zalloc(PERF_SAMPLE_MAX_SIZE); + if (!lost) { + pr_debug("Memory allocation failed\n"); + return; + } + lost->header.type = PERF_RECORD_LOST_SAMPLES; + } __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count, PERF_RECORD_MISC_LOST_SAMPLES_BPF); + } } out: free(lost); From d0acce68285e8645038a72c6792483160ca36e5a Mon Sep 17 00:00:00 2001 From: Chengen Du Date: Thu, 30 Nov 2023 21:57:23 +0800 Subject: [PATCH 087/179] perf symbols: Parse NOTE segments until the build id is found In the ELF file, multiple NOTE segments may exist. To locate the build id, the process shall persist in parsing NOTE segments until the build id is found. 
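
For context, an ELF image can carry more than one PT_NOTE segment (for example the ABI tag note and the GNU build-id note may live in separate segments), so a reader that gives up after scanning the first segment can miss the build-id even though it is present. The sketch below shows the intended loop shape against the generic <elf.h> note layout rather than perf's own symbol-minimal helpers; it assumes a 64-bit, native-endian file already mapped into memory and trims most bounds checking.

```
/*
 * Sketch only: keep scanning every PT_NOTE segment until a GNU build-id
 * note is found, instead of stopping after the first NOTE segment.
 * Assumes a 64-bit, native-endian ELF image mapped at "elf".
 */
#include <elf.h>
#include <stddef.h>
#include <string.h>

int find_build_id(const unsigned char *elf, unsigned char *bid, size_t *bid_len)
{
	const Elf64_Ehdr *eh = (const Elf64_Ehdr *)elf;
	const Elf64_Phdr *ph = (const Elf64_Phdr *)(elf + eh->e_phoff);

	for (int i = 0; i < eh->e_phnum; i++) {
		if (ph[i].p_type != PT_NOTE)
			continue;	/* not a NOTE segment */

		const unsigned char *p = elf + ph[i].p_offset;
		const unsigned char *end = p + ph[i].p_filesz;

		/* Walk every note in this segment. */
		while (p + sizeof(Elf64_Nhdr) <= end) {
			const Elf64_Nhdr *nh = (const Elf64_Nhdr *)p;
			const char *name = (const char *)(nh + 1);
			const unsigned char *desc = (const unsigned char *)name +
						    ((nh->n_namesz + 3) & ~3u);

			if (nh->n_type == NT_GNU_BUILD_ID &&
			    nh->n_namesz == 4 && memcmp(name, "GNU", 4) == 0) {
				memcpy(bid, desc, nh->n_descsz);
				*bid_len = nh->n_descsz;
				return 0;	/* found it, stop */
			}
			p = desc + ((nh->n_descsz + 3) & ~3u);
		}
		/* Nothing here: fall through and try the next PT_NOTE segment. */
	}
	return -1;
}
```

The point mirrored from the patch is the control flow: finding nothing in one NOTE segment is not a failure, the walk simply continues with the next PT_NOTE program header.
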
Signed-off-by: Chengen Du Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231130135723.17562-1-chengen.du@canonical.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol-minimal.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c index a81a14769bd1..1da8b713509c 100644 --- a/tools/perf/util/symbol-minimal.c +++ b/tools/perf/util/symbol-minimal.c @@ -159,9 +159,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid) goto out_free; ret = read_build_id(buf, buf_size, bid, need_swap); - if (ret == 0) + if (ret == 0) { ret = bid->size; - break; + break; + } } } else { Elf64_Ehdr ehdr; @@ -210,9 +211,10 @@ int filename__read_build_id(const char *filename, struct build_id *bid) goto out_free; ret = read_build_id(buf, buf_size, bid, need_swap); - if (ret == 0) + if (ret == 0) { ret = bid->size; - break; + break; + } } } out_free: From 407a3898d72ee0020b8cc9dd28b88c8eb8d68214 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 5 Dec 2023 08:49:24 -0800 Subject: [PATCH 088/179] perf test shell diff: Skip test if test_loop symbol is missing in the perf binary The diff test depends on finding the symbol test_loop in perf and will fail if perf has been stripped and no debug object is available. In that case, skip the test instead. Suggested-by: Adrian Hunter Signed-off-by: Ian Rogers Tested-by: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231205164924.835682-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/diff.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/perf/tests/shell/diff.sh b/tools/perf/tests/shell/diff.sh index 213185763688..14b87af88703 100755 --- a/tools/perf/tests/shell/diff.sh +++ b/tools/perf/tests/shell/diff.sh @@ -9,8 +9,15 @@ perfdata1=$(mktemp /tmp/__perf_test.perf.data.XXXXX) perfdata2=$(mktemp /tmp/__perf_test.perf.data.XXXXX) perfdata3=$(mktemp /tmp/__perf_test.perf.data.XXXXX) testprog="perf test -w thloop" + +shelldir=$(dirname "$0") +# shellcheck source=lib/perf_has_symbol.sh +. "${shelldir}"/lib/perf_has_symbol.sh + testsym="test_loop" +skip_test_missing_symbol ${testsym} + cleanup() { rm -rf "${perfdata1}" rm -rf "${perfdata1}".old From 9fa688ea341231837915f996b61f6e0a330d3b38 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:24 -0800 Subject: [PATCH 089/179] perf map: Simplify map_ip/unmap_ip and make 'struct map' smaller When mapping an IP it is either an identity mapping or a DSO relative mapping, so a single bit is required in the struct to identify this. The current code uses function pointers, adding 2 pointers per map and also pushing the size of a map beyond 1 cache line. Switch to using a byte to identify the mapping type (as well as priv and erange_warned), to avoid any masking. Change struct maps's layout to avoid holes. 
Before: ``` struct map { u64 start; /* 0 8 */ u64 end; /* 8 8 */ _Bool erange_warned:1; /* 16: 0 1 */ _Bool priv:1; /* 16: 1 1 */ /* XXX 6 bits hole, try to pack */ /* XXX 3 bytes hole, try to pack */ u32 prot; /* 20 4 */ u64 pgoff; /* 24 8 */ u64 reloc; /* 32 8 */ u64 (*map_ip)(const struct map *, u64); /* 40 8 */ u64 (*unmap_ip)(const struct map *, u64); /* 48 8 */ struct dso * dso; /* 56 8 */ /* --- cacheline 1 boundary (64 bytes) --- */ refcount_t refcnt; /* 64 4 */ u32 flags; /* 68 4 */ /* size: 72, cachelines: 2, members: 12 */ /* sum members: 68, holes: 1, sum holes: 3 */ /* sum bitfield members: 2 bits, bit holes: 1, sum bit holes: 6 bits */ /* last cacheline: 8 bytes */ }; ``` After: ``` struct map { u64 start; /* 0 8 */ u64 end; /* 8 8 */ u64 pgoff; /* 16 8 */ u64 reloc; /* 24 8 */ struct dso * dso; /* 32 8 */ refcount_t refcnt; /* 40 4 */ u32 prot; /* 44 4 */ u32 flags; /* 48 4 */ enum mapping_type mapping_type:8; /* 52: 0 4 */ /* Bitfield combined with next fields */ _Bool erange_warned; /* 53 1 */ _Bool priv; /* 54 1 */ /* size: 56, cachelines: 1, members: 11 */ /* padding: 1 */ /* last cacheline: 56 bytes */ }; ``` Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-13-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 3 +- tools/perf/util/map.c | 20 +-------- tools/perf/util/map.h | 83 +++++++++++++++++++----------------- tools/perf/util/symbol-elf.c | 6 +-- tools/perf/util/symbol.c | 6 +-- 5 files changed, 50 insertions(+), 68 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index a985d004aa8d..c5de5363b5e7 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1359,8 +1359,7 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) if (machine->vmlinux_map == NULL) return -ENOMEM; - map__set_map_ip(machine->vmlinux_map, identity__map_ip); - map__set_unmap_ip(machine->vmlinux_map, identity__map_ip); + map__set_mapping_type(machine->vmlinux_map, MAPPING_TYPE__IDENTITY); return maps__insert(machine__kernel_maps(machine), machine->vmlinux_map); } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index f64b83004421..54c67cb7ecef 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -109,8 +109,7 @@ void map__init(struct map *map, u64 start, u64 end, u64 pgoff, struct dso *dso) map__set_pgoff(map, pgoff); map__set_reloc(map, 0); map__set_dso(map, dso__get(dso)); - map__set_map_ip(map, map__dso_map_ip); - map__set_unmap_ip(map, map__dso_unmap_ip); + map__set_mapping_type(map, MAPPING_TYPE__DSO); map__set_erange_warned(map, false); refcount_set(map__refcnt(map), 1); } @@ -172,7 +171,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len, map__init(result, start, start + len, pgoff, dso); if (anon || no_dso) { - map->map_ip = map->unmap_ip = identity__map_ip; + map->mapping_type = 
MAPPING_TYPE__IDENTITY; /* * Set memory without DSO as loaded. All map__find_* @@ -630,18 +629,3 @@ struct maps *map__kmaps(struct map *map) } return kmap->kmaps; } - -u64 map__dso_map_ip(const struct map *map, u64 ip) -{ - return ip - map__start(map) + map__pgoff(map); -} - -u64 map__dso_unmap_ip(const struct map *map, u64 ip) -{ - return ip + map__start(map) - map__pgoff(map); -} - -u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip) -{ - return ip; -} diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 1b53d53adc86..3a3b7757da5f 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -16,23 +16,25 @@ struct dso; struct maps; struct machine; +enum mapping_type { + /* map__map_ip/map__unmap_ip are given as offsets in the DSO. */ + MAPPING_TYPE__DSO, + /* map__map_ip/map__unmap_ip are just the given ip value. */ + MAPPING_TYPE__IDENTITY, +}; + DECLARE_RC_STRUCT(map) { u64 start; u64 end; - bool erange_warned:1; - bool priv:1; - u32 prot; u64 pgoff; u64 reloc; - - /* ip -> dso rip */ - u64 (*map_ip)(const struct map *, u64); - /* dso rip -> ip */ - u64 (*unmap_ip)(const struct map *, u64); - struct dso *dso; refcount_t refcnt; + u32 prot; u32 flags; + enum mapping_type mapping_type:8; + bool erange_warned; + bool priv; }; struct kmap; @@ -41,38 +43,11 @@ struct kmap *__map__kmap(struct map *map); struct kmap *map__kmap(struct map *map); struct maps *map__kmaps(struct map *map); -/* ip -> dso rip */ -u64 map__dso_map_ip(const struct map *map, u64 ip); -/* dso rip -> ip */ -u64 map__dso_unmap_ip(const struct map *map, u64 ip); -/* Returns ip */ -u64 identity__map_ip(const struct map *map __maybe_unused, u64 ip); - static inline struct dso *map__dso(const struct map *map) { return RC_CHK_ACCESS(map)->dso; } -static inline u64 map__map_ip(const struct map *map, u64 ip) -{ - return RC_CHK_ACCESS(map)->map_ip(map, ip); -} - -static inline u64 map__unmap_ip(const struct map *map, u64 ip) -{ - return RC_CHK_ACCESS(map)->unmap_ip(map, ip); -} - -static inline void *map__map_ip_ptr(struct map *map) -{ - return RC_CHK_ACCESS(map)->map_ip; -} - -static inline void* map__unmap_ip_ptr(struct map *map) -{ - return RC_CHK_ACCESS(map)->unmap_ip; -} - static inline u64 map__start(const struct map *map) { return RC_CHK_ACCESS(map)->start; @@ -123,6 +98,34 @@ static inline size_t map__size(const struct map *map) return map__end(map) - map__start(map); } +/* ip -> dso rip */ +static inline u64 map__dso_map_ip(const struct map *map, u64 ip) +{ + return ip - map__start(map) + map__pgoff(map); +} + +/* dso rip -> ip */ +static inline u64 map__dso_unmap_ip(const struct map *map, u64 ip) +{ + return ip + map__start(map) - map__pgoff(map); +} + +static inline u64 map__map_ip(const struct map *map, u64 ip) +{ + if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO) + return map__dso_map_ip(map, ip); + else + return ip; +} + +static inline u64 map__unmap_ip(const struct map *map, u64 ip) +{ + if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO) + return map__dso_unmap_ip(map, ip); + else + return ip; +} + /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ u64 map__rip_2objdump(struct map *map, u64 rip); @@ -294,13 +297,13 @@ static inline void map__set_dso(struct map *map, struct dso *dso) RC_CHK_ACCESS(map)->dso = dso; } -static inline void map__set_map_ip(struct map *map, u64 (*map_ip)(const struct map *map, u64 ip)) +static inline void map__set_mapping_type(struct map *map, enum mapping_type type) { - RC_CHK_ACCESS(map)->map_ip = map_ip; + 
RC_CHK_ACCESS(map)->mapping_type = type; } -static inline void map__set_unmap_ip(struct map *map, u64 (*unmap_ip)(const struct map *map, u64 rip)) +static inline enum mapping_type map__mapping_type(struct map *map) { - RC_CHK_ACCESS(map)->unmap_ip = unmap_ip; + return RC_CHK_ACCESS(map)->mapping_type; } #endif /* __PERF_MAP_H */ diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 9e7eeaf616b8..4b934ed3bfd1 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -1392,8 +1392,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, map__set_start(map, shdr->sh_addr + ref_reloc(kmap)); map__set_end(map, map__start(map) + shdr->sh_size); map__set_pgoff(map, shdr->sh_offset); - map__set_map_ip(map, map__dso_map_ip); - map__set_unmap_ip(map, map__dso_unmap_ip); + map__set_mapping_type(map, MAPPING_TYPE__DSO); /* Ensure maps are correctly ordered */ if (kmaps) { int err; @@ -1455,8 +1454,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map, map__set_end(curr_map, map__start(curr_map) + shdr->sh_size); map__set_pgoff(curr_map, shdr->sh_offset); } else { - map__set_map_ip(curr_map, identity__map_ip); - map__set_unmap_ip(curr_map, identity__map_ip); + map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); } curr_dso->symtab_type = dso->symtab_type; if (maps__insert(kmaps, curr_map)) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 82cc74b9358e..314c0263bf3c 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -956,8 +956,7 @@ static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta, return -1; } - map__set_map_ip(curr_map, identity__map_ip); - map__set_unmap_ip(curr_map, identity__map_ip); + map__set_mapping_type(curr_map, MAPPING_TYPE__IDENTITY); if (maps__insert(kmaps, curr_map)) { dso__put(ndso); return -1; @@ -1475,8 +1474,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, map__set_start(map, map__start(new_map)); map__set_end(map, map__end(new_map)); map__set_pgoff(map, map__pgoff(new_map)); - map__set_map_ip(map, map__map_ip_ptr(new_map)); - map__set_unmap_ip(map, map__unmap_ip_ptr(new_map)); + map__set_mapping_type(map, map__mapping_type(new_map)); /* Ensure maps are correctly ordered */ map_ref = map__get(map); maps__remove(kmaps, map_ref); From 0f6ab6a3fb7e380a1277f8288f315724ed517114 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:25 -0800 Subject: [PATCH 090/179] perf maps: Move symbol maps functions to maps.c Move the find and certain other symbol maps__* functions to maps.c for better abstraction. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-14-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 238 +++++++++++++++++++++++++++++++++++++ tools/perf/util/maps.h | 12 ++ tools/perf/util/symbol.c | 248 --------------------------------------- tools/perf/util/symbol.h | 1 - 4 files changed, 250 insertions(+), 249 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 233438c95b53..9a011aed4b75 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -475,3 +475,241 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node) return rb_entry(next, struct map_rb_node, rb_node); } + +static int map__strcmp(const void *a, const void *b) +{ + const struct map *map_a = *(const struct map **)a; + const struct map *map_b = *(const struct map **)b; + const struct dso *dso_a = map__dso(map_a); + const struct dso *dso_b = map__dso(map_b); + int ret = strcmp(dso_a->short_name, dso_b->short_name); + + if (ret == 0 && map_a != map_b) { + /* + * Ensure distinct but name equal maps have an order in part to + * aid reference counting. + */ + ret = (int)map__start(map_a) - (int)map__start(map_b); + if (ret == 0) + ret = (int)((intptr_t)map_a - (intptr_t)map_b); + } + + return ret; +} + +static int map__strcmp_name(const void *name, const void *b) +{ + const struct dso *dso = map__dso(*(const struct map **)b); + + return strcmp(name, dso->short_name); +} + +void __maps__sort_by_name(struct maps *maps) +{ + qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); +} + +static int map__groups__sort_by_name_from_rbtree(struct maps *maps) +{ + struct map_rb_node *rb_node; + struct map **maps_by_name = realloc(maps__maps_by_name(maps), + maps__nr_maps(maps) * sizeof(struct map *)); + int i = 0; + + if (maps_by_name == NULL) + return -1; + + up_read(maps__lock(maps)); + down_write(maps__lock(maps)); + + RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; + RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps); + + maps__for_each_entry(maps, rb_node) + maps_by_name[i++] = map__get(rb_node->map); + + __maps__sort_by_name(maps); + + up_write(maps__lock(maps)); + down_read(maps__lock(maps)); + + return 0; +} + +static struct map *__maps__find_by_name(struct maps *maps, const char *name) +{ + struct map **mapp; + + if (maps__maps_by_name(maps) == NULL && + map__groups__sort_by_name_from_rbtree(maps)) + return NULL; + + mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), + sizeof(*mapp), map__strcmp_name); + if (mapp) + return *mapp; + return NULL; +} + +struct map *maps__find_by_name(struct maps *maps, const char *name) +{ + struct map_rb_node *rb_node; + struct map *map; + + down_read(maps__lock(maps)); + + + if (RC_CHK_ACCESS(maps)->last_search_by_name) { + const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name); + + if (strcmp(dso->short_name, name) == 0) { + map = RC_CHK_ACCESS(maps)->last_search_by_name; + goto out_unlock; + } + } + /* + * If we have maps->maps_by_name, then the name isn't in the rbtree, + * as maps->maps_by_name mirrors the rbtree when lookups by name are + * made. + */ + map = __maps__find_by_name(maps, name); + if (map || maps__maps_by_name(maps) != NULL) + goto out_unlock; + + /* Fallback to traversing the rbtree... 
*/ + maps__for_each_entry(maps, rb_node) { + struct dso *dso; + + map = rb_node->map; + dso = map__dso(map); + if (strcmp(dso->short_name, name) == 0) { + RC_CHK_ACCESS(maps)->last_search_by_name = map; + goto out_unlock; + } + } + map = NULL; + +out_unlock: + up_read(maps__lock(maps)); + return map; +} + +void maps__fixup_end(struct maps *maps) +{ + struct map_rb_node *prev = NULL, *curr; + + down_write(maps__lock(maps)); + + maps__for_each_entry(maps, curr) { + if (prev != NULL && !map__end(prev->map)) + map__set_end(prev->map, map__start(curr->map)); + + prev = curr; + } + + /* + * We still haven't the actual symbols, so guess the + * last map final address. + */ + if (curr && !map__end(curr->map)) + map__set_end(curr->map, ~0ULL); + + up_write(maps__lock(maps)); +} + +/* + * Merges map into maps by splitting the new map within the existing map + * regions. + */ +int maps__merge_in(struct maps *kmaps, struct map *new_map) +{ + struct map_rb_node *rb_node; + LIST_HEAD(merged); + int err = 0; + + maps__for_each_entry(kmaps, rb_node) { + struct map *old_map = rb_node->map; + + /* no overload with this one */ + if (map__end(new_map) < map__start(old_map) || + map__start(new_map) >= map__end(old_map)) + continue; + + if (map__start(new_map) < map__start(old_map)) { + /* + * |new...... + * |old.... + */ + if (map__end(new_map) < map__end(old_map)) { + /* + * |new......| -> |new..| + * |old....| -> |old....| + */ + map__set_end(new_map, map__start(old_map)); + } else { + /* + * |new.............| -> |new..| |new..| + * |old....| -> |old....| + */ + struct map_list_node *m = map_list_node__new(); + + if (!m) { + err = -ENOMEM; + goto out; + } + + m->map = map__clone(new_map); + if (!m->map) { + free(m); + err = -ENOMEM; + goto out; + } + + map__set_end(m->map, map__start(old_map)); + list_add_tail(&m->node, &merged); + map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); + map__set_start(new_map, map__end(old_map)); + } + } else { + /* + * |new...... + * |old.... 
+ */ + if (map__end(new_map) < map__end(old_map)) { + /* + * |new..| -> x + * |old.........| -> |old.........| + */ + map__put(new_map); + new_map = NULL; + break; + } else { + /* + * |new......| -> |new...| + * |old....| -> |old....| + */ + map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); + map__set_start(new_map, map__end(old_map)); + } + } + } + +out: + while (!list_empty(&merged)) { + struct map_list_node *old_node; + + old_node = list_entry(merged.next, struct map_list_node, node); + list_del_init(&old_node->node); + if (!err) + err = maps__insert(kmaps, old_node->map); + map__put(old_node->map); + free(old_node); + } + + if (new_map) { + if (!err) + err = maps__insert(kmaps, new_map); + map__put(new_map); + } + return err; +} diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index 83144e0645ed..a689149be8c4 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -21,6 +21,16 @@ struct map_rb_node { struct map *map; }; +struct map_list_node { + struct list_head node; + struct map *map; +}; + +static inline struct map_list_node *map_list_node__new(void) +{ + return malloc(sizeof(struct map_list_node)); +} + struct map_rb_node *maps__first(struct maps *maps); struct map_rb_node *map_rb_node__next(struct map_rb_node *node); struct map_rb_node *maps__find_node(struct maps *maps, struct map *map); @@ -133,4 +143,6 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map); void __maps__sort_by_name(struct maps *maps); +void maps__fixup_end(struct maps *maps); + #endif // __PERF_MAPS_H diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 314c0263bf3c..1cc42b8d8afb 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -48,11 +48,6 @@ static bool symbol__is_idle(const char *name); int vmlinux_path__nr_entries; char **vmlinux_path; -struct map_list_node { - struct list_head node; - struct map *map; -}; - struct symbol_conf symbol_conf = { .nanosecs = false, .use_modules = true, @@ -90,11 +85,6 @@ static enum dso_binary_type binary_type_symtab[] = { #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) -static struct map_list_node *map_list_node__new(void) -{ - return malloc(sizeof(struct map_list_node)); -} - static bool symbol_type__filter(char symbol_type) { symbol_type = toupper(symbol_type); @@ -270,29 +260,6 @@ void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms) curr->end = roundup(curr->start, 4096) + 4096; } -void maps__fixup_end(struct maps *maps) -{ - struct map_rb_node *prev = NULL, *curr; - - down_write(maps__lock(maps)); - - maps__for_each_entry(maps, curr) { - if (prev != NULL && !map__end(prev->map)) - map__set_end(prev->map, map__start(curr->map)); - - prev = curr; - } - - /* - * We still haven't the actual symbols, so guess the - * last map final address. - */ - if (curr && !map__end(curr->map)) - map__set_end(curr->map, ~0ULL); - - up_write(maps__lock(maps)); -} - struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name) { size_t namelen = strlen(name) + 1; @@ -1270,103 +1237,6 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) return 0; } -/* - * Merges map into maps by splitting the new map within the existing map - * regions. 
- */ -int maps__merge_in(struct maps *kmaps, struct map *new_map) -{ - struct map_rb_node *rb_node; - LIST_HEAD(merged); - int err = 0; - - maps__for_each_entry(kmaps, rb_node) { - struct map *old_map = rb_node->map; - - /* no overload with this one */ - if (map__end(new_map) < map__start(old_map) || - map__start(new_map) >= map__end(old_map)) - continue; - - if (map__start(new_map) < map__start(old_map)) { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new......| -> |new..| - * |old....| -> |old....| - */ - map__set_end(new_map, map__start(old_map)); - } else { - /* - * |new.............| -> |new..| |new..| - * |old....| -> |old....| - */ - struct map_list_node *m = map_list_node__new(); - - if (!m) { - err = -ENOMEM; - goto out; - } - - m->map = map__clone(new_map); - if (!m->map) { - free(m); - err = -ENOMEM; - goto out; - } - - map__set_end(m->map, map__start(old_map)); - list_add_tail(&m->node, &merged); - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } else { - /* - * |new...... - * |old.... - */ - if (map__end(new_map) < map__end(old_map)) { - /* - * |new..| -> x - * |old.........| -> |old.........| - */ - map__put(new_map); - new_map = NULL; - break; - } else { - /* - * |new......| -> |new...| - * |old....| -> |old....| - */ - map__add_pgoff(new_map, map__end(old_map) - map__start(new_map)); - map__set_start(new_map, map__end(old_map)); - } - } - } - -out: - while (!list_empty(&merged)) { - struct map_list_node *old_node; - - old_node = list_entry(merged.next, struct map_list_node, node); - list_del_init(&old_node->node); - if (!err) - err = maps__insert(kmaps, old_node->map); - map__put(old_node->map); - free(old_node); - } - - if (new_map) { - if (!err) - err = maps__insert(kmaps, new_map); - map__put(new_map); - } - return err; -} - static int dso__load_kcore(struct dso *dso, struct map *map, const char *kallsyms_filename) { @@ -2065,124 +1935,6 @@ int dso__load(struct dso *dso, struct map *map) return ret; } -static int map__strcmp(const void *a, const void *b) -{ - const struct map *map_a = *(const struct map **)a; - const struct map *map_b = *(const struct map **)b; - const struct dso *dso_a = map__dso(map_a); - const struct dso *dso_b = map__dso(map_b); - int ret = strcmp(dso_a->short_name, dso_b->short_name); - - if (ret == 0 && map_a != map_b) { - /* - * Ensure distinct but name equal maps have an order in part to - * aid reference counting. 
- */ - ret = (int)map__start(map_a) - (int)map__start(map_b); - if (ret == 0) - ret = (int)((intptr_t)map_a - (intptr_t)map_b); - } - - return ret; -} - -static int map__strcmp_name(const void *name, const void *b) -{ - const struct dso *dso = map__dso(*(const struct map **)b); - - return strcmp(name, dso->short_name); -} - -void __maps__sort_by_name(struct maps *maps) -{ - qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp); -} - -static int map__groups__sort_by_name_from_rbtree(struct maps *maps) -{ - struct map_rb_node *rb_node; - struct map **maps_by_name = realloc(maps__maps_by_name(maps), - maps__nr_maps(maps) * sizeof(struct map *)); - int i = 0; - - if (maps_by_name == NULL) - return -1; - - up_read(maps__lock(maps)); - down_write(maps__lock(maps)); - - RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name; - RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps); - - maps__for_each_entry(maps, rb_node) - maps_by_name[i++] = map__get(rb_node->map); - - __maps__sort_by_name(maps); - - up_write(maps__lock(maps)); - down_read(maps__lock(maps)); - - return 0; -} - -static struct map *__maps__find_by_name(struct maps *maps, const char *name) -{ - struct map **mapp; - - if (maps__maps_by_name(maps) == NULL && - map__groups__sort_by_name_from_rbtree(maps)) - return NULL; - - mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), - sizeof(*mapp), map__strcmp_name); - if (mapp) - return *mapp; - return NULL; -} - -struct map *maps__find_by_name(struct maps *maps, const char *name) -{ - struct map_rb_node *rb_node; - struct map *map; - - down_read(maps__lock(maps)); - - - if (RC_CHK_ACCESS(maps)->last_search_by_name) { - const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name); - - if (strcmp(dso->short_name, name) == 0) { - map = RC_CHK_ACCESS(maps)->last_search_by_name; - goto out_unlock; - } - } - /* - * If we have maps->maps_by_name, then the name isn't in the rbtree, - * as maps->maps_by_name mirrors the rbtree when lookups by name are - * made. - */ - map = __maps__find_by_name(maps, name); - if (map || maps__maps_by_name(maps) != NULL) - goto out_unlock; - - /* Fallback to traversing the rbtree... 
*/ - maps__for_each_entry(maps, rb_node) { - struct dso *dso; - - map = rb_node->map; - dso = map__dso(map); - if (strcmp(dso->short_name, name) == 0) { - RC_CHK_ACCESS(maps)->last_search_by_name = map; - goto out_unlock; - } - } - map = NULL; - -out_unlock: - up_read(maps__lock(maps)); - return map; -} - int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, bool vmlinux_allocated) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index af87c46b3f89..071837ddce2a 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -189,7 +189,6 @@ void __symbols__insert(struct rb_root_cached *symbols, struct symbol *sym, void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym); void symbols__fixup_duplicate(struct rb_root_cached *symbols); void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms); -void maps__fixup_end(struct maps *maps); typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data); int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data, From 01261d8a0f082b1a926d14ecb3ae05e52c477c74 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 27 Nov 2023 14:08:26 -0800 Subject: [PATCH 091/179] perf thread: Add missing RC_CHK_EQUAL Comparing pointers without RC_CHK_ACCESS means the indirect object will be compared rather than the underlying maps when REFCNT_CHECKING is enabled. Fix by adding missing RC_CHK_EQUAL. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Jajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu (Google) Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231127220902.1315692-15-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/thread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index fe5e6991ae4b..b9c2039c4230 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -385,7 +385,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool if (thread__pid(thread) == thread__pid(parent)) return thread__prepare_access(thread); - if (thread__maps(thread) == thread__maps(parent)) { + if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) { pr_debug("broken map groups on thread %d/%d parent %d/%d\n", thread__pid(thread), thread__tid(thread), thread__pid(parent), thread__tid(parent)); From 0713ab3bd169da82c35eefd012b07b715e4ebcf7 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 10:35:33 -0800 Subject: [PATCH 092/179] perf stat: Exit perf stat if parse groups fails Metrics were added by a callback but commit a4b8cfcabb1d90ec ("perf stat: Delay metric parsing") postponed this to allow optimizations based on the CPU configuration. In doing so it stopped errors in metric parsing from causing 'perf stat' termination. This change adds the termination for bad metric names back in. 
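To make the fix concrete, here is a small sketch of the pattern with invented names, not the perf code: the parser's return value is stored and propagated so that a bad metric name terminates the command instead of being silently dropped.

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the metric-group parser: 0 on success,
 * negative when the metric name is unknown. */
static int ex_parse_metric_groups(const char *metrics)
{
	return strcmp(metrics, "ipc") == 0 ? 0 : -22;
}

static int ex_cmd_stat(const char *metrics)
{
	int status = 0;

	if (metrics) {
		int ret = ex_parse_metric_groups(metrics);

		if (ret) {		/* previously this error was ignored */
			status = ret;
			goto out;	/* now it terminates the command */
		}
	}
	/* ... counting would start here ... */
out:
	return status;
}

int main(void)
{
	printf("good metric: %d, bad metric: %d\n",
	       ex_cmd_stat("ipc"), ex_cmd_stat("NoSuchMetric"));
	return 0;
}
```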
Fixes: a4b8cfcabb1d90ec ("perf stat: Delay metric parsing") Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Closes: https://lore.kernel.org/lkml/ZXByT1K6enTh2EHT@kernel.org/ Link: https://lore.kernel.org/r/20231206183533.972028-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index d8e5d6f7a87a..d22228eddccb 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -2695,15 +2695,19 @@ int cmd_stat(int argc, const char **argv) */ if (metrics) { const char *pmu = parse_events_option_args.pmu_filter ?: "all"; + int ret = metricgroup__parse_groups(evsel_list, pmu, metrics, + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.metric_no_threshold, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + &stat_config.metric_events); - metricgroup__parse_groups(evsel_list, pmu, metrics, - stat_config.metric_no_group, - stat_config.metric_no_merge, - stat_config.metric_no_threshold, - stat_config.user_requested_cpu_list, - stat_config.system_wide, - &stat_config.metric_events); zfree(&metrics); + if (ret) { + status = ret; + goto out; + } } if (add_default_attributes()) From 9d03194a36345796d4f0f8d6b72eb770a45d614e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:34 -0800 Subject: [PATCH 093/179] perf annotate: Introduce global annotation_options The annotation options are to control the behavior of objdump and the output. It's basically used by 'perf annotate' but 'perf report' and 'perf top' can call it on TUI dynamically. But it doesn't need to have a copy of annotation options in many places. As most of the work is done in the util/annotate.c file, add a global variable and set/use it instead of having their own copies. 
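A minimal sketch of the pattern, using invented `ex_` names rather than the real perf structures: the header carries the type and an extern declaration, exactly one .c file provides the definition, and each tool reads or tweaks that shared instance instead of carrying its own copy. Both "files" are collapsed into one translation unit here so the sketch compiles on its own.

```c
#include <stdbool.h>
#include <stdio.h>

/* "header": type plus extern declaration */
struct ex_annotation_options {
	bool print_lines;
	unsigned int min_pcnt;
};
extern struct ex_annotation_options ex_annotate_opts;

/* exactly one ".c" file: the single definition (zero-initialized) */
struct ex_annotation_options ex_annotate_opts;

/* a "tool" sets its defaults on the shared instance at startup ... */
static void ex_top_init(void)
{
	ex_annotate_opts.min_pcnt = 5;
}

/* ... and any other code reads the same instance, no copies passed around */
static void ex_show_config(void)
{
	printf("min_pcnt=%u print_lines=%d\n",
	       ex_annotate_opts.min_pcnt, (int)ex_annotate_opts.print_lines);
}

int main(void)
{
	ex_top_init();
	ex_show_config();
	return 0;
}
```

The trade-off is shared global state, which the commit message argues is acceptable here because these tools only ever need one set of annotation options at a time.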
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 43 +++++++++++++++++------------------ tools/perf/util/annotate.c | 3 +++ tools/perf/util/annotate.h | 2 ++ 3 files changed, 26 insertions(+), 22 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index a9129b51d511..67b36a7a12e3 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -45,7 +45,6 @@ struct perf_annotate { struct perf_tool tool; struct perf_session *session; - struct annotation_options opts; #ifdef HAVE_SLANG_SUPPORT bool use_tui; #endif @@ -318,9 +317,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he, struct perf_annotate *ann) { if (!ann->use_stdio2) - return symbol__tty_annotate(&he->ms, evsel, &ann->opts); + return symbol__tty_annotate(&he->ms, evsel, &annotate_opts); - return symbol__tty_annotate2(&he->ms, evsel, &ann->opts); + return symbol__tty_annotate2(&he->ms, evsel, &annotate_opts); } static void hists__find_annotations(struct hists *hists, @@ -376,14 +375,14 @@ static void hists__find_annotations(struct hists *hists, return; } - ret = annotate(he, evsel, &ann->opts, NULL); + ret = annotate(he, evsel, &annotate_opts, NULL); if (!ret || !ann->skip_missing) return; /* skip missing symbols */ nd = rb_next(nd); } else if (use_browser == 1) { - key = hist_entry__tui_annotate(he, evsel, NULL, &ann->opts); + key = hist_entry__tui_annotate(he, evsel, NULL, &annotate_opts); switch (key) { case -1: @@ -425,9 +424,9 @@ static int __cmd_annotate(struct perf_annotate *ann) goto out; } - if (!ann->opts.objdump_path) { + if (!annotate_opts.objdump_path) { ret = perf_env__lookup_objdump(&session->header.env, - &ann->opts.objdump_path); + &annotate_opts.objdump_path); if (ret) goto out; } @@ -561,9 +560,9 @@ int cmd_annotate(int argc, const char **argv) "file", "vmlinux pathname"), OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules, "load module symbols - WARNING: use only with -k and LIVE kernel"), - OPT_BOOLEAN('l', "print-line", &annotate.opts.print_lines, + OPT_BOOLEAN('l', "print-line", &annotate_opts.print_lines, "print matching source lines (may be slow)"), - OPT_BOOLEAN('P', "full-paths", &annotate.opts.full_path, + OPT_BOOLEAN('P', "full-paths", &annotate_opts.full_path, "Don't shorten the displayed pathnames"), OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing, "Skip symbols that cannot be annotated"), @@ -574,15 +573,15 @@ int cmd_annotate(int argc, const char **argv) OPT_CALLBACK(0, "symfs", NULL, "directory", "Look for files with symbols relative to this directory", symbol__config_symfs), - OPT_BOOLEAN(0, "source", &annotate.opts.annotate_src, + OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src, "Interleave source code with assembly code (default)"), - OPT_BOOLEAN(0, "asm-raw", &annotate.opts.show_asm_raw, + OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw, "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), - OPT_STRING(0, "prefix", &annotate.opts.prefix, "prefix", + OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix", "Add prefix to source file path names in programs (with --prefix-strip)"), - OPT_STRING(0, "prefix-strip", &annotate.opts.prefix_strip, "N", + OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N", "Strip first N entries of source file path name in programs (with --prefix)"), OPT_STRING(0, "objdump", &objdump_path, "path", "objdump binary to use for disassembly and annotations"), @@ -601,7 +600,7 @@ int cmd_annotate(int argc, const char **argv) OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode", "'always' (default), 'never' or 'auto' only applicable to --stdio mode", stdio__config_color, "always"), - OPT_CALLBACK(0, "percent-type", &annotate.opts, "local-period", + OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period", "Set percent type local/global-period/hits", annotate_parse_percent_type), OPT_CALLBACK(0, "percent-limit", &annotate, "percent", @@ -617,13 +616,13 @@ int cmd_annotate(int argc, const char **argv) set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE); set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE); - annotation_options__init(&annotate.opts); + annotation_options__init(&annotate_opts); ret = hists__init(); if (ret < 0) return ret; - annotation_config__init(&annotate.opts); + annotation_config__init(&annotate_opts); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { @@ -638,13 +637,13 @@ int cmd_annotate(int argc, const char **argv) } if (disassembler_style) { - annotate.opts.disassembler_style = strdup(disassembler_style); - if (!annotate.opts.disassembler_style) + annotate_opts.disassembler_style = strdup(disassembler_style); + if (!annotate_opts.disassembler_style) return -ENOMEM; } if (objdump_path) { - annotate.opts.objdump_path = strdup(objdump_path); - if (!annotate.opts.objdump_path) + annotate_opts.objdump_path = strdup(objdump_path); + if (!annotate_opts.objdump_path) return -ENOMEM; } if (addr2line_path) { @@ -653,7 +652,7 @@ int cmd_annotate(int argc, const char **argv) return -ENOMEM; } - if (annotate_check_args(&annotate.opts) < 0) + if (annotate_check_args(&annotate_opts) < 0) return -EINVAL; #ifdef HAVE_GTK2_SUPPORT @@ -734,7 +733,7 @@ int cmd_annotate(int argc, const char **argv) #ifndef NDEBUG perf_session__delete(annotate.session); #endif - annotation_options__exit(&annotate.opts); + annotation_options__exit(&annotate_opts); return ret; } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 9a828dc601c7..77b78001b94d 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -57,6 +57,9 @@ #include +/* global annotation options */ +struct annotation_options annotate_opts; + static regex_t file_lineno; static struct ins_ops *ins__find(struct arch *arch, const char *name); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index b64a2be287b3..8c1a070725fa 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -105,6 +105,8 @@ struct annotation_options { unsigned int percent_type; }; +extern struct annotation_options annotate_opts; + enum { ANNOTATION__OFFSET_JUMP_TARGETS = 1, ANNOTATION__OFFSET_CALL, From 14953f038d6b30e3dc9d1aa4d4584ac505e5a8ec Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:35 -0800 Subject: [PATCH 094/179] perf report: Convert to the global annotation_options Use the global option and drop the local copy. 
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 121a2781323c..90f98953587c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -98,7 +98,6 @@ struct report { bool skip_empty; int max_stack; struct perf_read_values show_threads_values; - struct annotation_options annotation_opts; const char *pretty_printing_style; const char *cpu_list; const char *symbol_filter_str; @@ -542,7 +541,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report * ret = report__browse_block_hists(&rep->block_reports[i++].hist, rep->min_percent, pos, &rep->session->header.env, - &rep->annotation_opts); + &annotate_opts); if (ret != 0) return ret; } @@ -670,7 +669,7 @@ static int report__browse_hists(struct report *rep) } ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent, - &session->header.env, true, &rep->annotation_opts); + &session->header.env, true, &annotate_opts); /* * Usually "ret" is the last pressed key, and we only * care if the key notifies us to switch data file. @@ -745,7 +744,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg) if (rep->symbol_ipc && sym && !sym->annotate2) { struct evsel *evsel = hists_to_evsel(he->hists); - symbol__annotate2(&he->ms, evsel, &rep->annotation_opts, NULL); + symbol__annotate2(&he->ms, evsel, &annotate_opts, NULL); } return 0; @@ -1341,15 +1340,15 @@ int cmd_report(int argc, const char **argv) "list of cpus to profile"), OPT_BOOLEAN('I', "show-info", &report.show_full_info, "Display extended information about perf.data file"), - OPT_BOOLEAN(0, "source", &report.annotation_opts.annotate_src, + OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src, "Interleave source code with assembly code (default)"), - OPT_BOOLEAN(0, "asm-raw", &report.annotation_opts.show_asm_raw, + OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw, "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. 
-M intel for intel syntax)"), - OPT_STRING(0, "prefix", &report.annotation_opts.prefix, "prefix", + OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix", "Add prefix to source file path names in programs (with --prefix-strip)"), - OPT_STRING(0, "prefix-strip", &report.annotation_opts.prefix_strip, "N", + OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N", "Strip first N entries of source file path name in programs (with --prefix)"), OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period, "Show a column with the sum of periods"), @@ -1401,7 +1400,7 @@ int cmd_report(int argc, const char **argv) "Time span of interest (start,stop)"), OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, "Show inline function"), - OPT_CALLBACK(0, "percent-type", &report.annotation_opts, "local-period", + OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period", "Set percent type local/global-period/hits", annotate_parse_percent_type), OPT_BOOLEAN(0, "ns", &symbol_conf.nanosecs, "Show times in nanosecs"), @@ -1433,7 +1432,7 @@ int cmd_report(int argc, const char **argv) */ symbol_conf.keep_exited_threads = true; - annotation_options__init(&report.annotation_opts); + annotation_options__init(&annotate_opts); ret = perf_config(report__config, &report); if (ret) @@ -1452,13 +1451,13 @@ int cmd_report(int argc, const char **argv) } if (disassembler_style) { - report.annotation_opts.disassembler_style = strdup(disassembler_style); - if (!report.annotation_opts.disassembler_style) + annotate_opts.disassembler_style = strdup(disassembler_style); + if (!annotate_opts.disassembler_style) return -ENOMEM; } if (objdump_path) { - report.annotation_opts.objdump_path = strdup(objdump_path); - if (!report.annotation_opts.objdump_path) + annotate_opts.objdump_path = strdup(objdump_path); + if (!annotate_opts.objdump_path) return -ENOMEM; } if (addr2line_path) { @@ -1467,7 +1466,7 @@ int cmd_report(int argc, const char **argv) return -ENOMEM; } - if (annotate_check_args(&report.annotation_opts) < 0) { + if (annotate_check_args(&annotate_opts) < 0) { ret = -EINVAL; goto exit; } @@ -1699,7 +1698,7 @@ int cmd_report(int argc, const char **argv) */ symbol_conf.priv_size += sizeof(u32); } - annotation_config__init(&report.annotation_opts); + annotation_config__init(&annotate_opts); } if (symbol__init(&session->header.env) < 0) @@ -1753,7 +1752,7 @@ int cmd_report(int argc, const char **argv) zstd_fini(&(session->zstd_data)); perf_session__delete(session); exit: - annotation_options__exit(&report.annotation_opts); + annotation_options__exit(&annotate_opts); free(sort_order_help); free(field_order_help); return ret; From c9a21a872c69032cb9a94ebc171649c0c28141d7 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:36 -0800 Subject: [PATCH 095/179] perf top: Convert to the global annotation_options Use the global option and drop the local copy. 
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 44 ++++++++++++++++++++-------------------- tools/perf/util/top.h | 1 - 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 1e42bd1c7d5a..a6495aa5898e 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -147,7 +147,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) return err; } - err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL); + err = symbol__annotate(&he->ms, evsel, &annotate_opts, NULL); if (err == 0) { top->sym_filter_entry = he; } else { @@ -261,9 +261,9 @@ static void perf_top__show_details(struct perf_top *top) goto out_unlock; printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name); - printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt); + printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt); - more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts); + more = symbol__annotate_printf(&he->ms, top->sym_evsel, &annotate_opts); if (top->evlist->enabled) { if (top->zero) @@ -450,7 +450,7 @@ static void perf_top__print_mapped_keys(struct perf_top *top) fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter); - fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt); + fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt); fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL"); fprintf(stdout, "\t[S] stop annotation.\n"); @@ -553,7 +553,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c) prompt_integer(&top->count_filter, "Enter display event count filter"); break; case 'F': - prompt_percent(&top->annotation_opts.min_pcnt, + prompt_percent(&annotate_opts.min_pcnt, "Enter details display event filter (percent)"); break; case 'K': @@ -647,7 +647,7 @@ static void *display_thread_tui(void *arg) ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, &top->session->header.env, !top->record_opts.overwrite, - &top->annotation_opts); + &annotate_opts); if (ret == K_RELOAD) { top->zero = true; goto repeat; @@ -1241,9 +1241,9 @@ static int __cmd_top(struct perf_top *top) pthread_t thread, thread_process; int ret; - if (!top->annotation_opts.objdump_path) { + if (!annotate_opts.objdump_path) { ret = perf_env__lookup_objdump(&top->session->header.env, - &top->annotation_opts.objdump_path); + &annotate_opts.objdump_path); if (ret) return ret; } @@ -1536,9 +1536,9 @@ int cmd_top(int argc, const char **argv) "only consider symbols in these comms"), OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", "only consider these symbols"), - OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src, + OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src, "Interleave source code with assembly code (default)"), - OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw, + OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw, "Display raw encoding of assembly instructions (default)"), OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, "Enable kernel symbol demangling"), @@ -1549,9 +1549,9 @@ int cmd_top(int argc, const char **argv) "addr2line binary to use 
for line numbers"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. -M intel for intel syntax)"), - OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix", + OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix", "Add prefix to source file path names in programs (with --prefix-strip)"), - OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N", + OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N", "Strip first N entries of source file path name in programs (with --prefix)"), OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"), OPT_CALLBACK(0, "percent-limit", &top, "percent", @@ -1609,10 +1609,10 @@ int cmd_top(int argc, const char **argv) if (status < 0) return status; - annotation_options__init(&top.annotation_opts); + annotation_options__init(&annotate_opts); - top.annotation_opts.min_pcnt = 5; - top.annotation_opts.context = 4; + annotate_opts.min_pcnt = 5; + annotate_opts.context = 4; top.evlist = evlist__new(); if (top.evlist == NULL) @@ -1642,13 +1642,13 @@ int cmd_top(int argc, const char **argv) usage_with_options(top_usage, options); if (disassembler_style) { - top.annotation_opts.disassembler_style = strdup(disassembler_style); - if (!top.annotation_opts.disassembler_style) + annotate_opts.disassembler_style = strdup(disassembler_style); + if (!annotate_opts.disassembler_style) return -ENOMEM; } if (objdump_path) { - top.annotation_opts.objdump_path = strdup(objdump_path); - if (!top.annotation_opts.objdump_path) + annotate_opts.objdump_path = strdup(objdump_path); + if (!annotate_opts.objdump_path) return -ENOMEM; } if (addr2line_path) { @@ -1661,7 +1661,7 @@ int cmd_top(int argc, const char **argv) if (status) goto out_delete_evlist; - if (annotate_check_args(&top.annotation_opts) < 0) + if (annotate_check_args(&annotate_opts) < 0) goto out_delete_evlist; if (!top.evlist->core.nr_entries) { @@ -1787,7 +1787,7 @@ int cmd_top(int argc, const char **argv) if (status < 0) goto out_delete_evlist; - annotation_config__init(&top.annotation_opts); + annotation_config__init(&annotate_opts); symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); status = symbol__init(NULL); @@ -1840,7 +1840,7 @@ int cmd_top(int argc, const char **argv) out_delete_evlist: evlist__delete(top.evlist); perf_session__delete(top.session); - annotation_options__exit(&top.annotation_opts); + annotation_options__exit(&annotate_opts); return status; } diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index a8b0d79bd96c..4c5588dbb131 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -21,7 +21,6 @@ struct perf_top { struct perf_tool tool; struct evlist *evlist, *sb_evlist; struct record_opts record_opts; - struct annotation_options annotation_opts; struct evswitch evswitch; /* * Symbols will be added here in perf_event__process_sample and will From 41fd3cacd29f47f6b9c6474b27c5b0513786c4e9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:37 -0800 Subject: [PATCH 096/179] perf annotate: Use global annotation_options Now it can directly use the global options and no need to pass it as an argument. 
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-5-namhyung@kernel.org [ Fixup build with GTK2=1 ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 7 +- tools/perf/builtin-report.c | 2 +- tools/perf/builtin-top.c | 4 +- tools/perf/ui/browsers/annotate.c | 6 +- tools/perf/ui/gtk/annotate.c | 6 +- tools/perf/ui/gtk/gtk.h | 2 - tools/perf/util/annotate.c | 118 ++++++++++++++---------------- tools/perf/util/annotate.h | 15 ++-- 8 files changed, 71 insertions(+), 89 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 67b36a7a12e3..9c1e2b2b5bc0 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -317,9 +317,9 @@ static int hist_entry__tty_annotate(struct hist_entry *he, struct perf_annotate *ann) { if (!ann->use_stdio2) - return symbol__tty_annotate(&he->ms, evsel, &annotate_opts); + return symbol__tty_annotate(&he->ms, evsel); - return symbol__tty_annotate2(&he->ms, evsel, &annotate_opts); + return symbol__tty_annotate2(&he->ms, evsel); } static void hists__find_annotations(struct hists *hists, @@ -365,7 +365,6 @@ static void hists__find_annotations(struct hists *hists, int ret; int (*annotate)(struct hist_entry *he, struct evsel *evsel, - struct annotation_options *options, struct hist_browser_timer *hbt); annotate = dlsym(perf_gtk_handle, @@ -375,7 +374,7 @@ static void hists__find_annotations(struct hists *hists, return; } - ret = annotate(he, evsel, &annotate_opts, NULL); + ret = annotate(he, evsel, NULL); if (!ret || !ann->skip_missing) return; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 90f98953587c..2b86651615cd 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -744,7 +744,7 @@ static int hists__resort_cb(struct hist_entry *he, void *arg) if (rep->symbol_ipc && sym && !sym->annotate2) { struct evsel *evsel = hists_to_evsel(he->hists); - symbol__annotate2(&he->ms, evsel, &annotate_opts, NULL); + symbol__annotate2(&he->ms, evsel, NULL); } return 0; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a6495aa5898e..46a61634701b 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -147,7 +147,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he) return err; } - err = symbol__annotate(&he->ms, evsel, &annotate_opts, NULL); + err = symbol__annotate(&he->ms, evsel, NULL); if (err == 0) { top->sym_filter_entry = he; } else { @@ -263,7 +263,7 @@ static void perf_top__show_details(struct perf_top *top) printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name); printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt); - more = symbol__annotate_printf(&he->ms, top->sym_evsel, &annotate_opts); + more = symbol__annotate_printf(&he->ms, top->sym_evsel); if (top->evlist->enabled) { if (top->zero) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 163f916fff68..ed0e692afdbe 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -114,7 +114,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int if (!browser->navkeypressed) ops.width += 1; - annotation_line__write(al, notes, &ops, ab->opts); + annotation_line__write(al, notes, &ops); if (ops.current_entry) ab->selection = al; @@ -884,7 +884,7 @@ static int annotate_browser__run(struct 
annotate_browser *browser, continue; } case 'P': - map_symbol__annotation_dump(ms, evsel, browser->opts); + map_symbol__annotation_dump(ms, evsel); continue; case 't': if (symbol_conf.show_total_period) { @@ -979,7 +979,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, return -1; if (not_annotated) { - err = symbol__annotate2(ms, evsel, opts, &browser.arch); + err = symbol__annotate2(ms, evsel, &browser.arch); if (err) { char msg[BUFSIZ]; dso->annotate_warned = true; diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c index 2effac77ca8c..394861245fd3 100644 --- a/tools/perf/ui/gtk/annotate.c +++ b/tools/perf/ui/gtk/annotate.c @@ -162,7 +162,6 @@ static int perf_gtk__annotate_symbol(GtkWidget *window, struct map_symbol *ms, } static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *options, struct hist_browser_timer *hbt) { struct dso *dso = map__dso(ms->map); @@ -176,7 +175,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel, if (dso->annotate_warned) return -1; - err = symbol__annotate(ms, evsel, options, NULL); + err = symbol__annotate(ms, evsel, NULL); if (err) { char msg[BUFSIZ]; dso->annotate_warned = true; @@ -244,10 +243,9 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel, int hist_entry__gtk_annotate(struct hist_entry *he, struct evsel *evsel, - struct annotation_options *options, struct hist_browser_timer *hbt) { - return symbol__gtk_annotate(&he->ms, evsel, options, hbt); + return symbol__gtk_annotate(&he->ms, evsel, hbt); } void perf_gtk__show_annotations(void) diff --git a/tools/perf/ui/gtk/gtk.h b/tools/perf/ui/gtk/gtk.h index 1e84dceb5267..a2b497f03fd6 100644 --- a/tools/perf/ui/gtk/gtk.h +++ b/tools/perf/ui/gtk/gtk.h @@ -56,13 +56,11 @@ struct evsel; struct evlist; struct hist_entry; struct hist_browser_timer; -struct annotation_options; int evlist__gtk_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, float min_pcnt); int hist_entry__gtk_annotate(struct hist_entry *he, struct evsel *evsel, - struct annotation_options *options, struct hist_browser_timer *hbt); void perf_gtk__show_annotations(void); diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 77b78001b94d..daff9af552f4 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -1896,7 +1896,6 @@ static int symbol__disassemble_bpf(struct symbol *sym, struct annotate_args *args) { struct annotation *notes = symbol__annotation(sym); - struct annotation_options *opts = args->options; struct bpf_prog_linfo *prog_linfo = NULL; struct bpf_prog_info_node *info_node; int len = sym->end - sym->start; @@ -2006,7 +2005,7 @@ static int symbol__disassemble_bpf(struct symbol *sym, prev_buf_size = buf_size; fflush(s); - if (!opts->hide_src_code && srcline) { + if (!annotate_opts.hide_src_code && srcline) { args->offset = -1; args->line = strdup(srcline); args->line_nr = 0; @@ -2129,7 +2128,7 @@ static char *expand_tabs(char *line, char **storage, size_t *storage_len) static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) { - struct annotation_options *opts = args->options; + struct annotation_options *opts = &annotate_opts; struct map *map = args->ms.map; struct dso *dso = map__dso(map); char *command; @@ -2380,13 +2379,13 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) } int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options 
*options, struct arch **parch) + struct arch **parch) { struct symbol *sym = ms->sym; struct annotation *notes = symbol__annotation(sym); struct annotate_args args = { .evsel = evsel, - .options = options, + .options = &annotate_opts, }; struct perf_env *env = evsel__env(evsel); const char *arch_name = perf_env__arch(env); @@ -2414,7 +2413,7 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, } args.ms = *ms; - if (notes->options && notes->options->full_addr) + if (annotate_opts.full_addr) notes->start = map__objdump_2mem(ms->map, ms->sym->start); else notes->start = map__rip_2objdump(ms->map, ms->sym->start); @@ -2422,12 +2421,12 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, return symbol__disassemble(sym, &args); } -static void insert_source_line(struct rb_root *root, struct annotation_line *al, - struct annotation_options *opts) +static void insert_source_line(struct rb_root *root, struct annotation_line *al) { struct annotation_line *iter; struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; + unsigned int percent_type = annotate_opts.percent_type; int i, ret; while (*p != NULL) { @@ -2438,7 +2437,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al, if (ret == 0) { for (i = 0; i < al->data_nr; i++) { iter->data[i].percent_sum += annotation_data__percent(&al->data[i], - opts->percent_type); + percent_type); } return; } @@ -2451,7 +2450,7 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al, for (i = 0; i < al->data_nr; i++) { al->data[i].percent_sum = annotation_data__percent(&al->data[i], - opts->percent_type); + percent_type); } rb_link_node(&al->rb_node, parent, p); @@ -2573,8 +2572,7 @@ static int annotated_source__addr_fmt_width(struct list_head *lines, u64 start) return 0; } -int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *opts) +int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel) { struct map *map = ms->map; struct symbol *sym = ms->sym; @@ -2585,6 +2583,7 @@ int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel, struct annotation *notes = symbol__annotation(sym); struct sym_hist *h = annotation__histogram(notes, evsel->core.idx); struct annotation_line *pos, *queue = NULL; + struct annotation_options *opts = &annotate_opts; u64 start = map__rip_2objdump(map, sym->start); int printed = 2, queue_len = 0, addr_fmt_width; int more = 0; @@ -2713,8 +2712,7 @@ static void FILE__write_graph(void *fp, int graph) fputs(s, fp); } -static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, - struct annotation_options *opts) +static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp) { struct annotation *notes = symbol__annotation(sym); struct annotation_write_ops wops = { @@ -2731,7 +2729,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, list_for_each_entry(al, ¬es->src->source, node) { if (annotation_line__filter(al, notes)) continue; - annotation_line__write(al, notes, &wops, opts); + annotation_line__write(al, notes, &wops); fputc('\n', fp); wops.first_line = false; } @@ -2739,8 +2737,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, return 0; } -int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *opts) +int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel) { const char *ev_name = evsel__name(evsel); char buf[1024]; @@ -2762,7 +2759,7 @@ int 
map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel, fprintf(fp, "%s() %s\nEvent: %s\n\n", ms->sym->name, map__dso(ms->map)->long_name, ev_name); - symbol__annotate_fprintf2(ms->sym, fp, opts); + symbol__annotate_fprintf2(ms->sym, fp); fclose(fp); err = 0; @@ -2939,24 +2936,24 @@ void annotation__init_column_widths(struct annotation *notes, struct symbol *sym void annotation__update_column_widths(struct annotation *notes) { - if (notes->options->use_offset) + if (annotate_opts.use_offset) notes->widths.target = notes->widths.min_addr; - else if (notes->options->full_addr) + else if (annotate_opts.full_addr) notes->widths.target = BITS_PER_LONG / 4; else notes->widths.target = notes->widths.max_addr; notes->widths.addr = notes->widths.target; - if (notes->options->show_nr_jumps) + if (annotate_opts.show_nr_jumps) notes->widths.addr += notes->widths.jumps + 1; } void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *ms) { - notes->options->full_addr = !notes->options->full_addr; + annotate_opts.full_addr = !annotate_opts.full_addr; - if (notes->options->full_addr) + if (annotate_opts.full_addr) notes->start = map__objdump_2mem(ms->map, ms->sym->start); else notes->start = map__rip_2objdump(ms->map, ms->sym->start); @@ -2965,8 +2962,7 @@ void annotation__toggle_full_addr(struct annotation *notes, struct map_symbol *m } static void annotation__calc_lines(struct annotation *notes, struct map *map, - struct rb_root *root, - struct annotation_options *opts) + struct rb_root *root) { struct annotation_line *al; struct rb_root tmp_root = RB_ROOT; @@ -2979,7 +2975,7 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, double percent; percent = annotation_data__percent(&al->data[i], - opts->percent_type); + annotate_opts.percent_type); if (percent > percent_max) percent_max = percent; @@ -2990,22 +2986,20 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, al->path = get_srcline(map__dso(map), notes->start + al->offset, NULL, false, true, notes->start + al->offset); - insert_source_line(&tmp_root, al, opts); + insert_source_line(&tmp_root, al); } resort_source_line(root, &tmp_root); } -static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root, - struct annotation_options *opts) +static void symbol__calc_lines(struct map_symbol *ms, struct rb_root *root) { struct annotation *notes = symbol__annotation(ms->sym); - annotation__calc_lines(notes, ms->map, root, opts); + annotation__calc_lines(notes, ms->map, root); } -int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *opts) +int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel) { struct dso *dso = map__dso(ms->map); struct symbol *sym = ms->sym; @@ -3014,7 +3008,7 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, char buf[1024]; int err; - err = symbol__annotate2(ms, evsel, opts, NULL); + err = symbol__annotate2(ms, evsel, NULL); if (err) { char msg[BUFSIZ]; @@ -3024,31 +3018,31 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, return -1; } - if (opts->print_lines) { - srcline_full_filename = opts->full_path; - symbol__calc_lines(ms, &source_line, opts); + if (annotate_opts.print_lines) { + srcline_full_filename = annotate_opts.full_path; + symbol__calc_lines(ms, &source_line); print_summary(&source_line, dso->long_name); } hists__scnprintf_title(hists, buf, sizeof(buf)); fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", - buf, 
percent_type_str(opts->percent_type), sym->name, dso->long_name); - symbol__annotate_fprintf2(sym, stdout, opts); + buf, percent_type_str(annotate_opts.percent_type), sym->name, + dso->long_name); + symbol__annotate_fprintf2(sym, stdout); annotated_source__purge(symbol__annotation(sym)->src); return 0; } -int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *opts) +int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel) { struct dso *dso = map__dso(ms->map); struct symbol *sym = ms->sym; struct rb_root source_line = RB_ROOT; int err; - err = symbol__annotate(ms, evsel, opts, NULL); + err = symbol__annotate(ms, evsel, NULL); if (err) { char msg[BUFSIZ]; @@ -3060,13 +3054,13 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, symbol__calc_percent(sym, evsel); - if (opts->print_lines) { - srcline_full_filename = opts->full_path; - symbol__calc_lines(ms, &source_line, opts); + if (annotate_opts.print_lines) { + srcline_full_filename = annotate_opts.full_path; + symbol__calc_lines(ms, &source_line); print_summary(&source_line, dso->long_name); } - symbol__annotate_printf(ms, evsel, opts); + symbol__annotate_printf(ms, evsel); annotated_source__purge(symbol__annotation(sym)->src); @@ -3127,7 +3121,7 @@ static void disasm_line__write(struct disasm_line *dl, struct annotation *notes, obj__printf(obj, " "); } - disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset, notes->widths.max_ins_name); + disasm_line__scnprintf(dl, bf, size, !annotate_opts.use_offset, notes->widths.max_ins_name); } static void ipc_coverage_string(char *bf, int size, struct annotation *notes) @@ -3210,7 +3204,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati else obj__printf(obj, "%*s ", ANNOTATION__IPC_WIDTH - 1, "IPC"); - if (!notes->options->show_minmax_cycle) { + if (!annotate_opts.show_minmax_cycle) { if (al->cycles && al->cycles->avg) obj__printf(obj, "%*" PRIu64 " ", ANNOTATION__CYCLES_WIDTH - 1, al->cycles->avg); @@ -3254,7 +3248,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati if (!*al->line) obj__printf(obj, "%-*s", width - pcnt_width - cycles_width, " "); else if (al->offset == -1) { - if (al->line_nr && notes->options->show_linenr) + if (al->line_nr && annotate_opts.show_linenr) printed = scnprintf(bf, sizeof(bf), "%-*d ", notes->widths.addr + 1, al->line_nr); else printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " "); @@ -3264,15 +3258,15 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati u64 addr = al->offset; int color = -1; - if (!notes->options->use_offset) + if (!annotate_opts.use_offset) addr += notes->start; - if (!notes->options->use_offset) { + if (!annotate_opts.use_offset) { printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); } else { if (al->jump_sources && - notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { - if (notes->options->show_nr_jumps) { + annotate_opts.offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { + if (annotate_opts.show_nr_jumps) { int prev; printed = scnprintf(bf, sizeof(bf), "%*d ", notes->widths.jumps, @@ -3286,9 +3280,9 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", notes->widths.target, addr); } else if (ins__is_call(&disasm_line(al)->ins) && - notes->options->offset_level >= ANNOTATION__OFFSET_CALL) { + annotate_opts.offset_level >= ANNOTATION__OFFSET_CALL) { 
goto print_addr; - } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { + } else if (annotate_opts.offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { goto print_addr; } else { printed = scnprintf(bf, sizeof(bf), "%-*s ", @@ -3310,19 +3304,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati } void annotation_line__write(struct annotation_line *al, struct annotation *notes, - struct annotation_write_ops *wops, - struct annotation_options *opts) + struct annotation_write_ops *wops) { __annotation_line__write(al, notes, wops->first_line, wops->current_entry, wops->change_color, wops->width, wops->obj, - opts->percent_type, + annotate_opts.percent_type, wops->set_color, wops->set_percent_color, wops->set_jumps_percent_color, wops->printf, wops->write_graph); } int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *options, struct arch **parch) + struct arch **parch) { struct symbol *sym = ms->sym; struct annotation *notes = symbol__annotation(sym); @@ -3336,11 +3329,11 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, if (evsel__is_group_event(evsel)) nr_pcnt = evsel->core.nr_members; - err = symbol__annotate(ms, evsel, options, parch); + err = symbol__annotate(ms, evsel, parch); if (err) goto out_free_offsets; - notes->options = options; + notes->options = &annotate_opts; symbol__calc_percent(sym, evsel); @@ -3468,10 +3461,9 @@ static unsigned int parse_percent_type(char *str1, char *str2) return type; } -int annotate_parse_percent_type(const struct option *opt, const char *_str, +int annotate_parse_percent_type(const struct option *opt __maybe_unused, const char *_str, int unset __maybe_unused) { - struct annotation_options *opts = opt->value; unsigned int type; char *str1, *str2; int err = -1; @@ -3490,7 +3482,7 @@ int annotate_parse_percent_type(const struct option *opt, const char *_str, if (type == (unsigned int) -1) type = parse_percent_type(str2, str1); if (type != (unsigned int) -1) { - opts->percent_type = type; + annotate_opts.percent_type = type; err = 0; } diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 8c1a070725fa..7bf29baa43f5 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -224,8 +224,7 @@ struct annotation_write_ops { }; void annotation_line__write(struct annotation_line *al, struct annotation *notes, - struct annotation_write_ops *ops, - struct annotation_options *opts); + struct annotation_write_ops *ops); int __annotation__scnprintf_samples_period(struct annotation *notes, char *bf, size_t size, @@ -375,11 +374,9 @@ void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *options, struct arch **parch); int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *options, struct arch **parch); enum symbol_disassemble_errno { @@ -406,20 +403,18 @@ enum symbol_disassemble_errno { int symbol__strerror_disassemble(struct map_symbol *ms, int errnum, char *buf, size_t buflen); -int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *options); +int symbol__annotate_printf(struct map_symbol *ms, struct evsel *evsel); void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); void annotated_source__purge(struct annotated_source *as); -int 
map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel, - struct annotation_options *opts); +int map_symbol__annotation_dump(struct map_symbol *ms, struct evsel *evsel); bool ui__has_annotation(void); -int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts); +int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel); -int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel, struct annotation_options *opts); +int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel); #ifdef HAVE_SLANG_SUPPORT int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, From 22197fb296913f83c7182befd2a8b23bf042f279 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:38 -0800 Subject: [PATCH 097/179] perf ui/browser/annotate: Use global annotation_options Now it can use the global options and no need save local browser options separately. Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-6-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 2 +- tools/perf/builtin-report.c | 8 ++-- tools/perf/builtin-top.c | 3 +- tools/perf/ui/browsers/annotate.c | 65 ++++++++++++++----------------- tools/perf/ui/browsers/hists.c | 34 ++++++---------- tools/perf/ui/browsers/hists.h | 2 - tools/perf/util/annotate.h | 6 +-- tools/perf/util/block-info.c | 6 +-- tools/perf/util/block-info.h | 3 +- tools/perf/util/hist.h | 25 ++++-------- 10 files changed, 59 insertions(+), 95 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 9c1e2b2b5bc0..d17213bd8332 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -381,7 +381,7 @@ static void hists__find_annotations(struct hists *hists, /* skip missing symbols */ nd = rb_next(nd); } else if (use_browser == 1) { - key = hist_entry__tui_annotate(he, evsel, NULL, &annotate_opts); + key = hist_entry__tui_annotate(he, evsel, NULL); switch (key) { case -1: diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 2b86651615cd..bc0d986c1e0c 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -540,8 +540,7 @@ static int evlist__tui_block_hists_browse(struct evlist *evlist, struct report * evlist__for_each_entry(evlist, pos) { ret = report__browse_block_hists(&rep->block_reports[i++].hist, rep->min_percent, pos, - &rep->session->header.env, - &annotate_opts); + &rep->session->header.env); if (ret != 0) return ret; } @@ -573,8 +572,7 @@ static int evlist__tty_browse_hists(struct evlist *evlist, struct report *rep, c if (rep->total_cycles_mode) { report__browse_block_hists(&rep->block_reports[i++].hist, - rep->min_percent, pos, - NULL, NULL); + rep->min_percent, pos, NULL); continue; } @@ -669,7 +667,7 @@ static int report__browse_hists(struct report *rep) } ret = evlist__tui_browse_hists(evlist, help, NULL, rep->min_percent, - &session->header.env, true, &annotate_opts); + &session->header.env, true); /* * Usually "ret" is the last pressed key, and we only * care if the key notifies us to switch data file. 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 46a61634701b..b5222d241983 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -646,8 +646,7 @@ static void *display_thread_tui(void *arg) } ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent, - &top->session->header.env, !top->record_opts.overwrite, - &annotate_opts); + &top->session->header.env, !top->record_opts.overwrite); if (ret == K_RELOAD) { top->zero = true; goto repeat; diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index ed0e692afdbe..fda17c1f2031 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -27,7 +27,6 @@ struct annotate_browser { struct rb_node *curr_hot; struct annotation_line *selection; struct arch *arch; - struct annotation_options *opts; bool searching_backwards; char search_bf[128]; }; @@ -97,7 +96,7 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int struct annotation_write_ops ops = { .first_line = row == 0, .current_entry = is_current_entry, - .change_color = (!notes->options->hide_src_code && + .change_color = (!annotate_opts.hide_src_code && (!is_current_entry || (browser->use_navkeypressed && !browser->navkeypressed))), @@ -128,7 +127,7 @@ static int is_fused(struct annotate_browser *ab, struct disasm_line *cursor) while (pos && pos->al.offset == -1) { pos = list_prev_entry(pos, al.node); - if (!ab->opts->hide_src_code) + if (!annotate_opts.hide_src_code) diff++; } @@ -195,7 +194,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser) return; } - if (notes->options->hide_src_code) { + if (annotate_opts.hide_src_code) { from = cursor->al.idx_asm; to = target->idx_asm; } else { @@ -224,7 +223,7 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser) int ret = ui_browser__list_head_refresh(browser); int pcnt_width = annotation__pcnt_width(notes); - if (notes->options->jump_arrows) + if (annotate_opts.jump_arrows) annotate_browser__draw_current_jump(browser); ui_browser__set_color(browser, HE_COLORSET_NORMAL); @@ -258,7 +257,7 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser, parent = *p; l = rb_entry(parent, struct annotation_line, rb_node); - if (disasm__cmp(al, l, browser->opts->percent_type) < 0) + if (disasm__cmp(al, l, annotate_opts.percent_type) < 0) p = &(*p)->rb_left; else p = &(*p)->rb_right; @@ -294,11 +293,10 @@ static void annotate_browser__set_top(struct annotate_browser *browser, static void annotate_browser__set_rb_top(struct annotate_browser *browser, struct rb_node *nd) { - struct annotation *notes = browser__annotation(&browser->b); struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node); u32 idx = pos->idx; - if (notes->options->hide_src_code) + if (annotate_opts.hide_src_code) idx = pos->idx_asm; annotate_browser__set_top(browser, pos, idx); browser->curr_hot = nd; @@ -331,7 +329,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, double percent; percent = annotation_data__percent(&pos->al.data[i], - browser->opts->percent_type); + annotate_opts.percent_type); if (max_percent < percent) max_percent = percent; @@ -380,12 +378,12 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) browser->b.seek(&browser->b, offset, SEEK_CUR); al = list_entry(browser->b.top, struct annotation_line, node); - if (notes->options->hide_src_code) { + if (annotate_opts.hide_src_code) { if (al->idx_asm < 
offset) offset = al->idx; browser->b.nr_entries = notes->src->nr_entries; - notes->options->hide_src_code = false; + annotate_opts.hide_src_code = false; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx - offset; browser->b.index = al->idx; @@ -403,7 +401,7 @@ static bool annotate_browser__toggle_source(struct annotate_browser *browser) offset = al->idx_asm; browser->b.nr_entries = notes->src->nr_asm_entries; - notes->options->hide_src_code = true; + annotate_opts.hide_src_code = true; browser->b.seek(&browser->b, -offset, SEEK_CUR); browser->b.top_idx = al->idx_asm - offset; browser->b.index = al->idx_asm; @@ -483,8 +481,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser, target_ms.map = ms->map; target_ms.sym = dl->ops.target.sym; annotation__unlock(notes); - symbol__tui_annotate(&target_ms, evsel, hbt, browser->opts); - sym_title(ms->sym, ms->map, title, sizeof(title), browser->opts->percent_type); + symbol__tui_annotate(&target_ms, evsel, hbt); + sym_title(ms->sym, ms->map, title, sizeof(title), annotate_opts.percent_type); ui_browser__show_title(&browser->b, title); return true; } @@ -659,7 +657,6 @@ bool annotate_browser__continue_search_reverse(struct annotate_browser *browser, static int annotate_browser__show(struct ui_browser *browser, char *title, const char *help) { - struct annotate_browser *ab = container_of(browser, struct annotate_browser, b); struct map_symbol *ms = browser->priv; struct symbol *sym = ms->sym; char symbol_dso[SYM_TITLE_MAX_SIZE]; @@ -667,7 +664,7 @@ static int annotate_browser__show(struct ui_browser *browser, char *title, const if (ui_browser__show(browser, title, help) < 0) return -1; - sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), ab->opts->percent_type); + sym_title(sym, ms->map, symbol_dso, sizeof(symbol_dso), annotate_opts.percent_type); ui_browser__gotorc_title(browser, 0, 0); ui_browser__set_color(browser, HE_COLORSET_ROOT); @@ -809,7 +806,7 @@ static int annotate_browser__run(struct annotate_browser *browser, annotate_browser__show(&browser->b, title, help); continue; case 'k': - notes->options->show_linenr = !notes->options->show_linenr; + annotate_opts.show_linenr = !annotate_opts.show_linenr; continue; case 'l': annotate_browser__show_full_location (&browser->b); @@ -822,18 +819,18 @@ static int annotate_browser__run(struct annotate_browser *browser, ui_helpline__puts(help); continue; case 'o': - notes->options->use_offset = !notes->options->use_offset; + annotate_opts.use_offset = !annotate_opts.use_offset; annotation__update_column_widths(notes); continue; case 'O': - if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) - notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; + if (++annotate_opts.offset_level > ANNOTATION__MAX_OFFSET_LEVEL) + annotate_opts.offset_level = ANNOTATION__MIN_OFFSET_LEVEL; continue; case 'j': - notes->options->jump_arrows = !notes->options->jump_arrows; + annotate_opts.jump_arrows = !annotate_opts.jump_arrows; continue; case 'J': - notes->options->show_nr_jumps = !notes->options->show_nr_jumps; + annotate_opts.show_nr_jumps = !annotate_opts.show_nr_jumps; annotation__update_column_widths(notes); continue; case '/': @@ -897,15 +894,15 @@ static int annotate_browser__run(struct annotate_browser *browser, annotation__update_column_widths(notes); continue; case 'c': - if (notes->options->show_minmax_cycle) - notes->options->show_minmax_cycle = false; + if (annotate_opts.show_minmax_cycle) + annotate_opts.show_minmax_cycle = false; else - 
notes->options->show_minmax_cycle = true; + annotate_opts.show_minmax_cycle = true; annotation__update_column_widths(notes); continue; case 'p': case 'b': - switch_percent_type(browser->opts, key == 'b'); + switch_percent_type(&annotate_opts, key == 'b'); hists__scnprintf_title(hists, title, sizeof(title)); annotate_browser__show(&browser->b, title, help); continue; @@ -932,26 +929,23 @@ static int annotate_browser__run(struct annotate_browser *browser, } int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { - return symbol__tui_annotate(ms, evsel, hbt, opts); + return symbol__tui_annotate(ms, evsel, hbt); } int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); SLang_init_tty(0, 0, 0); - return map_symbol__tui_annotate(&he->ms, evsel, hbt, opts); + return map_symbol__tui_annotate(&he->ms, evsel, hbt); } int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts) + struct hist_browser_timer *hbt) { struct symbol *sym = ms->sym; struct annotation *notes = symbol__annotation(sym); @@ -965,7 +959,6 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, .priv = ms, .use_navkeypressed = true, }, - .opts = opts, }; struct dso *dso; int ret = -1, err; @@ -996,7 +989,7 @@ int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, browser.b.entries = ¬es->src->source, browser.b.width += 18; /* Percentage */ - if (notes->options->hide_src_code) + if (annotate_opts.hide_src_code) ui_browser__init_asm_mode(&browser.b); ret = annotate_browser__run(&browser, evsel, hbt); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index f4812b226818..3061dea29e6b 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2250,8 +2250,7 @@ struct hist_browser *hist_browser__new(struct hists *hists) static struct hist_browser * perf_evsel_browser__new(struct evsel *evsel, struct hist_browser_timer *hbt, - struct perf_env *env, - struct annotation_options *annotation_opts) + struct perf_env *env) { struct hist_browser *browser = hist_browser__new(evsel__hists(evsel)); @@ -2259,7 +2258,6 @@ perf_evsel_browser__new(struct evsel *evsel, browser->hbt = hbt; browser->env = env; browser->title = hists_browser__scnprintf_title; - browser->annotation_opts = annotation_opts; } return browser; } @@ -2432,8 +2430,8 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) struct hist_entry *he; int err; - if (!browser->annotation_opts->objdump_path && - perf_env__lookup_objdump(browser->env, &browser->annotation_opts->objdump_path)) + if (!annotate_opts.objdump_path && + perf_env__lookup_objdump(browser->env, &annotate_opts.objdump_path)) return 0; notes = symbol__annotation(act->ms.sym); @@ -2445,8 +2443,7 @@ do_annotate(struct hist_browser *browser, struct popup_action *act) else evsel = hists_to_evsel(browser->hists); - err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt, - browser->annotation_opts); + err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt); he = hist_browser__selected_entry(browser); /* * offer option to annotate the other branch source or target @@ -2943,11 +2940,10 @@ static void 
hist_browser__update_percent_limit(struct hist_browser *hb, static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *helpline, bool left_exits, struct hist_browser_timer *hbt, float min_pcnt, - struct perf_env *env, bool warn_lost_event, - struct annotation_options *annotation_opts) + struct perf_env *env, bool warn_lost_event) { struct hists *hists = evsel__hists(evsel); - struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts); + struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env); struct branch_info *bi = NULL; #define MAX_OPTIONS 16 char *options[MAX_OPTIONS]; @@ -3398,7 +3394,6 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h struct evsel_menu { struct ui_browser b; struct evsel *selection; - struct annotation_options *annotation_opts; bool lost_events, lost_events_warned; float min_pcnt; struct perf_env *env; @@ -3499,8 +3494,7 @@ static int perf_evsel_menu__run(struct evsel_menu *menu, hbt->timer(hbt->arg); key = evsel__hists_browse(pos, nr_events, help, true, hbt, menu->min_pcnt, menu->env, - warn_lost_event, - menu->annotation_opts); + warn_lost_event); ui_browser__show_title(&menu->b, title); switch (key) { case K_TAB: @@ -3557,7 +3551,7 @@ static bool filter_group_entries(struct ui_browser *browser __maybe_unused, static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, const char *help, struct hist_browser_timer *hbt, float min_pcnt, struct perf_env *env, - bool warn_lost_event, struct annotation_options *annotation_opts) + bool warn_lost_event) { struct evsel *pos; struct evsel_menu menu = { @@ -3572,7 +3566,6 @@ static int __evlist__tui_browse_hists(struct evlist *evlist, int nr_entries, con }, .min_pcnt = min_pcnt, .env = env, - .annotation_opts = annotation_opts, }; ui_helpline__push("Press ESC to exit"); @@ -3607,8 +3600,7 @@ static bool evlist__single_entry(struct evlist *evlist) } int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, - float min_pcnt, struct perf_env *env, bool warn_lost_event, - struct annotation_options *annotation_opts) + float min_pcnt, struct perf_env *env, bool warn_lost_event) { int nr_entries = evlist->core.nr_entries; @@ -3617,7 +3609,7 @@ single_entry: { struct evsel *first = evlist__first(evlist); return evsel__hists_browse(first, nr_entries, help, false, hbt, min_pcnt, - env, warn_lost_event, annotation_opts); + env, warn_lost_event); } } @@ -3635,7 +3627,7 @@ single_entry: { } return __evlist__tui_browse_hists(evlist, nr_entries, help, hbt, min_pcnt, env, - warn_lost_event, annotation_opts); + warn_lost_event); } static int block_hists_browser__title(struct hist_browser *browser, char *bf, @@ -3654,8 +3646,7 @@ static int block_hists_browser__title(struct hist_browser *browser, char *bf, } int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, - float min_percent, struct perf_env *env, - struct annotation_options *annotation_opts) + float min_percent, struct perf_env *env) { struct hists *hists = &bh->block_hists; struct hist_browser *browser; @@ -3672,7 +3663,6 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, browser->title = block_hists_browser__title; browser->min_pcnt = min_percent; browser->env = env; - browser->annotation_opts = annotation_opts; /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); diff --git a/tools/perf/ui/browsers/hists.h b/tools/perf/ui/browsers/hists.h index 
1e938d9ffa5e..de46f6c56b0e 100644 --- a/tools/perf/ui/browsers/hists.h +++ b/tools/perf/ui/browsers/hists.h @@ -4,7 +4,6 @@ #include "ui/browser.h" -struct annotation_options; struct evsel; struct hist_browser { @@ -15,7 +14,6 @@ struct hist_browser { struct hist_browser_timer *hbt; struct pstack *pstack; struct perf_env *env; - struct annotation_options *annotation_opts; struct evsel *block_evsel; int print_seq; bool show_dso; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 7bf29baa43f5..857c5fa0e6b1 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -418,13 +418,11 @@ int symbol__tty_annotate2(struct map_symbol *ms, struct evsel *evsel); #ifdef HAVE_SLANG_SUPPORT int symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *opts); + struct hist_browser_timer *hbt); #else static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *opts __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; } diff --git a/tools/perf/util/block-info.c b/tools/perf/util/block-info.c index 08f82c1f166c..dec910989701 100644 --- a/tools/perf/util/block-info.c +++ b/tools/perf/util/block-info.c @@ -464,8 +464,7 @@ void block_info__free_report(struct block_report *reps, int nr_reps) } int report__browse_block_hists(struct block_hist *bh, float min_percent, - struct evsel *evsel, struct perf_env *env, - struct annotation_options *annotation_opts) + struct evsel *evsel, struct perf_env *env) { int ret; @@ -477,8 +476,7 @@ int report__browse_block_hists(struct block_hist *bh, float min_percent, return 0; case 1: symbol_conf.report_individual_block = true; - ret = block_hists_tui_browse(bh, evsel, min_percent, - env, annotation_opts); + ret = block_hists_tui_browse(bh, evsel, min_percent, env); return ret; default: return -1; diff --git a/tools/perf/util/block-info.h b/tools/perf/util/block-info.h index 42e9dcc4cf0a..96f53e89795e 100644 --- a/tools/perf/util/block-info.h +++ b/tools/perf/util/block-info.h @@ -78,8 +78,7 @@ struct block_report *block_info__create_report(struct evlist *evlist, void block_info__free_report(struct block_report *reps, int nr_reps); int report__browse_block_hists(struct block_hist *bh, float min_percent, - struct evsel *evsel, struct perf_env *env, - struct annotation_options *annotation_opts); + struct evsel *evsel, struct perf_env *env); float block_info__total_cycles_percent(struct hist_entry *he); diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index afc9f1c7f4dc..5d0db96609df 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -457,7 +457,6 @@ struct hist_browser_timer { int refresh; }; -struct annotation_options; struct res_sample; enum rstype { @@ -473,16 +472,13 @@ struct block_hist; void attr_to_script(char *buf, struct perf_event_attr *attr); int map_symbol__tui_annotate(struct map_symbol *ms, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *annotation_opts); + struct hist_browser_timer *hbt); int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, - struct hist_browser_timer *hbt, - struct annotation_options *annotation_opts); + struct hist_browser_timer *hbt); int evlist__tui_browse_hists(struct evlist *evlist, const char *help, struct hist_browser_timer *hbt, - float min_pcnt, struct perf_env *env, bool warn_lost_event, - 
struct annotation_options *annotation_options); + float min_pcnt, struct perf_env *env, bool warn_lost_event); int script_browse(const char *script_opt, struct evsel *evsel); @@ -492,8 +488,7 @@ int res_sample_browse(struct res_sample *res_samples, int num_res, void res_sample_init(void); int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, - float min_percent, struct perf_env *env, - struct annotation_options *annotation_opts); + float min_percent, struct perf_env *env); #else static inline int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, @@ -501,23 +496,20 @@ int evlist__tui_browse_hists(struct evlist *evlist __maybe_unused, struct hist_browser_timer *hbt __maybe_unused, float min_pcnt __maybe_unused, struct perf_env *env __maybe_unused, - bool warn_lost_event __maybe_unused, - struct annotation_options *annotation_options __maybe_unused) + bool warn_lost_event __maybe_unused) { return 0; } static inline int map_symbol__tui_annotate(struct map_symbol *ms __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *annotation_options __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; } static inline int hist_entry__tui_annotate(struct hist_entry *he __maybe_unused, struct evsel *evsel __maybe_unused, - struct hist_browser_timer *hbt __maybe_unused, - struct annotation_options *annotation_opts __maybe_unused) + struct hist_browser_timer *hbt __maybe_unused) { return 0; } @@ -541,8 +533,7 @@ static inline void res_sample_init(void) {} static inline int block_hists_tui_browse(struct block_hist *bh __maybe_unused, struct evsel *evsel __maybe_unused, float min_percent __maybe_unused, - struct perf_env *env __maybe_unused, - struct annotation_options *annotation_opts __maybe_unused) + struct perf_env *env __maybe_unused) { return 0; } From 7f929aea21fd0be5e0d9ee5827d5b809daa69f29 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:39 -0800 Subject: [PATCH 098/179] perf annotate: Ensure init/exit for global options Now it only cares about the global options so it can just handle it without the argument. 
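For illustration, the callers converted below now follow roughly this lifecycle (a minimal sketch of the global-option flow; ordering and error handling are simplified compared to the actual builtins):

    annotation_options__init();     /* zero the global annotate_opts and set defaults */
    annotation_config__init();      /* overlay values from the perf config file */

    /* ... command line parsing ... */

    if (annotate_check_args() < 0)  /* e.g. --prefix-strip without --prefix is rejected */
            return -EINVAL;

    /* ... annotation work ... */

    annotation_options__exit();     /* free the objdump path and disassembler style strings */
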
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-7-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 8 ++++---- tools/perf/builtin-report.c | 8 ++++---- tools/perf/builtin-top.c | 8 ++++---- tools/perf/util/annotate.c | 19 +++++++++++-------- tools/perf/util/annotate.h | 8 ++++---- 5 files changed, 27 insertions(+), 24 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index d17213bd8332..d880f1b039fd 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -615,13 +615,13 @@ int cmd_annotate(int argc, const char **argv) set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE); set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE); - annotation_options__init(&annotate_opts); + annotation_options__init(); ret = hists__init(); if (ret < 0) return ret; - annotation_config__init(&annotate_opts); + annotation_config__init(); argc = parse_options(argc, argv, options, annotate_usage, 0); if (argc) { @@ -651,7 +651,7 @@ int cmd_annotate(int argc, const char **argv) return -ENOMEM; } - if (annotate_check_args(&annotate_opts) < 0) + if (annotate_check_args() < 0) return -EINVAL; #ifdef HAVE_GTK2_SUPPORT @@ -732,7 +732,7 @@ int cmd_annotate(int argc, const char **argv) #ifndef NDEBUG perf_session__delete(annotate.session); #endif - annotation_options__exit(&annotate_opts); + annotation_options__exit(); return ret; } diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index bc0d986c1e0c..17fb171e898b 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -1430,7 +1430,7 @@ int cmd_report(int argc, const char **argv) */ symbol_conf.keep_exited_threads = true; - annotation_options__init(&annotate_opts); + annotation_options__init(); ret = perf_config(report__config, &report); if (ret) @@ -1464,7 +1464,7 @@ int cmd_report(int argc, const char **argv) return -ENOMEM; } - if (annotate_check_args(&annotate_opts) < 0) { + if (annotate_check_args() < 0) { ret = -EINVAL; goto exit; } @@ -1696,7 +1696,7 @@ int cmd_report(int argc, const char **argv) */ symbol_conf.priv_size += sizeof(u32); } - annotation_config__init(&annotate_opts); + annotation_config__init(); } if (symbol__init(&session->header.env) < 0) @@ -1750,7 +1750,7 @@ int cmd_report(int argc, const char **argv) zstd_fini(&(session->zstd_data)); perf_session__delete(session); exit: - annotation_options__exit(&annotate_opts); + annotation_options__exit(); free(sort_order_help); free(field_order_help); return ret; diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index b5222d241983..ed83afeeced0 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1608,7 +1608,7 @@ int cmd_top(int argc, const char **argv) if (status < 0) return status; - annotation_options__init(&annotate_opts); + annotation_options__init(); annotate_opts.min_pcnt = 5; annotate_opts.context = 4; @@ -1660,7 +1660,7 @@ int cmd_top(int argc, const char **argv) if (status) goto out_delete_evlist; - if (annotate_check_args(&annotate_opts) < 0) + if (annotate_check_args() < 0) goto out_delete_evlist; if (!top.evlist->core.nr_entries) { @@ -1786,7 +1786,7 @@ int cmd_top(int argc, const char **argv) if (status < 0) goto out_delete_evlist; - annotation_config__init(&annotate_opts); + annotation_config__init(); symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name 
== NULL); status = symbol__init(NULL); @@ -1839,7 +1839,7 @@ int cmd_top(int argc, const char **argv) out_delete_evlist: evlist__delete(top.evlist); perf_session__delete(top.session); - annotation_options__exit(&annotate_opts); + annotation_options__exit(); return status; } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index daff9af552f4..626ff3baeb85 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -3416,8 +3416,10 @@ static int annotation__config(const char *var, const char *value, void *data) return 0; } -void annotation_options__init(struct annotation_options *opt) +void annotation_options__init(void) { + struct annotation_options *opt = &annotate_opts; + memset(opt, 0, sizeof(*opt)); /* Default values. */ @@ -3428,16 +3430,15 @@ void annotation_options__init(struct annotation_options *opt) opt->percent_type = PERCENT_PERIOD_LOCAL; } - -void annotation_options__exit(struct annotation_options *opt) +void annotation_options__exit(void) { - zfree(&opt->disassembler_style); - zfree(&opt->objdump_path); + zfree(&annotate_opts.disassembler_style); + zfree(&annotate_opts.objdump_path); } -void annotation_config__init(struct annotation_options *opt) +void annotation_config__init(void) { - perf_config(annotation__config, opt); + perf_config(annotation__config, &annotate_opts); } static unsigned int parse_percent_type(char *str1, char *str2) @@ -3491,8 +3492,10 @@ int annotate_parse_percent_type(const struct option *opt __maybe_unused, const c return err; } -int annotate_check_args(struct annotation_options *args) +int annotate_check_args(void) { + struct annotation_options *args = &annotate_opts; + if (args->prefix_strip && !args->prefix) { pr_err("--prefix-strip requires --prefix\n"); return -1; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 857c5fa0e6b1..4283eb4522b2 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -428,14 +428,14 @@ static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused, } #endif -void annotation_options__init(struct annotation_options *opt); -void annotation_options__exit(struct annotation_options *opt); +void annotation_options__init(void); +void annotation_options__exit(void); -void annotation_config__init(struct annotation_options *opt); +void annotation_config__init(void); int annotate_parse_percent_type(const struct option *opt, const char *_str, int unset); -int annotate_check_args(struct annotation_options *args); +int annotate_check_args(void); #endif /* __PERF_ANNOTATE_H */ From 2fa21d694c63081f26444847c916e5fc83bcefa1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:40 -0800 Subject: [PATCH 099/179] perf annotate: Remove remaining usages of local annotation options So that it can get rid of the unused data. 
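Consolidated from the hunks that follow (repeated here only for readability), the line filter and a typical caller end up looking like:

    static inline bool annotation_line__filter(struct annotation_line *al)
    {
            return annotate_opts.hide_src_code && al->offset == -1;
    }

    /* callers no longer need a struct annotation just to filter lines */
    list_for_each_entry(al, &notes->src->source, node) {
            if (annotation_line__filter(al))
                    continue;
            /* ... emit the remaining line ... */
    }
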
Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-8-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 14 ++++++-------- tools/perf/util/annotate.c | 2 +- tools/perf/util/annotate.h | 6 +++--- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index fda17c1f2031..cb2eb6dcb532 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -37,11 +37,10 @@ static inline struct annotation *browser__annotation(struct ui_browser *browser) return symbol__annotation(ms->sym); } -static bool disasm_line__filter(struct ui_browser *browser, void *entry) +static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, void *entry) { - struct annotation *notes = browser__annotation(browser); struct annotation_line *al = list_entry(entry, struct annotation_line, node); - return annotation_line__filter(al, notes); + return annotation_line__filter(al); } static int ui_browser__jumps_percent_color(struct ui_browser *browser, int nr, bool current) @@ -269,7 +268,6 @@ static void disasm_rb_tree__insert(struct annotate_browser *browser, static void annotate_browser__set_top(struct annotate_browser *browser, struct annotation_line *pos, u32 idx) { - struct annotation *notes = browser__annotation(&browser->b); unsigned back; ui_browser__refresh_dimensions(&browser->b); @@ -279,7 +277,7 @@ static void annotate_browser__set_top(struct annotate_browser *browser, while (browser->b.top_idx != 0 && back != 0) { pos = list_entry(pos->node.prev, struct annotation_line, node); - if (annotation_line__filter(pos, notes)) + if (annotation_line__filter(pos)) continue; --browser->b.top_idx; @@ -498,7 +496,7 @@ struct disasm_line *annotate_browser__find_offset(struct annotate_browser *brows list_for_each_entry(pos, ¬es->src->source, al.node) { if (pos->al.offset == offset) return pos; - if (!annotation_line__filter(&pos->al, notes)) + if (!annotation_line__filter(&pos->al)) ++*idx; } @@ -542,7 +540,7 @@ struct annotation_line *annotate_browser__find_string(struct annotate_browser *b *idx = browser->b.index; list_for_each_entry_continue(al, ¬es->src->source, node) { - if (annotation_line__filter(al, notes)) + if (annotation_line__filter(al)) continue; ++*idx; @@ -579,7 +577,7 @@ struct annotation_line *annotate_browser__find_string_reverse(struct annotate_br *idx = browser->b.index; list_for_each_entry_continue_reverse(al, ¬es->src->source, node) { - if (annotation_line__filter(al, notes)) + if (annotation_line__filter(al)) continue; --*idx; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 626ff3baeb85..09c399ab0384 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2727,7 +2727,7 @@ static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp) struct annotation_line *al; list_for_each_entry(al, ¬es->src->source, node) { - if (annotation_line__filter(al, notes)) + if (annotation_line__filter(al)) continue; annotation_line__write(al, notes, &wops); fputc('\n', fp); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 4283eb4522b2..6d5a6bb49a97 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -320,7 +320,7 @@ bool annotation__trylock(struct annotation *notes) EXCLUSIVE_TRYLOCK_FUNCTION(tr static inline int 
annotation__cycles_width(struct annotation *notes) { - if (notes->branch && notes->options->show_minmax_cycle) + if (notes->branch && annotate_opts.show_minmax_cycle) return ANNOTATION__IPC_WIDTH + ANNOTATION__MINMAX_CYCLES_WIDTH; return notes->branch ? ANNOTATION__IPC_WIDTH + ANNOTATION__CYCLES_WIDTH : 0; @@ -331,9 +331,9 @@ static inline int annotation__pcnt_width(struct annotation *notes) return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events; } -static inline bool annotation_line__filter(struct annotation_line *al, struct annotation *notes) +static inline bool annotation_line__filter(struct annotation_line *al) { - return notes->options->hide_src_code && al->offset == -1; + return annotate_opts.hide_src_code && al->offset == -1; } void annotation__set_offsets(struct annotation *notes, s64 size); From 327f7533cc596427b62f431c8852951412b6c0dc Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 28 Nov 2023 09:54:41 -0800 Subject: [PATCH 100/179] perf annotate: Get rid of local annotation options It doesn't need the option in the struct annotation which is allocated for each symbol. It can directly use the global options and save 8 bytes per symbol. Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ingo Molnar Cc: Jiri Olsa Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231128175441.721579-9-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 2 -- tools/perf/util/annotate.h | 1 - 2 files changed, 3 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 09c399ab0384..c81fa0791918 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -3333,8 +3333,6 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel, if (err) goto out_free_offsets; - notes->options = &annotate_opts; - symbol__calc_percent(sym, evsel); annotation__set_offsets(notes, size); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 6d5a6bb49a97..589f8aaf0236 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -294,7 +294,6 @@ struct annotated_branch { struct LOCKABLE annotation { u64 start; - struct annotation_options *options; int nr_events; int max_jump_sources; struct { From 8596ba324356a7392a6639024de8c9ae7a9fce92 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 29 Nov 2023 14:35:40 -0800 Subject: [PATCH 101/179] perf stat: Fix help message for --metric-no-threshold option Copy-paste error led to help message for metric-no-threshold repeating that of metric-no-merge. 
Fixes: 1fd09e299bdd434b ("perf metric: Add --metric-no-threshold option") Reported-by: Stephane Eranian Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231129223540.2247030-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-stat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index d22228eddccb..0bfa70791cfc 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1255,7 +1255,7 @@ static struct option stat_options[] = { OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, "don't try to share events between metrics in a group"), OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold, - "don't try to share events between metrics in a group "), + "disable adding events for the metric threshold calculation"), OPT_BOOLEAN(0, "topdown", &topdown_run, "measure top-down statistics"), OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, From 48219b089d84f109e8a81d8a7fa1bbc2e6e5f97d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:01:58 -0800 Subject: [PATCH 102/179] libperf cpumap: Rename perf_cpu_map__dummy_new() to perf_cpu_map__new_any_cpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename perf_cpu_map__dummy_new() to perf_cpu_map__new_any_cpu() to better indicate this is creating a CPU map for the perf_event_open "any" CPU case. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Jajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Cc: bpf@vger.kernel.org Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231129060211.1890454-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/Documentation/libperf.txt | 2 +- tools/lib/perf/cpumap.c | 4 ++-- tools/lib/perf/evsel.c | 2 +- tools/lib/perf/include/perf/cpumap.h | 4 ++-- tools/lib/perf/libperf.map | 2 +- tools/lib/perf/tests/test-cpumap.c | 2 +- tools/lib/perf/tests/test-evlist.c | 2 +- tools/perf/tests/cpumap.c | 2 +- tools/perf/tests/sw-clock.c | 2 +- tools/perf/tests/task-exit.c | 2 +- tools/perf/util/evlist.c | 2 +- tools/perf/util/evsel.c | 2 +- 12 files changed, 14 insertions(+), 14 deletions(-) diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt index a8f1a237931b..a256a26598b0 100644 --- a/tools/lib/perf/Documentation/libperf.txt +++ b/tools/lib/perf/Documentation/libperf.txt @@ -37,7 +37,7 @@ SYNOPSIS struct perf_cpu_map; - struct perf_cpu_map *perf_cpu_map__dummy_new(void); + struct perf_cpu_map *perf_cpu_map__new_any_cpu(void); struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); struct perf_cpu_map *perf_cpu_map__read(FILE *file); struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index 2a5a29217374..2bd6aba3d8c9 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -27,7 +27,7 @@ struct perf_cpu_map *perf_cpu_map__alloc(int nr_cpus) return result; } -struct perf_cpu_map *perf_cpu_map__dummy_new(void) +struct perf_cpu_map *perf_cpu_map__new_any_cpu(void) { struct perf_cpu_map *cpus = perf_cpu_map__alloc(1); @@ -271,7 +271,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) else if (*cpu_list != '\0') cpus = cpu_map__default_new(); else - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); invalid: free(tmp_cpus); out: diff --git a/tools/lib/perf/evsel.c b/tools/lib/perf/evsel.c index 8b51b008a81f..c07160953224 100644 --- a/tools/lib/perf/evsel.c +++ b/tools/lib/perf/evsel.c @@ -120,7 +120,7 @@ int perf_evsel__open(struct perf_evsel *evsel, struct perf_cpu_map *cpus, static struct perf_cpu_map *empty_cpu_map; if (empty_cpu_map == NULL) { - empty_cpu_map = perf_cpu_map__dummy_new(); + empty_cpu_map = perf_cpu_map__new_any_cpu(); if (empty_cpu_map == NULL) return -ENOMEM; } diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index e38d859a384d..d0bf218ada11 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -19,9 +19,9 @@ struct perf_cache { struct perf_cpu_map; /** - * perf_cpu_map__dummy_new - a map with a singular "any CPU"/dummy -1 value. + * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value. 
*/ -LIBPERF_API struct perf_cpu_map *perf_cpu_map__dummy_new(void); +LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void); LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index 190b56ae923a..a8ff64baea3e 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -1,7 +1,7 @@ LIBPERF_0.0.1 { global: libperf_init; - perf_cpu_map__dummy_new; + perf_cpu_map__new_any_cpu; perf_cpu_map__default_new; perf_cpu_map__get; perf_cpu_map__put; diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c index 87b0510a556f..2c359bdb951e 100644 --- a/tools/lib/perf/tests/test-cpumap.c +++ b/tools/lib/perf/tests/test-cpumap.c @@ -21,7 +21,7 @@ int test_cpumap(int argc, char **argv) libperf_init(libperf_print); - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); if (!cpus) return -1; diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index ed616fc19b4f..ab63878bacb9 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -261,7 +261,7 @@ static int test_mmap_thread(void) threads = perf_thread_map__new_dummy(); __T("failed to create threads", threads); - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); __T("failed to create cpus", cpus); perf_thread_map__set_pid(threads, 0, pid); diff --git a/tools/perf/tests/cpumap.c b/tools/perf/tests/cpumap.c index 7730fc2ab40b..bd8e396f3e57 100644 --- a/tools/perf/tests/cpumap.c +++ b/tools/perf/tests/cpumap.c @@ -213,7 +213,7 @@ static int test__cpu_map_intersect(struct test_suite *test __maybe_unused, static int test__cpu_map_equal(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { - struct perf_cpu_map *any = perf_cpu_map__dummy_new(); + struct perf_cpu_map *any = perf_cpu_map__new_any_cpu(); struct perf_cpu_map *one = perf_cpu_map__new("1"); struct perf_cpu_map *two = perf_cpu_map__new("2"); struct perf_cpu_map *empty = perf_cpu_map__intersect(one, two); diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c index 4d7493fa0105..290716783ac6 100644 --- a/tools/perf/tests/sw-clock.c +++ b/tools/perf/tests/sw-clock.c @@ -62,7 +62,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id) } evlist__add(evlist, evsel); - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); threads = thread_map__new_by_tid(getpid()); if (!cpus || !threads) { err = -ENOMEM; diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c index 968dddde6dda..d33d0952025c 100644 --- a/tools/perf/tests/task-exit.c +++ b/tools/perf/tests/task-exit.c @@ -70,7 +70,7 @@ static int test__task_exit(struct test_suite *test __maybe_unused, int subtest _ * evlist__prepare_workload we'll fill in the only thread * we're monitoring, the one forked there. 
*/ - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); threads = thread_map__new_by_tid(-1); if (!cpus || !threads) { err = -ENOMEM; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index e36da58522ef..ff7f85ded89d 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1056,7 +1056,7 @@ int evlist__create_maps(struct evlist *evlist, struct target *target) return -1; if (target__uses_dummy_map(target)) - cpus = perf_cpu_map__dummy_new(); + cpus = perf_cpu_map__new_any_cpu(); else cpus = perf_cpu_map__new(target->cpu_list); diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 532f34d9fcb5..6d7c9c58a9bc 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -1801,7 +1801,7 @@ static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus, if (cpus == NULL) { if (empty_cpu_map == NULL) { - empty_cpu_map = perf_cpu_map__dummy_new(); + empty_cpu_map = perf_cpu_map__new_any_cpu(); if (empty_cpu_map == NULL) return -ENOMEM; } From 8f60f870a9af53295ab4301da05ca453f115a6b6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:01:59 -0800 Subject: [PATCH 103/179] libperf cpumap: Rename perf_cpu_map__default_new() to perf_cpu_map__new_online_cpus() and prefer sysfs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename perf_cpu_map__default_new() to perf_cpu_map__new_online_cpus() to better indicate what the implementation does. Read the online CPUs from /sys/devices/system/cpu/online first before using sysconf() as it can't accurately configure holes in the CPU map. If sysconf() is used, warn when the configured and online processors disagree. When reading from a file, if the read doesn't yield a CPU map then return an empty map rather than the default online. This avoids recursion but also better yields being able to detect failures. Add more comments. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Jajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Cc: bpf@vger.kernel.org Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231129060211.1890454-3-irogers@google.com [ s/syfs/sysfs/g typo ] Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/cpumap.c | 59 +++++++++++++++++----------- tools/lib/perf/include/perf/cpumap.h | 15 ++++++- tools/lib/perf/libperf.map | 2 +- tools/lib/perf/tests/test-cpumap.c | 2 +- 4 files changed, 51 insertions(+), 27 deletions(-) diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index 2bd6aba3d8c9..3aa80d0d26e8 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -9,6 +9,7 @@ #include #include #include +#include "internal.h" void perf_cpu_map__set_nr(struct perf_cpu_map *map, int nr_cpus) { @@ -66,15 +67,21 @@ void perf_cpu_map__put(struct perf_cpu_map *map) } } -static struct perf_cpu_map *cpu_map__default_new(void) +static struct perf_cpu_map *cpu_map__new_sysconf(void) { struct perf_cpu_map *cpus; - int nr_cpus; + int nr_cpus, nr_cpus_conf; nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); if (nr_cpus < 0) return NULL; + nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF); + if (nr_cpus != nr_cpus_conf) { + pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.", + nr_cpus, nr_cpus_conf, nr_cpus); + } + cpus = perf_cpu_map__alloc(nr_cpus); if (cpus != NULL) { int i; @@ -86,9 +93,27 @@ static struct perf_cpu_map *cpu_map__default_new(void) return cpus; } -struct perf_cpu_map *perf_cpu_map__default_new(void) +static struct perf_cpu_map *cpu_map__new_sysfs_online(void) { - return cpu_map__default_new(); + struct perf_cpu_map *cpus = NULL; + FILE *onlnf; + + onlnf = fopen("/sys/devices/system/cpu/online", "r"); + if (onlnf) { + cpus = perf_cpu_map__read(onlnf); + fclose(onlnf); + } + return cpus; +} + +struct perf_cpu_map *perf_cpu_map__new_online_cpus(void) +{ + struct perf_cpu_map *cpus = cpu_map__new_sysfs_online(); + + if (cpus) + return cpus; + + return cpu_map__new_sysconf(); } @@ -180,27 +205,11 @@ struct perf_cpu_map *perf_cpu_map__read(FILE *file) if (nr_cpus > 0) cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); - else - cpus = cpu_map__default_new(); out_free_tmp: free(tmp_cpus); return cpus; } -static struct perf_cpu_map *cpu_map__read_all_cpu_map(void) -{ - struct perf_cpu_map *cpus = NULL; - FILE *onlnf; - - onlnf = fopen("/sys/devices/system/cpu/online", "r"); - if (!onlnf) - return cpu_map__default_new(); - - cpus = perf_cpu_map__read(onlnf); - fclose(onlnf); - return cpus; -} - struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) { struct perf_cpu_map *cpus = NULL; @@ -211,7 +220,7 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) int max_entries = 0; if (!cpu_list) - return cpu_map__read_all_cpu_map(); + return perf_cpu_map__new_online_cpus(); /* * must handle the case of empty cpumap to cover @@ -268,9 +277,11 @@ struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list) if (nr_cpus > 0) cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); - else if (*cpu_list != '\0') - cpus = cpu_map__default_new(); - else + else if (*cpu_list != '\0') { + pr_warning("Unexpected characters at end of cpu list ('%s'), using online CPUs.", + cpu_list); + cpus = perf_cpu_map__new_online_cpus(); + } else cpus = perf_cpu_map__new_any_cpu(); invalid: free(tmp_cpus); diff --git a/tools/lib/perf/include/perf/cpumap.h 
b/tools/lib/perf/include/perf/cpumap.h index d0bf218ada11..b24bd8b8f34e 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -22,7 +22,20 @@ struct perf_cpu_map; * perf_cpu_map__new_any_cpu - a map with a singular "any CPU"/dummy -1 value. */ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_any_cpu(void); -LIBPERF_API struct perf_cpu_map *perf_cpu_map__default_new(void); +/** + * perf_cpu_map__new_online_cpus - a map read from + * /sys/devices/system/cpu/online if + * available. If reading wasn't possible a map + * is created using the online processors + * assuming the first 'n' processors are all + * online. + */ +LIBPERF_API struct perf_cpu_map *perf_cpu_map__new_online_cpus(void); +/** + * perf_cpu_map__new - create a map from the given cpu_list such as "0-7". If no + * cpu_list argument is provided then + * perf_cpu_map__new_online_cpus is returned. + */ LIBPERF_API struct perf_cpu_map *perf_cpu_map__new(const char *cpu_list); LIBPERF_API struct perf_cpu_map *perf_cpu_map__read(FILE *file); LIBPERF_API struct perf_cpu_map *perf_cpu_map__get(struct perf_cpu_map *map); diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index a8ff64baea3e..8a71f841498e 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -2,7 +2,7 @@ LIBPERF_0.0.1 { global: libperf_init; perf_cpu_map__new_any_cpu; - perf_cpu_map__default_new; + perf_cpu_map__new_online_cpus; perf_cpu_map__get; perf_cpu_map__put; perf_cpu_map__new; diff --git a/tools/lib/perf/tests/test-cpumap.c b/tools/lib/perf/tests/test-cpumap.c index 2c359bdb951e..c998b1dae863 100644 --- a/tools/lib/perf/tests/test-cpumap.c +++ b/tools/lib/perf/tests/test-cpumap.c @@ -29,7 +29,7 @@ int test_cpumap(int argc, char **argv) perf_cpu_map__put(cpus); perf_cpu_map__put(cpus); - cpus = perf_cpu_map__default_new(); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) return -1; From 923ca62a7b1edceaa61eb6ac8dc56fdac51913b8 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:02:00 -0800 Subject: [PATCH 104/179] libperf cpumap: Rename perf_cpu_map__empty() to perf_cpu_map__has_any_cpu_or_is_empty() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The name perf_cpu_map_empty is misleading as true is also returned when the map contains an "any" CPU (aka dummy) map. Rename to perf_cpu_map__has_any_cpu_or_is_empty(), later changes will (re)introduce perf_cpu_map__empty() and perf_cpu_map__has_any_cpu(). Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Jajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Cc: bpf@vger.kernel.org Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231129060211.1890454-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/Documentation/libperf.txt | 2 +- tools/lib/perf/cpumap.c | 2 +- tools/lib/perf/evlist.c | 4 ++-- tools/lib/perf/include/perf/cpumap.h | 4 ++-- tools/lib/perf/libperf.map | 2 +- tools/perf/arch/arm/util/cs-etm.c | 10 +++++----- tools/perf/arch/arm64/util/arm-spe.c | 4 ++-- tools/perf/arch/x86/util/intel-bts.c | 4 ++-- tools/perf/arch/x86/util/intel-pt.c | 10 +++++----- tools/perf/builtin-c2c.c | 2 +- tools/perf/builtin-stat.c | 6 +++--- tools/perf/util/auxtrace.c | 4 ++-- tools/perf/util/record.c | 2 +- tools/perf/util/stat.c | 2 +- 14 files changed, 29 insertions(+), 29 deletions(-) diff --git a/tools/lib/perf/Documentation/libperf.txt b/tools/lib/perf/Documentation/libperf.txt index a256a26598b0..fcfb9499ef9c 100644 --- a/tools/lib/perf/Documentation/libperf.txt +++ b/tools/lib/perf/Documentation/libperf.txt @@ -46,7 +46,7 @@ SYNOPSIS void perf_cpu_map__put(struct perf_cpu_map *map); int perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); int perf_cpu_map__nr(const struct perf_cpu_map *cpus); - bool perf_cpu_map__empty(const struct perf_cpu_map *map); + bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map); int perf_cpu_map__max(struct perf_cpu_map *map); bool perf_cpu_map__has(const struct perf_cpu_map *map, int cpu); diff --git a/tools/lib/perf/cpumap.c b/tools/lib/perf/cpumap.c index 3aa80d0d26e8..4adcd7920d03 100644 --- a/tools/lib/perf/cpumap.c +++ b/tools/lib/perf/cpumap.c @@ -311,7 +311,7 @@ int perf_cpu_map__nr(const struct perf_cpu_map *cpus) return cpus ? __perf_cpu_map__nr(cpus) : 1; } -bool perf_cpu_map__empty(const struct perf_cpu_map *map) +bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map) { return map ? __perf_cpu_map__cpu(map, 0).cpu == -1 : true; } diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index 3acbbccc1901..75f36218fdd9 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -619,7 +619,7 @@ static int perf_evlist__nr_mmaps(struct perf_evlist *evlist) /* One for each CPU */ nr_mmaps = perf_cpu_map__nr(evlist->all_cpus); - if (perf_cpu_map__empty(evlist->all_cpus)) { + if (perf_cpu_map__has_any_cpu_or_is_empty(evlist->all_cpus)) { /* Plus one for each thread */ nr_mmaps += perf_thread_map__nr(evlist->threads); /* Minus the per-thread CPU (-1) */ @@ -653,7 +653,7 @@ int perf_evlist__mmap_ops(struct perf_evlist *evlist, if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0) return -ENOMEM; - if (perf_cpu_map__empty(cpus)) + if (perf_cpu_map__has_any_cpu_or_is_empty(cpus)) return mmap_per_thread(evlist, ops, mp); return mmap_per_cpu(evlist, ops, mp); diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index b24bd8b8f34e..9cf361fc5edc 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -47,9 +47,9 @@ LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); /** - * perf_cpu_map__empty - is map either empty or the "any CPU"/dummy value. 
+ * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value. */ -LIBPERF_API bool perf_cpu_map__empty(const struct perf_cpu_map *map); +LIBPERF_API bool perf_cpu_map__has_any_cpu_or_is_empty(const struct perf_cpu_map *map); LIBPERF_API struct perf_cpu perf_cpu_map__max(const struct perf_cpu_map *map); LIBPERF_API bool perf_cpu_map__has(const struct perf_cpu_map *map, struct perf_cpu cpu); LIBPERF_API bool perf_cpu_map__equal(const struct perf_cpu_map *lhs, diff --git a/tools/lib/perf/libperf.map b/tools/lib/perf/libperf.map index 8a71f841498e..10b3f3722642 100644 --- a/tools/lib/perf/libperf.map +++ b/tools/lib/perf/libperf.map @@ -9,7 +9,7 @@ LIBPERF_0.0.1 { perf_cpu_map__read; perf_cpu_map__nr; perf_cpu_map__cpu; - perf_cpu_map__empty; + perf_cpu_map__has_any_cpu_or_is_empty; perf_cpu_map__max; perf_cpu_map__has; perf_thread_map__new_array; diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 2cf873d71dff..c6b7b3066324 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -211,7 +211,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr, * program can run on any CPUs in this case, thus don't skip * validation. */ - if (!perf_cpu_map__empty(event_cpus) && + if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus) && !perf_cpu_map__has(event_cpus, cpu)) continue; @@ -435,7 +435,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr, * Also the case of per-cpu mmaps, need the contextID in order to be notified * when a context switch happened. */ - if (!perf_cpu_map__empty(cpus)) { + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, "timestamp", 1); evsel__set_config_if_unset(cs_etm_pmu, cs_etm_evsel, @@ -461,7 +461,7 @@ static int cs_etm_recording_options(struct auxtrace_record *itr, evsel->core.attr.sample_period = 1; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__empty(cpus)) + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(evsel, TIME); err = cs_etm_validate_config(itr, cs_etm_evsel); @@ -539,7 +539,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); /* cpu map is not empty, we have specific CPUs to work with */ - if (!perf_cpu_map__empty(event_cpus)) { + if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) { for (i = 0; i < cpu__max_cpu().cpu; i++) { struct perf_cpu cpu = { .cpu = i, }; @@ -814,7 +814,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, return -EINVAL; /* If the cpu_map is empty all online CPUs are involved */ - if (perf_cpu_map__empty(event_cpus)) { + if (perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) { cpu_map = online_cpus; } else { /* Make sure all specified CPUs are online */ diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c index e3acc739bd00..51ccbfd3d246 100644 --- a/tools/perf/arch/arm64/util/arm-spe.c +++ b/tools/perf/arch/arm64/util/arm-spe.c @@ -232,7 +232,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, sample CPU for AUX event; * also enable the timestamp tracing for samples correlation. 
*/ - if (!perf_cpu_map__empty(cpus)) { + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(arm_spe_evsel, CPU); evsel__set_config_if_unset(arm_spe_pmu, arm_spe_evsel, "ts_enable", 1); @@ -265,7 +265,7 @@ static int arm_spe_recording_options(struct auxtrace_record *itr, tracking_evsel->core.attr.sample_period = 1; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__empty(cpus)) { + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(tracking_evsel, TIME); evsel__set_sample_bit(tracking_evsel, CPU); diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c index d2c8cac11470..af8ae4647585 100644 --- a/tools/perf/arch/x86/util/intel-bts.c +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -143,7 +143,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, if (!opts->full_auxtrace) return 0; - if (opts->full_auxtrace && !perf_cpu_map__empty(cpus)) { + if (opts->full_auxtrace && !perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n"); return -EINVAL; } @@ -224,7 +224,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, we need the CPU on the * AUX event. */ - if (!perf_cpu_map__empty(cpus)) + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(intel_bts_evsel, CPU); } diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index fa0c718b9e72..d199619df3ab 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -369,7 +369,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr, ui__warning("Intel Processor Trace: TSC not available\n"); } - per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus); + per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(session->evlist->core.user_requested_cpus); auxtrace_info->type = PERF_AUXTRACE_INTEL_PT; auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type; @@ -774,7 +774,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * Per-cpu recording needs sched_switch events to distinguish different * threads. */ - if (have_timing_info && !perf_cpu_map__empty(cpus) && + if (have_timing_info && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) && !record_opts__no_switch_events(opts)) { if (perf_can_record_switch_events()) { bool cpu_wide = !target__none(&opts->target) && @@ -832,7 +832,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * In the case of per-cpu mmaps, we need the CPU on the * AUX event. */ - if (!perf_cpu_map__empty(cpus)) + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) evsel__set_sample_bit(intel_pt_evsel, CPU); } @@ -858,7 +858,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, tracking_evsel->immediate = true; /* In per-cpu case, always need the time of mmap events etc */ - if (!perf_cpu_map__empty(cpus)) { + if (!perf_cpu_map__has_any_cpu_or_is_empty(cpus)) { evsel__set_sample_bit(tracking_evsel, TIME); /* And the CPU for switch events */ evsel__set_sample_bit(tracking_evsel, CPU); @@ -870,7 +870,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * Warn the user when we do not have enough information to decode i.e. * per-cpu with no sched_switch (except workload-only). 
*/ - if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) && + if (!ptr->have_sched_switch && !perf_cpu_map__has_any_cpu_or_is_empty(cpus) && !target__none(&opts->target) && !intel_pt_evsel->core.attr.exclude_user) ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n"); diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c index a4cf9de7a7b5..f78eea9e2153 100644 --- a/tools/perf/builtin-c2c.c +++ b/tools/perf/builtin-c2c.c @@ -2320,7 +2320,7 @@ static int setup_nodes(struct perf_session *session) nodes[node] = set; /* empty node, skip */ - if (perf_cpu_map__empty(map)) + if (perf_cpu_map__has_any_cpu_or_is_empty(map)) continue; perf_cpu_map__for_each_cpu(cpu, idx, map) { diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 0bfa70791cfc..bda020c0b9d5 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1316,7 +1316,7 @@ static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map) * be the first online CPU in the cache domain else use the * first online CPU of the cache domain as the ID. */ - if (perf_cpu_map__empty(cpu_map)) + if (perf_cpu_map__has_any_cpu_or_is_empty(cpu_map)) id = cpu.cpu; else id = perf_cpu_map__cpu(cpu_map, 0).cpu; @@ -1622,7 +1622,7 @@ static int perf_stat_init_aggr_mode(void) * taking the highest cpu number to be the size of * the aggregation translate cpumap. */ - if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus)) + if (!perf_cpu_map__has_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; else nr = 0; @@ -2289,7 +2289,7 @@ int process_stat_config_event(struct perf_session *session, perf_event__read_stat_config(&stat_config, &event->stat_config); - if (perf_cpu_map__empty(st->cpus)) { + if (perf_cpu_map__has_any_cpu_or_is_empty(st->cpus)) { if (st->aggr_mode != AGGR_UNSET) pr_warning("warning: processing task data, aggregation mode not set\n"); } else if (st->aggr_mode != AGGR_UNSET) { diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index f528c4364d23..3684e6009b63 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -174,7 +174,7 @@ void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp, struct evlist *evlist, struct evsel *evsel, int idx) { - bool per_cpu = !perf_cpu_map__empty(evlist->core.user_requested_cpus); + bool per_cpu = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus); mp->mmap_needed = evsel->needs_auxtrace_mmap; @@ -648,7 +648,7 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr, static int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx) { - bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus); + bool per_cpu_mmaps = !perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus); if (per_cpu_mmaps) { struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->core.all_cpus, idx); diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index 9eb5c6a08999..40290382b2d7 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -237,7 +237,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str) evsel = evlist__last(temp_evlist); - if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) { + if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) { struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); if (cpus) diff --git 
a/tools/perf/util/stat.c b/tools/perf/util/stat.c index ec3506042217..012c4946b9c4 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -315,7 +315,7 @@ static int check_per_pkg(struct evsel *counter, struct perf_counts_values *vals, if (!counter->per_pkg) return 0; - if (perf_cpu_map__empty(cpus)) + if (perf_cpu_map__has_any_cpu_or_is_empty(cpus)) return 0; if (!mask) { From effe957c6bb70cac12918c0f5fd4cefb35967618 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:02:01 -0800 Subject: [PATCH 105/179] libperf cpumap: Replace usage of perf_cpu_map__new(NULL) with perf_cpu_map__new_online_cpus() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Passing NULL to perf_cpu_map__new() performs perf_cpu_map__new_online_cpus(), just directly call perf_cpu_map__new_online_cpus() to be more intention revealing. Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Jajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Cc: bpf@vger.kernel.org Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231129060211.1890454-5-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/Documentation/examples/sampling.c | 2 +- tools/lib/perf/Documentation/libperf-sampling.txt | 2 +- tools/lib/perf/evlist.c | 2 +- tools/lib/perf/tests/test-evlist.c | 4 ++-- tools/lib/perf/tests/test-evsel.c | 2 +- tools/perf/arch/arm/util/cs-etm.c | 6 +++--- tools/perf/arch/arm64/util/header.c | 2 +- tools/perf/bench/epoll-ctl.c | 2 +- tools/perf/bench/epoll-wait.c | 2 +- tools/perf/bench/futex-hash.c | 2 +- tools/perf/bench/futex-lock-pi.c | 2 +- tools/perf/bench/futex-requeue.c | 2 +- tools/perf/bench/futex-wake-parallel.c | 2 +- tools/perf/bench/futex-wake.c | 2 +- tools/perf/builtin-ftrace.c | 2 +- tools/perf/tests/code-reading.c | 2 +- tools/perf/tests/keep-tracking.c | 2 +- tools/perf/tests/mmap-basic.c | 2 +- tools/perf/tests/openat-syscall-all-cpus.c | 2 +- tools/perf/tests/perf-time-to-tsc.c | 2 +- tools/perf/tests/switch-tracking.c | 2 +- tools/perf/tests/topology.c | 2 +- tools/perf/util/bpf_counter.c | 2 +- tools/perf/util/cpumap.c | 2 +- tools/perf/util/cputopo.c | 2 +- tools/perf/util/evlist.c | 2 +- tools/perf/util/perf_api_probe.c | 4 ++-- tools/perf/util/record.c | 2 +- 28 files changed, 32 insertions(+), 32 deletions(-) diff --git a/tools/lib/perf/Documentation/examples/sampling.c b/tools/lib/perf/Documentation/examples/sampling.c index 8e1a926a9cfe..bc142f0664b5 100644 --- a/tools/lib/perf/Documentation/examples/sampling.c +++ b/tools/lib/perf/Documentation/examples/sampling.c @@ -39,7 +39,7 @@ int main(int argc, char **argv) libperf_init(libperf_print); - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) { fprintf(stderr, "failed to create cpus\n"); return -1; diff --git a/tools/lib/perf/Documentation/libperf-sampling.txt b/tools/lib/perf/Documentation/libperf-sampling.txt index 
d6ca24f6ef78..2378980fab8a 100644 --- a/tools/lib/perf/Documentation/libperf-sampling.txt +++ b/tools/lib/perf/Documentation/libperf-sampling.txt @@ -97,7 +97,7 @@ In this case we will monitor all the available CPUs: [source,c] -- - 42 cpus = perf_cpu_map__new(NULL); + 42 cpus = perf_cpu_map__new_online_cpus(); 43 if (!cpus) { 44 fprintf(stderr, "failed to create cpus\n"); 45 return -1; diff --git a/tools/lib/perf/evlist.c b/tools/lib/perf/evlist.c index 75f36218fdd9..058e3ff10f9b 100644 --- a/tools/lib/perf/evlist.c +++ b/tools/lib/perf/evlist.c @@ -39,7 +39,7 @@ static void __perf_evlist__propagate_maps(struct perf_evlist *evlist, if (evsel->system_wide) { /* System wide: set the cpu map of the evsel to all online CPUs. */ perf_cpu_map__put(evsel->cpus); - evsel->cpus = perf_cpu_map__new(NULL); + evsel->cpus = perf_cpu_map__new_online_cpus(); } else if (evlist->has_user_cpus && evsel->is_pmu_core) { /* * User requested CPUs on a core PMU, ensure the requested CPUs diff --git a/tools/lib/perf/tests/test-evlist.c b/tools/lib/perf/tests/test-evlist.c index ab63878bacb9..10f70cb41ff1 100644 --- a/tools/lib/perf/tests/test-evlist.c +++ b/tools/lib/perf/tests/test-evlist.c @@ -46,7 +46,7 @@ static int test_stat_cpu(void) }; int err, idx; - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); __T("failed to create cpus", cpus); evlist = perf_evlist__new(); @@ -350,7 +350,7 @@ static int test_mmap_cpus(void) attr.config = id; - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); __T("failed to create cpus", cpus); evlist = perf_evlist__new(); diff --git a/tools/lib/perf/tests/test-evsel.c b/tools/lib/perf/tests/test-evsel.c index a11fc51bfb68..545ec3150546 100644 --- a/tools/lib/perf/tests/test-evsel.c +++ b/tools/lib/perf/tests/test-evsel.c @@ -27,7 +27,7 @@ static int test_stat_cpu(void) }; int err, idx; - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); __T("failed to create cpus", cpus); evsel = perf_evsel__new(&attr); diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index c6b7b3066324..77e6663c1703 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -199,7 +199,7 @@ static int cs_etm_validate_config(struct auxtrace_record *itr, { int i, err = -EINVAL; struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus; - struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); + struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); /* Set option of each CPU we have */ for (i = 0; i < cpu__max_cpu().cpu; i++) { @@ -536,7 +536,7 @@ cs_etm_info_priv_size(struct auxtrace_record *itr __maybe_unused, int i; int etmv3 = 0, etmv4 = 0, ete = 0; struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus; - struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); + struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); /* cpu map is not empty, we have specific CPUs to work with */ if (!perf_cpu_map__has_any_cpu_or_is_empty(event_cpus)) { @@ -802,7 +802,7 @@ static int cs_etm_info_fill(struct auxtrace_record *itr, u64 nr_cpu, type; struct perf_cpu_map *cpu_map; struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus; - struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL); + struct perf_cpu_map *online_cpus = perf_cpu_map__new_online_cpus(); struct cs_etm_recording *ptr = container_of(itr, struct cs_etm_recording, itr); struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu; diff --git 
a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c index a2eef9ec5491..97037499152e 100644 --- a/tools/perf/arch/arm64/util/header.c +++ b/tools/perf/arch/arm64/util/header.c @@ -57,7 +57,7 @@ static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus) int get_cpuid(char *buf, size_t sz) { - struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); + struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus(); int ret; if (!cpus) diff --git a/tools/perf/bench/epoll-ctl.c b/tools/perf/bench/epoll-ctl.c index 6bfffe83dde9..d3db73dac66a 100644 --- a/tools/perf/bench/epoll-ctl.c +++ b/tools/perf/bench/epoll-ctl.c @@ -330,7 +330,7 @@ int bench_epoll_ctl(int argc, const char **argv) act.sa_sigaction = toggle_done; sigaction(SIGINT, &act, NULL); - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) goto errmem; diff --git a/tools/perf/bench/epoll-wait.c b/tools/perf/bench/epoll-wait.c index cb5174b53940..06bb3187660a 100644 --- a/tools/perf/bench/epoll-wait.c +++ b/tools/perf/bench/epoll-wait.c @@ -444,7 +444,7 @@ int bench_epoll_wait(int argc, const char **argv) act.sa_sigaction = toggle_done; sigaction(SIGINT, &act, NULL); - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) goto errmem; diff --git a/tools/perf/bench/futex-hash.c b/tools/perf/bench/futex-hash.c index 2005a3fa3026..0c69d20efa32 100644 --- a/tools/perf/bench/futex-hash.c +++ b/tools/perf/bench/futex-hash.c @@ -138,7 +138,7 @@ int bench_futex_hash(int argc, const char **argv) exit(EXIT_FAILURE); } - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) goto errmem; diff --git a/tools/perf/bench/futex-lock-pi.c b/tools/perf/bench/futex-lock-pi.c index 092cbd52db82..7a4973346180 100644 --- a/tools/perf/bench/futex-lock-pi.c +++ b/tools/perf/bench/futex-lock-pi.c @@ -172,7 +172,7 @@ int bench_futex_lock_pi(int argc, const char **argv) if (argc) goto err; - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) err(EXIT_FAILURE, "calloc"); diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c index c0035990a33c..d9ad736c1a3e 100644 --- a/tools/perf/bench/futex-requeue.c +++ b/tools/perf/bench/futex-requeue.c @@ -174,7 +174,7 @@ int bench_futex_requeue(int argc, const char **argv) if (argc) goto err; - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) err(EXIT_FAILURE, "cpu_map__new"); diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c index 5ab0234d74e6..b66df553e561 100644 --- a/tools/perf/bench/futex-wake-parallel.c +++ b/tools/perf/bench/futex-wake-parallel.c @@ -264,7 +264,7 @@ int bench_futex_wake_parallel(int argc, const char **argv) err(EXIT_FAILURE, "mlockall"); } - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) err(EXIT_FAILURE, "calloc"); diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c index 18a5894af8bb..690fd6d3da13 100644 --- a/tools/perf/bench/futex-wake.c +++ b/tools/perf/bench/futex-wake.c @@ -149,7 +149,7 @@ int bench_futex_wake(int argc, const char **argv) exit(EXIT_FAILURE); } - cpu = perf_cpu_map__new(NULL); + cpu = perf_cpu_map__new_online_cpus(); if (!cpu) err(EXIT_FAILURE, "calloc"); diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c index ac2e6c75f912..eb30c8eca488 100644 --- a/tools/perf/builtin-ftrace.c +++ b/tools/perf/builtin-ftrace.c @@ -333,7 +333,7 @@ static int 
set_tracing_func_irqinfo(struct perf_ftrace *ftrace) static int reset_tracing_cpu(void) { - struct perf_cpu_map *cpumap = perf_cpu_map__new(NULL); + struct perf_cpu_map *cpumap = perf_cpu_map__new_online_cpus(); int ret; ret = set_tracing_cpumask(cpumap); diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c index 8620146d0378..7a3a7bbbec71 100644 --- a/tools/perf/tests/code-reading.c +++ b/tools/perf/tests/code-reading.c @@ -610,7 +610,7 @@ static int do_test_code_reading(bool try_kcore) goto out_put; } - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) { pr_debug("perf_cpu_map__new failed\n"); goto out_put; diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c index 8f4f9b632e1e..5a3b2bed07f3 100644 --- a/tools/perf/tests/keep-tracking.c +++ b/tools/perf/tests/keep-tracking.c @@ -81,7 +81,7 @@ static int test__keep_tracking(struct test_suite *test __maybe_unused, int subte threads = thread_map__new(-1, getpid(), UINT_MAX); CHECK_NOT_NULL__(threads); - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); CHECK_NOT_NULL__(cpus); evlist = evlist__new(); diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index 886a13a77a16..012c8ae439fd 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -52,7 +52,7 @@ static int test__basic_mmap(struct test_suite *test __maybe_unused, int subtest return -1; } - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (cpus == NULL) { pr_debug("perf_cpu_map__new\n"); goto out_free_threads; diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c index f3275be83a33..fb114118c876 100644 --- a/tools/perf/tests/openat-syscall-all-cpus.c +++ b/tools/perf/tests/openat-syscall-all-cpus.c @@ -37,7 +37,7 @@ static int test__openat_syscall_event_on_all_cpus(struct test_suite *test __mayb return -1; } - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (cpus == NULL) { pr_debug("perf_cpu_map__new\n"); goto out_thread_map_delete; diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c index efcd71c2738a..bbe2ddeb9b74 100644 --- a/tools/perf/tests/perf-time-to-tsc.c +++ b/tools/perf/tests/perf-time-to-tsc.c @@ -93,7 +93,7 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su threads = thread_map__new(-1, getpid(), UINT_MAX); CHECK_NOT_NULL__(threads); - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); CHECK_NOT_NULL__(cpus); evlist = evlist__new(); diff --git a/tools/perf/tests/switch-tracking.c b/tools/perf/tests/switch-tracking.c index e52b031bedc5..5cab17a1942e 100644 --- a/tools/perf/tests/switch-tracking.c +++ b/tools/perf/tests/switch-tracking.c @@ -351,7 +351,7 @@ static int test__switch_tracking(struct test_suite *test __maybe_unused, int sub goto out_err; } - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) { pr_debug("perf_cpu_map__new failed!\n"); goto out_err; diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c index 9dee63734e66..2a842f53fbb5 100644 --- a/tools/perf/tests/topology.c +++ b/tools/perf/tests/topology.c @@ -215,7 +215,7 @@ static int test__session_topology(struct test_suite *test __maybe_unused, int su if (session_write_header(path)) goto free_path; - map = perf_cpu_map__new(NULL); + map = perf_cpu_map__new_online_cpus(); if (map == NULL) { pr_debug("failed 
to get system cpumap\n"); goto free_path; diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c index 7f9b0e46e008..7a8af60e0f51 100644 --- a/tools/perf/util/bpf_counter.c +++ b/tools/perf/util/bpf_counter.c @@ -455,7 +455,7 @@ static int bperf__load(struct evsel *evsel, struct target *target) return -1; if (!all_cpu_map) { - all_cpu_map = perf_cpu_map__new(NULL); + all_cpu_map = perf_cpu_map__new_online_cpus(); if (!all_cpu_map) return -1; } diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 0e090e8bc334..0581ee0fa5f2 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c @@ -672,7 +672,7 @@ struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */ static struct perf_cpu_map *online; if (!online) - online = perf_cpu_map__new(NULL); /* from /sys/devices/system/cpu/online */ + online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */ return online; } diff --git a/tools/perf/util/cputopo.c b/tools/perf/util/cputopo.c index 81cfc85f4668..8bbeb2dc76fd 100644 --- a/tools/perf/util/cputopo.c +++ b/tools/perf/util/cputopo.c @@ -267,7 +267,7 @@ struct cpu_topology *cpu_topology__new(void) ncpus = cpu__max_present_cpu().cpu; /* build online CPU map */ - map = perf_cpu_map__new(NULL); + map = perf_cpu_map__new_online_cpus(); if (map == NULL) { pr_debug("failed to get system cpumap\n"); return NULL; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index ff7f85ded89d..0ed3ce2aa8eb 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1352,7 +1352,7 @@ static int evlist__create_syswide_maps(struct evlist *evlist) * error, and we may not want to do that fallback to a * default cpu identity map :-\ */ - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) goto out; diff --git a/tools/perf/util/perf_api_probe.c b/tools/perf/util/perf_api_probe.c index e1e2d701599c..1de3b69cdf4a 100644 --- a/tools/perf/util/perf_api_probe.c +++ b/tools/perf/util/perf_api_probe.c @@ -64,7 +64,7 @@ static bool perf_probe_api(setup_probe_fn_t fn) struct perf_cpu cpu; int ret, i = 0; - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) return false; cpu = perf_cpu_map__cpu(cpus, 0); @@ -140,7 +140,7 @@ bool perf_can_record_cpu_wide(void) struct perf_cpu cpu; int fd; - cpus = perf_cpu_map__new(NULL); + cpus = perf_cpu_map__new_online_cpus(); if (!cpus) return false; diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c index 40290382b2d7..87e817b3cf7e 100644 --- a/tools/perf/util/record.c +++ b/tools/perf/util/record.c @@ -238,7 +238,7 @@ bool evlist__can_select_event(struct evlist *evlist, const char *str) evsel = evlist__last(temp_evlist); if (!evlist || perf_cpu_map__has_any_cpu_or_is_empty(evlist->core.user_requested_cpus)) { - struct perf_cpu_map *cpus = perf_cpu_map__new(NULL); + struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus(); if (cpus) cpu = perf_cpu_map__cpu(cpus, 0); From 5805c82513c444333efb086017be8d666336858a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:02:02 -0800 Subject: [PATCH 106/179] libperf cpumap: Add for_each_cpu() that skips the "any CPU" case MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When iterating CPUs in a CPU map it is often desirable to skip the "any CPU" (aka dummy) case. Add a helper for this and use in builtin-record. 
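As a usage illustration (a sketch only, not part of the change: it assumes a libperf built from this series, i.e. one that exports perf_cpu_map__new_online_cpus() and the new iterator, linked with -lperf):

  #include <perf/cpumap.h>
  #include <stdio.h>

  int main(void)
  {
  	struct perf_cpu_map *cpus = perf_cpu_map__new_online_cpus();
  	struct perf_cpu cpu;
  	int idx;

  	if (!cpus)
  		return -1;

  	/*
  	 * Visits real CPUs only; for a map carrying the "any CPU" (-1)
  	 * entry, e.g. a dummy map, that entry is simply not visited,
  	 * so the caller's explicit -1 check goes away.
  	 */
  	perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus)
  		printf("idx %d -> cpu %d\n", idx, cpu.cpu);

  	perf_cpu_map__put(cpus);
  	return 0;
  }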
Reviewed-by: James Clark Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Jajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Cc: bpf@vger.kernel.org Cc: coresight@lists.linaro.org Cc: linux-arm-kernel@lists.infradead.org Link: https://lore.kernel.org/r/20231129060211.1890454-6-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/include/perf/cpumap.h | 6 ++++++ tools/perf/builtin-record.c | 4 +--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index 9cf361fc5edc..dbe0a7352b64 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -64,6 +64,12 @@ LIBPERF_API bool perf_cpu_map__has_any_cpu(const struct perf_cpu_map *map); (idx) < perf_cpu_map__nr(cpus); \ (idx)++, (cpu) = perf_cpu_map__cpu(cpus, idx)) +#define perf_cpu_map__for_each_cpu_skip_any(_cpu, idx, cpus) \ + for ((idx) = 0, (_cpu) = perf_cpu_map__cpu(cpus, idx); \ + (idx) < perf_cpu_map__nr(cpus); \ + (idx)++, (_cpu) = perf_cpu_map__cpu(cpus, idx)) \ + if ((_cpu).cpu != -1) + #define perf_cpu_map__for_each_idx(idx, cpus) \ for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index eb5a398ddb1d..8a1002683fda 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -3601,9 +3601,7 @@ static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cp if (cpu_map__is_dummy(cpus)) return 0; - perf_cpu_map__for_each_cpu(cpu, idx, cpus) { - if (cpu.cpu == -1) - continue; + perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) { /* Return ENODEV is input cpu is greater than max cpu */ if ((unsigned long)cpu.cpu > mask->nbits) return -ENODEV; From 813900d19b923fc1b241c1ce292472f68066092b Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 7 Dec 2023 16:16:34 +0800 Subject: [PATCH 107/179] perf header: Fix one memory leakage in perf_event__fprintf_event_update() When dump the raw trace by `perf report -D` ASan reports a memory leakage in perf_event__fprintf_event_update(). It shows that we allocated a temporary cpumap for dumping the CPUs but doesn't release it and it's not used elsewhere. Fix this by free the cpumap after the dumping. 
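For context, a minimal sketch of the ownership rule the fix restores (illustrative only: it assumes the perf-internal cpu_map__fprintf() helper from util/cpumap.h plus the libperf constructors, with perf_cpu_map__new_online_cpus() standing in for cpu_map__new_data()):

  #include "util/cpumap.h"	/* perf-internal: cpu_map__fprintf() */
  #include <perf/cpumap.h>
  #include <stdio.h>

  /* Dump a CPU map, then drop the reference the constructor handed us. */
  static void dump_cpus_once(FILE *fp)
  {
  	struct perf_cpu_map *map = perf_cpu_map__new_online_cpus();

  	if (!map)
  		return;

  	cpu_map__fprintf(map, fp);	/* temporary use, for printing only */
  	perf_cpu_map__put(map);		/* the put the event_update path was missing */
  }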
Fixes: c853f9394b7bc189 ("perf tools: Add perf_event__fprintf_event_update function") Reviewed-by: Ian Rogers Signed-off-by: Yicong Yang Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Jonathan Cameron Cc: Junhao He Cc: Mark Rutland Cc: Peter Zijlstra Cc: linuxarm@huawei.com Link: https://lore.kernel.org/r/20231207081635.8427-2-yangyicong@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 08cc2febabde..a9f71f8343f0 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -4391,9 +4391,10 @@ size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) ret += fprintf(fp, "... "); map = cpu_map__new_data(&ev->cpus.cpus); - if (map) + if (map) { ret += cpu_map__fprintf(map, fp); - else + perf_cpu_map__put(map); + } else ret += fprintf(fp, "failed to get cpus\n"); break; default: From 1bc479d665bc25a9a4e8168d5b400a47491511f9 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Thu, 7 Dec 2023 16:16:35 +0800 Subject: [PATCH 108/179] perf hisi-ptt: Fix one memory leakage in hisi_ptt_process_auxtrace_event() ASan complains a memory leakage in hisi_ptt_process_auxtrace_event() that the data buffer is not freed. Since currently we only support the raw dump trace mode, the data buffer is used only within this function. So fix this by freeing the data buffer before going out. Fixes: 5e91e57e68090c0e ("perf auxtrace arm64: Add support for parsing HiSilicon PCIe Trace packet") Reviewed-by: Ian Rogers Signed-off-by: Yicong Yang Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Jonathan Cameron Cc: Junhao He Cc: Mark Rutland Cc: Peter Zijlstra Cc: Qi Liu Link: https://lore.kernel.org/r/20231207081635.8427-3-yangyicong@huawei.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/hisi-ptt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/util/hisi-ptt.c b/tools/perf/util/hisi-ptt.c index 43bd1ca62d58..52d0ce302ca0 100644 --- a/tools/perf/util/hisi-ptt.c +++ b/tools/perf/util/hisi-ptt.c @@ -123,6 +123,7 @@ static int hisi_ptt_process_auxtrace_event(struct perf_session *session, if (dump_trace) hisi_ptt_dump_event(ptt, data, size); + free(data); return 0; } From 6f33e6fa29d0366d6e5b3ea2930dbc0b648151fe Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 13 Dec 2023 22:02:56 -0800 Subject: [PATCH 109/179] perf stat: Combine the -A/--no-aggr and --no-merge options The -A or --no-aggr option disables aggregation of core events: $ perf stat -A -e cycles,data_total -a true Performance counter stats for 'system wide': CPU0 1,287,665 cycles CPU1 1,831,681 cycles CPU2 27,345,998 cycles CPU3 1,964,799 cycles CPU4 236,174 cycles CPU5 3,302,825 cycles CPU6 9,201,446 cycles CPU7 1,403,043 cycles CPU0 110.90 MiB data_total 0.008961761 seconds time elapsed The --no-merge option disables the aggregation of uncore events: $ perf stat --no-merge -e cycles,data_total -a true Performance counter stats for 'system wide': 38,482,778 cycles 15.04 MiB data_total [uncore_imc_free_running_1] 15.00 MiB data_total [uncore_imc_free_running_0] 0.005915155 seconds time elapsed Having two options confuses users who generally don't appreciate the difference in PMUs. 
Keep all the options but make it so they all disable aggregation both of core and uncore events: $ perf stat -A -e cycles,data_total -a true Performance counter stats for 'system wide': CPU0 85,878 cycles CPU1 88,179 cycles CPU2 60,872 cycles CPU3 3,265,567 cycles CPU4 82,357 cycles CPU5 83,383 cycles CPU6 84,156 cycles CPU7 220,803 cycles CPU0 2.38 MiB data_total [uncore_imc_free_running_0] CPU0 2.38 MiB data_total [uncore_imc_free_running_1] 0.001397205 seconds time elapsed Update the relevant 'perf stat' man page information. Reviewed-by: Kan Liang Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Athira Jajeev Cc: Changbin Du Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kaige Ye Cc: Mark Rutland Cc: Namhyung Kim Cc: Nick Desaulniers Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231214060256.2094017-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-stat.txt | 52 ++++++++++++++------------ tools/perf/builtin-stat.c | 5 ++- tools/perf/util/stat-display.c | 2 +- tools/perf/util/stat.c | 2 +- tools/perf/util/stat.h | 1 - 5 files changed, 33 insertions(+), 29 deletions(-) diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 8f789fa1242e..5af2e432b54f 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -422,7 +422,34 @@ See perf list output for the possible metrics and metricgroups. -A:: --no-aggr:: -Do not aggregate counts across all monitored CPUs. +--no-merge:: +Do not aggregate/merge counts across monitored CPUs or PMUs. + +When multiple events are created from a single event specification, +stat will, by default, aggregate the event counts and show the result +in a single row. This option disables that behavior and shows the +individual events and counts. + +Multiple events are created from a single event specification when: + +1. PID monitoring isn't requested and the system has more than one + CPU. For example, a system with 8 SMT threads will have one event + opened on each thread and aggregation is performed across them. + +2. Prefix or glob wildcard matching is used for the PMU name. For + example, multiple memory controller PMUs may exist typically with a + suffix of _0, _1, etc. By default the event counts will all be + combined if the PMU is specified without the suffix such as + uncore_imc rather than uncore_imc_0. + +3. Aliases, which are listed immediately after the Kernel PMU events + by perf list, are used. + +--hybrid-merge:: +Merge core event counts from all core PMUs. In hybrid or big.LITTLE +systems by default each core PMU will report its count +separately. This option forces core PMU counts to be combined to give +a behavior closer to having a single CPU type in the system. --topdown:: Print top-down metrics supported by the CPU. This allows to determine @@ -475,29 +502,6 @@ highlight 'tma_frontend_bound'. This metric may be drilled into with Error out if the input is higher than the supported max level. ---no-merge:: -Do not merge results from same PMUs. - -When multiple events are created from a single event specification, -stat will, by default, aggregate the event counts and show the result -in a single row. This option disables that behavior and shows -the individual events and counts. - -Multiple events are created from a single event specification when: -1. Prefix or glob matching is used for the PMU name. -2. 
Aliases, which are listed immediately after the Kernel PMU events - by perf list, are used. - ---hybrid-merge:: -Merge the hybrid event counts from all PMUs. - -For hybrid events, by default, the stat aggregates and reports the event -counts per PMU. But sometimes, it's also useful to aggregate event counts -from all PMUs. This option enables that behavior and reports the counts -without PMUs. - -For non-hybrid events, it should be no effect. - --smi-cost:: Measure SMI cost if msr/aperf/ and msr/smi/ events are supported. diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index bda020c0b9d5..5fe9abc6a524 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1204,8 +1204,9 @@ static struct option stat_options[] = { OPT_STRING('C', "cpu", &target.cpu_list, "cpu", "list of cpus to monitor in system-wide"), OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, - "disable CPU count aggregation", AGGR_NONE), - OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), + "disable aggregation across CPUs or PMUs", AGGR_NONE), + OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode, + "disable aggregation the same as -A or -no-aggr", AGGR_NONE), OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge, "Merge identical named hybrid events"), OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index afe6db8e7bf4..8c61f8627ebc 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -898,7 +898,7 @@ static bool hybrid_uniquify(struct evsel *evsel, struct perf_stat_config *config static void uniquify_counter(struct perf_stat_config *config, struct evsel *counter) { - if (config->no_merge || hybrid_uniquify(counter, config)) + if (config->aggr_mode == AGGR_NONE || hybrid_uniquify(counter, config)) uniquify_event_name(counter); } diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c index 012c4946b9c4..b0bcf92f0f9c 100644 --- a/tools/perf/util/stat.c +++ b/tools/perf/util/stat.c @@ -592,7 +592,7 @@ void perf_stat_merge_counters(struct perf_stat_config *config, struct evlist *ev { struct evsel *evsel; - if (config->no_merge) + if (config->aggr_mode == AGGR_NONE) return; evlist__for_each_entry(evlist, evsel) diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index 325d0fad1842..4357ba114822 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -76,7 +76,6 @@ struct perf_stat_config { bool null_run; bool ru_display; bool big_num; - bool no_merge; bool hybrid_merge; bool walltime_run_table; bool all_kernel; From 1af478903fc48c1409a8dd6b698383b62387adf1 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 11 Dec 2023 23:05:44 -0800 Subject: [PATCH 110/179] perf genelf: Set ELF program header addresses properly The text section starts after the ELF headers so PHDR.p_vaddr and others should have the correct addresses. 
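For readers less familiar with the libelf side, here is a rough standalone sketch of emitting such a header with the portable gelf API (an illustration under assumptions, not the genelf code itself: text_off stands in for GEN_ELF_TEXT_OFFSET, csize for the code size, and 'e' is an Elf handle that already has an ELF header, e.g. from gelf_newehdr()):

  #include <gelf.h>

  static int emit_text_phdr(Elf *e, GElf_Off text_off, GElf_Xword csize)
  {
  	GElf_Phdr phdr;

  	if (gelf_newphdr(e, 1) == NULL)
  		return -1;
  	if (gelf_getphdr(e, 0, &phdr) == NULL)
  		return -1;

  	phdr.p_type   = PT_LOAD;
  	phdr.p_offset = text_off;	/* .text starts right after the ELF headers */
  	phdr.p_vaddr  = text_off;	/* so the segment must not claim offset/address 0 */
  	phdr.p_paddr  = text_off;
  	phdr.p_filesz = csize;
  	phdr.p_memsz  = csize;
  	phdr.p_flags  = PF_X | PF_R;

  	return gelf_update_phdr(e, 0, &phdr) ? 0 : -1;
  }

Dumping the resulting file with 'readelf -l' should then show the PT_LOAD entry's Offset and VirtAddr agreeing with where .text actually sits in the file.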
Fixes: babd04386b1df8c3 ("perf jit: Include program header in ELF files") Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Fangrui Song Cc: Ingo Molnar Cc: Jiri Olsa Cc: Lieven Hey Cc: Milian Wolff Cc: Pablo Galindo Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231212070547.612536-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/genelf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c index fefc72066c4e..ac17a3cb59dc 100644 --- a/tools/perf/util/genelf.c +++ b/tools/perf/util/genelf.c @@ -293,9 +293,9 @@ jit_write_elf(int fd, uint64_t load_addr, const char *sym, */ phdr = elf_newphdr(e, 1); phdr[0].p_type = PT_LOAD; - phdr[0].p_offset = 0; - phdr[0].p_vaddr = 0; - phdr[0].p_paddr = 0; + phdr[0].p_offset = GEN_ELF_TEXT_OFFSET; + phdr[0].p_vaddr = GEN_ELF_TEXT_OFFSET; + phdr[0].p_paddr = GEN_ELF_TEXT_OFFSET; phdr[0].p_filesz = csize; phdr[0].p_memsz = csize; phdr[0].p_flags = PF_X | PF_R; From c966d23a351a33f8a977fd7efbb6f467132f7383 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 11 Dec 2023 23:05:45 -0800 Subject: [PATCH 111/179] perf unwind-libdw: Handle JIT-generated DSOs properly Usually DSOs are mapped from the beginning of the file, so the base address of the DSO can be calculated by map->start - map->pgoff. However, JIT DSOs which are generated by `perf inject -j`, are mapped only the code segment. This makes unwind-libdw code confusing and rejects processing unwinds in the JIT DSOs. It should use the map start address as base for them to fix the confusion. Fixes: 1fe627da30331024 ("perf unwind: Take pgoff into account when reporting elf to libdwfl") Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Fangrui Song Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Milian Wolff Cc: Pablo Galindo Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231212070547.612536-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/unwind-libdw.c | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index 8554db3fc0d7..6013335a8dae 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c @@ -46,6 +46,7 @@ static int __report_module(struct addr_location *al, u64 ip, { Dwfl_Module *mod; struct dso *dso = NULL; + Dwarf_Addr base; /* * Some callers will use al->sym, so we can't just use the * cheaper thread__find_map() here. @@ -58,13 +59,25 @@ static int __report_module(struct addr_location *al, u64 ip, if (!dso) return 0; + /* + * The generated JIT DSO files only map the code segment without + * ELF headers. Since JIT codes used to be packed in a memory + * segment, calculating the base address using pgoff falls into + * a different code in another DSO. So just use the map->start + * directly to pick the correct one. 
+ */ + if (!strncmp(dso->long_name, "/tmp/jitted-", 12)) + base = map__start(al->map); + else + base = map__start(al->map) - map__pgoff(al->map); + mod = dwfl_addrmodule(ui->dwfl, ip); if (mod) { Dwarf_Addr s; dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL); - if (s != map__start(al->map) - map__pgoff(al->map)) - mod = 0; + if (s != base) + mod = NULL; } if (!mod) { @@ -72,14 +85,14 @@ static int __report_module(struct addr_location *al, u64 ip, __symbol__join_symfs(filename, sizeof(filename), dso->long_name); mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1, - map__start(al->map) - map__pgoff(al->map), false); + base, false); } if (!mod) { char filename[PATH_MAX]; if (dso__build_id_filename(dso, filename, sizeof(filename), false)) mod = dwfl_report_elf(ui->dwfl, dso->short_name, filename, -1, - map__start(al->map) - map__pgoff(al->map), false); + base, false); } if (mod) { From 4fb54994b2360ab5029ee3a959161f6fe6bbb349 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Mon, 11 Dec 2023 23:05:46 -0800 Subject: [PATCH 112/179] perf unwind-libunwind: Fix base address for .eh_frame The base address of a DSO mapping should start at the start of the file. Usually DSOs are mapped from the pgoff 0 so it doesn't matter when it uses the start of the map address. But generated DSOs for JIT codes doesn't start from the 0 so it should subtract the offset to calculate the .eh_frame table offsets correctly. Fixes: dc2cf4ca866f5715 ("perf unwind: Fix segbase for ld.lld linked objects") Reviewed-by: Ian Rogers Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Fangrui Song Cc: Ingo Molnar Cc: Jiri Olsa Cc: Milian Wolff Cc: Pablo Galindo Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231212070547.612536-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/unwind-libunwind-local.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index c0641882fd2f..5e5c3395a499 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -327,7 +327,7 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, maps__for_each_entry(thread__maps(ui->thread), map_node) { struct map *map = map_node->map; - u64 start = map__start(map); + u64 start = map__start(map) - map__pgoff(map); if (map__dso(map) == dso && start < base_addr) base_addr = start; From 5fa695e7da4975e8d21ce49f3718d6cf00ecb75e Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 14 Dec 2023 06:46:11 -0800 Subject: [PATCH 113/179] perf top: Use evsel's cpus to replace user_requested_cpus perf top errors out on a hybrid machine $perf top Error: The cycles:P event is not supported. The perf top expects that the "cycles" is collected on all CPUs in the system. But for hybrid there is no single "cycles" event which can cover all CPUs. Perf has to split it into two cycles events, e.g., cpu_core/cycles/ and cpu_atom/cycles/. Each event has its own CPU mask. If a event is opened on the unsupported CPU. The open fails. That's the reason of the above error out. Perf should only open the cycles event on the corresponding CPU. The commit ef91871c960e ("perf evlist: Propagate user CPU maps intersecting core PMU maps") intersect the requested CPU map with the CPU map of the PMU. Use the evsel's cpus to replace user_requested_cpus. The evlist's threads are also propagated to the evsel's threads in __perf_evlist__propagate_maps(). 
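In sketch form (using the evlist/evsel names that appear in the diff below, with the fallback and error handling elided), the perf top open loop then becomes per-evsel rather than evlist-wide:

  struct evsel *counter;

  evlist__for_each_entry(evlist, counter) {
  	/*
  	 * counter->core.cpus is the user-requested CPU map already
  	 * intersected with this evsel's PMU cpumask, so a cpu_atom
  	 * event is never opened on a cpu_core CPU and vice versa.
  	 */
  	if (evsel__open(counter, counter->core.cpus, counter->core.threads) < 0) {
  		/* fallback/error handling elided in this sketch */
  		break;
  	}
  }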
For a system-wide event, perf appends a dummy event and assign it to the evsel's threads. For a per-thread event, the evlist's thread_map is assigned to the evsel's threads. The same as the other tools, e.g., perf record, using the evsel's threads when opening an event. Reported-by: Arnaldo Carvalho de Melo Reviewed-by: Ian Rogers Signed-off-by: Kan Liang Tested-by: Arnaldo Carvalho de Melo Cc: Hector Martin Cc: Marc Zyngier Cc: Mark Rutland Cc: Namhyung Kim Closes: https://lore.kernel.org/linux-perf-users/ZXNnDrGKXbEELMXV@kernel.org/ Link: https://lore.kernel.org/r/20231214144612.1092028-1-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-top.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ed83afeeced0..13e609c0c693 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1026,8 +1026,8 @@ static int perf_top__start_counters(struct perf_top *top) evlist__for_each_entry(evlist, counter) { try_again: - if (evsel__open(counter, top->evlist->core.user_requested_cpus, - top->evlist->core.threads) < 0) { + if (evsel__open(counter, counter->core.cpus, + counter->core.threads) < 0) { /* * Specially handle overwrite fall back. From a61f89bf76ef6f87ec48dd90dbc73a6cf9952edc Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 14 Dec 2023 06:46:12 -0800 Subject: [PATCH 114/179] perf top: Uniform the event name for the hybrid machine It's hard to distinguish the default cycles events among hybrid PMUs. For example, $ perf top Available samples 385 cycles:P 903 cycles:P The other tool, e.g., perf record, uniforms the event name and adds the hybrid PMU name before opening the event. So the events can be easily distinguished. Apply the same methodology for the perf top as well. The evlist__uniquify_name() will be invoked by both record and top. 
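As a standalone model of the renaming rule it applies (a simplified sketch, not the in-tree implementation; the PMU and event names are only examples):

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  static char *uniquify_name(const char *pmu, const char *name)
  {
  	char *out;

  	/* Names already carrying a PMU, e.g. "cpu_core/cycles/", are kept. */
  	if (strchr(name, '/'))
  		return strdup(name);

  	if (asprintf(&out, "%s/%s/", pmu, name) < 0)
  		return NULL;

  	return out;	/* "cycles:P" + "cpu_atom" -> "cpu_atom/cycles:P/" */
  }

A later patch in this series then moves the ':P' style modifiers inside the slashes so that the printed name can be pasted straight back onto the perf command line.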
Move it to util/evlist.c With the patch: $ perf top Available samples 148 cpu_atom/cycles:P/ 1K cpu_core/cycles:P/ Reviewed-by: Ian Rogers Signed-off-by: Kan Liang Tested-by: Arnaldo Carvalho de Melo Cc: Hector Martin Cc: Marc Zyngier Cc: Mark Rutland Cc: Namhyung Kim Link: https://lore.kernel.org/r/20231214144612.1092028-2-kan.liang@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 28 +--------------------------- tools/perf/builtin-top.c | 1 + tools/perf/util/evlist.c | 25 +++++++++++++++++++++++++ tools/perf/util/evlist.h | 1 + 4 files changed, 28 insertions(+), 27 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 8a1002683fda..a89013c44fd5 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -2237,32 +2237,6 @@ static void hit_auxtrace_snapshot_trigger(struct record *rec) } } -static void record__uniquify_name(struct record *rec) -{ - struct evsel *pos; - struct evlist *evlist = rec->evlist; - char *new_name; - int ret; - - if (perf_pmus__num_core_pmus() == 1) - return; - - evlist__for_each_entry(evlist, pos) { - if (!evsel__is_hybrid(pos)) - continue; - - if (strchr(pos->name, '/')) - continue; - - ret = asprintf(&new_name, "%s/%s/", - pos->pmu_name, pos->name); - if (ret) { - free(pos->name); - pos->name = new_name; - } - } -} - static int record__terminate_thread(struct record_thread *thread_data) { int err; @@ -2496,7 +2470,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv) if (data->is_pipe && rec->evlist->core.nr_entries == 1) rec->opts.sample_id = true; - record__uniquify_name(rec); + evlist__uniquify_name(rec->evlist); /* Debug message used by test scripts */ pr_debug3("perf record opening and mmapping events\n"); diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 13e609c0c693..baf1ab083436 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -1298,6 +1298,7 @@ static int __cmd_top(struct perf_top *top) } } + evlist__uniquify_name(top->evlist); ret = perf_top__start_counters(top); if (ret) return ret; diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 0ed3ce2aa8eb..6f0892803c22 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -2518,3 +2518,28 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis } perf_cpu_map__put(user_requested_cpus); } + +void evlist__uniquify_name(struct evlist *evlist) +{ + struct evsel *pos; + char *new_name; + int ret; + + if (perf_pmus__num_core_pmus() == 1) + return; + + evlist__for_each_entry(evlist, pos) { + if (!evsel__is_hybrid(pos)) + continue; + + if (strchr(pos->name, '/')) + continue; + + ret = asprintf(&new_name, "%s/%s/", + pos->pmu_name, pos->name); + if (ret) { + free(pos->name); + pos->name = new_name; + } + } +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index 98e7ddb2bd30..cb91dc9117a2 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -442,5 +442,6 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx); int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf); void evlist__check_mem_load_aux(struct evlist *evlist); void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_list); +void evlist__uniquify_name(struct evlist *evlist); #endif /* __PERF_EVLIST_H */ From 0b4b785d1f2557678c493dc1b431ca4ad16fde9b Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 15 Dec 2023 15:23:30 -0300 Subject: 
[PATCH 115/179] perf evlist: Move event attributes to after the / when uniquefying using the PMU name When turning an event with attributes to the format including the PMU we need to move the "event:attributes" format to "event/attributes/" so that we can copy the event displayed and use it in the command line, i.e. in 'perf top' we had: 1K cpu_atom/cycles:P/ 11K cpu_core/cycles:P/ If I try to use that on the command line: # perf top -e cpu_atom/cycles:P/ event syntax error: 'cpu_atom/cycles:P/' \___ Bad event or PMU Unable to find PMU or event on a PMU of 'cpu_atom' Initial error: event syntax error: 'cpu_atom/cycles:P/' \___ unknown term 'cycles:P' for pmu 'cpu_atom' valid terms: event,pc,edge,offcore_rsp,ldlat,inv,umask,cmask,config,config1,config2,config3,name,period,freq,branch_type,time,call-graph,stack-size,no-inherit,inherit,max-stack,nr,no-overwrite,overwrite ,driver-config,percore,aux-output,aux-sample-size,metric-id,raw,legacy-cache,hardware Run 'perf list' for a list of valid events Usage: perf top [] -e, --event event selector. use 'perf list' to list available events # Tested-by: Kan Liang Cc: Hector Martin Cc: Ian Rogers Cc: Marc Zyngier Cc: Mark Rutland Cc: Namhyung Kim Link: https://lore.kernel.org/lkml/ZXxyanyZgWBTOnoK@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evlist.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index 6f0892803c22..95f25e9fb994 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -2521,9 +2521,8 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis void evlist__uniquify_name(struct evlist *evlist) { + char *new_name, empty_attributes[2] = ":", *attributes; struct evsel *pos; - char *new_name; - int ret; if (perf_pmus__num_core_pmus() == 1) return; @@ -2535,11 +2534,17 @@ void evlist__uniquify_name(struct evlist *evlist) if (strchr(pos->name, '/')) continue; - ret = asprintf(&new_name, "%s/%s/", - pos->pmu_name, pos->name); - if (ret) { + attributes = strchr(pos->name, ':'); + if (attributes) + *attributes = '\0'; + else + attributes = empty_attributes; + + if (asprintf(&new_name, "%s/%s/%s", pos->pmu_name, pos->name, attributes + 1)) { free(pos->name); pos->name = new_name; + } else { + *attributes = ':'; } } } From 9a07a71ed3d23b56e1f05ea808ec5f59448fcc16 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 11:46:24 -0800 Subject: [PATCH 116/179] perf tests: Make DSO tests a suite rather than individual Make the DSO data tests a suite rather than individual so their output is grouped. 
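For readers unfamiliar with the perf test framework, a minimal sketch of the grouping pattern this patch applies, using hypothetical test names. Only TEST_CASE(), the struct test_case/test_suite layout and the generic_tests[] registration are taken from the patch below; everything else here is illustrative:

/* Assumes #include "tests.h" from tools/perf/tests/. */
static int test__foo_read(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	return TEST_OK;
}

static int test__foo_write(struct test_suite *test __maybe_unused, int subtest __maybe_unused)
{
	return TEST_OK;
}

/* Instead of one DEFINE_SUITE() per test, group them as sub-tests: */
static struct test_case tests__foo[] = {
	TEST_CASE("read", foo_read),
	TEST_CASE("write", foo_write),
	{ .name = NULL, }
};

struct test_suite suite__foo = {
	.desc = "Foo tests",
	.test_cases = tests__foo,
};

Only the single &suite__foo pointer would then be listed in the generic_tests[] array in tools/perf/tests/builtin-test.c, so 'perf test' reports one suite with its sub-tests grouped together, which is exactly what the diff below does for the DSO data tests.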
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: Kan Liang Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Yang Jihong Link: https://lore.kernel.org/r/20231128194624.1419260-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 2 -- tools/perf/tests/dso-data.c | 15 ++++++++++++--- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index b8c21e81a021..4a5973f9bb9b 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -62,8 +62,6 @@ static struct test_suite *generic_tests[] = { &suite__pmu, &suite__pmu_events, &suite__dso_data, - &suite__dso_data_cache, - &suite__dso_data_reopen, &suite__perf_evsel__roundtrip_name_test, #ifdef HAVE_LIBTRACEEVENT &suite__perf_evsel__tp_sched_test, diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c index 3419a4ab5590..2d67422c1222 100644 --- a/tools/perf/tests/dso-data.c +++ b/tools/perf/tests/dso-data.c @@ -394,6 +394,15 @@ static int test__dso_data_reopen(struct test_suite *test __maybe_unused, int sub return 0; } -DEFINE_SUITE("DSO data read", dso_data); -DEFINE_SUITE("DSO data cache", dso_data_cache); -DEFINE_SUITE("DSO data reopen", dso_data_reopen); + +static struct test_case tests__dso_data[] = { + TEST_CASE("read", dso_data), + TEST_CASE("cache", dso_data_cache), + TEST_CASE("reopen", dso_data_reopen), + { .name = NULL, } +}; + +struct test_suite suite__dso_data = { + .desc = "DSO data tests", + .test_cases = tests__dso_data, +}; From 3e0594f9f0f774918d63701dc7de634bb1bc7b1e Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:02:07 -0800 Subject: [PATCH 117/179] perf top: Avoid repeated function calls to perf_cpu_map__nr(). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a local variable to avoid repeated calls to perf_cpu_map__nr(). Reviewed-by: James Clark Signed-off-by: Adrian Hunter Signed-off-by: Ian Rogers Acked-by: Adrian Hunter Acked-by: Namhyung Kim Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Rajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Link: https://lore.kernel.org/r/20231129060211.1890454-11-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/top.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index be7157de0451..4db3d1bd686c 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -28,6 +28,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) struct record_opts *opts = &top->record_opts; struct target *target = &opts->target; size_t ret = 0; + int nr_cpus; if (top->samples) { samples_per_sec = top->samples / top->delay_secs; @@ -93,19 +94,17 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) else ret += SNPRINTF(bf + ret, size - ret, " (all"); + nr_cpus = perf_cpu_map__nr(top->evlist->core.user_requested_cpus); if (target->cpu_list) ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)", - perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1 - ? "s" : "", + nr_cpus > 1 ? "s" : "", target->cpu_list); else { if (target->tid) ret += SNPRINTF(bf + ret, size - ret, ")"); else ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)", - perf_cpu_map__nr(top->evlist->core.user_requested_cpus), - perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1 - ? "s" : ""); + nr_cpus, nr_cpus > 1 ? "s" : ""); } perf_top__reset_sample_counters(top); From 67bc993446d340d4f059014f6be6ead35ec5f88b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Tue, 28 Nov 2023 22:02:11 -0800 Subject: [PATCH 118/179] libperf cpumap: Document perf_cpu_map__nr()'s behavior MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit perf_cpu_map__nr()'s behavior around an empty CPU map is strange as it returns that there is 1 CPU. Changing code that may rely on this behavior is hard, we can at least document the behavior. Reviewed-by: James Clark Signed-off-by: Ian Rogers Acked-by: Adrian Hunter Acked-by: Namhyung Kim Cc: Alexander Shishkin Cc: Alexandre Ghiti Cc: Andrew Jones Cc: AndrĂ© Almeida Cc: Athira Rajeev Cc: Atish Patra Cc: Changbin Du Cc: Darren Hart Cc: Davidlohr Bueso Cc: Huacai Chen Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Mike Leach Cc: Nick Desaulniers Cc: Paolo Bonzini Cc: Paran Lee Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Suzuki Poulouse Cc: Thomas Gleixner Cc: Will Deacon Cc: Yang Jihong Cc: Yang Li Cc: Yanteng Si Link: https://lore.kernel.org/r/20231129060211.1890454-15-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/perf/include/perf/cpumap.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/lib/perf/include/perf/cpumap.h b/tools/lib/perf/include/perf/cpumap.h index dbe0a7352b64..228c6c629b0c 100644 --- a/tools/lib/perf/include/perf/cpumap.h +++ b/tools/lib/perf/include/perf/cpumap.h @@ -44,7 +44,18 @@ LIBPERF_API struct perf_cpu_map *perf_cpu_map__merge(struct perf_cpu_map *orig, LIBPERF_API struct perf_cpu_map *perf_cpu_map__intersect(struct perf_cpu_map *orig, struct perf_cpu_map *other); LIBPERF_API void perf_cpu_map__put(struct perf_cpu_map *map); +/** + * perf_cpu_map__cpu - get the CPU value at the given index. Returns -1 if index + * is invalid. 
+ */ LIBPERF_API struct perf_cpu perf_cpu_map__cpu(const struct perf_cpu_map *cpus, int idx); +/** + * perf_cpu_map__nr - for an empty map returns 1, as perf_cpu_map__cpu returns a + * cpu of -1 for an invalid index, this makes an empty map + * look like it contains the "any CPU"/dummy value. Otherwise + * the result is the number CPUs in the map plus one if the + * "any CPU"/dummy value is present. + */ LIBPERF_API int perf_cpu_map__nr(const struct perf_cpu_map *cpus); /** * perf_cpu_map__has_any_cpu_or_is_empty - is map either empty or has the "any CPU"/dummy value. From 5cc47ffba7b71e37d65c259f7f5778b3622f1e8f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:35 -0800 Subject: [PATCH 119/179] perf map: Improve map/unmap parameter names The u64 values are either absolute or relative, try to hint better in the parameter names. Suggested-by: Namhyung Kim Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/map.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 3a3b7757da5f..49756716cb13 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -105,25 +105,25 @@ static inline u64 map__dso_map_ip(const struct map *map, u64 ip) } /* dso rip -> ip */ -static inline u64 map__dso_unmap_ip(const struct map *map, u64 ip) +static inline u64 map__dso_unmap_ip(const struct map *map, u64 rip) { - return ip + map__start(map) - map__pgoff(map); + return rip + map__start(map) - map__pgoff(map); } -static inline u64 map__map_ip(const struct map *map, u64 ip) +static inline u64 map__map_ip(const struct map *map, u64 ip_or_rip) { if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO) - return map__dso_map_ip(map, ip); + return map__dso_map_ip(map, ip_or_rip); else - return ip; + return ip_or_rip; } -static inline u64 map__unmap_ip(const struct map *map, u64 ip) +static inline u64 map__unmap_ip(const struct map *map, u64 ip_or_rip) { if ((RC_CHK_ACCESS(map)->mapping_type) == MAPPING_TYPE__DSO) - return map__dso_unmap_ip(map, ip); + return map__dso_unmap_ip(map, ip_or_rip); else - return ip; + return ip_or_rip; } /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ From 19b5bd9a59bed4e9937092e2b685d69656bfc606 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:36 -0800 Subject: [PATCH 120/179] perf maps: Add maps__for_each_map to iterate maps holding the lock The macro maps__for_each_entry is error prone as it doesn't require holding the maps lock. Add a new function that iterates the maps holding the read lock. Convert maps__find_symbol_by_name() and maps__fprintf() to use callbacks, the latter being an example of where the read lock wasn't being held. 
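As an illustration of the new interface, here is a minimal sketch of a caller, assuming only the maps__for_each_map() declaration added below plus the existing map helpers; the counting callback and the count_kmodules() helper are hypothetical:

#include "util/map.h"
#include "util/maps.h"

static int count_kmodules_cb(struct map *map, void *data)
{
	unsigned int *count = data;

	if (__map__is_kmodule(map))
		(*count)++;

	return 0;	/* a non-zero return would stop the walk early */
}

static unsigned int count_kmodules(struct maps *kmaps)
{
	unsigned int count = 0;

	/* The iterator takes the maps read lock around the whole walk. */
	maps__for_each_map(kmaps, count_kmodules_cb, &count);
	return count;
}

The conversions in the rest of this series follow the same shape: state is passed in through the void *data pointer and the callback's return value controls early termination.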
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 101 +++++++++++++++++++++++++++-------------- tools/perf/util/maps.h | 3 ++ 2 files changed, 70 insertions(+), 34 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 9a011aed4b75..160a6dce54bb 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -196,6 +196,21 @@ void maps__put(struct maps *maps) RC_CHK_PUT(maps); } +int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data) +{ + struct map_rb_node *pos; + int ret = 0; + + down_read(maps__lock(maps)); + maps__for_each_entry(maps, pos) { + ret = cb(pos->map, data); + if (ret) + break; + } + up_read(maps__lock(maps)); + return ret; +} + struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) { struct map *map = maps__find(maps, addr); @@ -210,31 +225,40 @@ struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) return NULL; } -struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) -{ +struct maps__find_symbol_by_name_args { + struct map **mapp; + const char *name; struct symbol *sym; - struct map_rb_node *pos; +}; - down_read(maps__lock(maps)); +static int maps__find_symbol_by_name_cb(struct map *map, void *data) +{ + struct maps__find_symbol_by_name_args *args = data; - maps__for_each_entry(maps, pos) { - sym = map__find_symbol_by_name(pos->map, name); + args->sym = map__find_symbol_by_name(map, args->name); + if (!args->sym) + return 0; - if (sym == NULL) - continue; - if (!map__contains_symbol(pos->map, sym)) { - sym = NULL; - continue; - } - if (mapp != NULL) - *mapp = pos->map; - goto out; + if (!map__contains_symbol(map, args->sym)) { + args->sym = NULL; + return 0; } - sym = NULL; -out: - up_read(maps__lock(maps)); - return sym; + if (args->mapp != NULL) + *args->mapp = map__get(map); + return 1; +} + +struct symbol *maps__find_symbol_by_name(struct maps *maps, const char *name, struct map **mapp) +{ + struct maps__find_symbol_by_name_args args = { + .mapp = mapp, + .name = name, + .sym = NULL, + }; + + maps__for_each_map(maps, maps__find_symbol_by_name_cb, &args); + return args.sym; } int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams) @@ -253,25 +277,34 @@ int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams) return ams->ms.sym ? 
0 : -1; } +struct maps__fprintf_args { + FILE *fp; + size_t printed; +}; + +static int maps__fprintf_cb(struct map *map, void *data) +{ + struct maps__fprintf_args *args = data; + + args->printed += fprintf(args->fp, "Map:"); + args->printed += map__fprintf(map, args->fp); + if (verbose > 2) { + args->printed += dso__fprintf(map__dso(map), args->fp); + args->printed += fprintf(args->fp, "--\n"); + } + return 0; +} + size_t maps__fprintf(struct maps *maps, FILE *fp) { - size_t printed = 0; - struct map_rb_node *pos; + struct maps__fprintf_args args = { + .fp = fp, + .printed = 0, + }; - down_read(maps__lock(maps)); + maps__for_each_map(maps, maps__fprintf_cb, &args); - maps__for_each_entry(maps, pos) { - printed += fprintf(fp, "Map:"); - printed += map__fprintf(pos->map, fp); - if (verbose > 2) { - printed += dso__fprintf(map__dso(pos->map), fp); - printed += fprintf(fp, "--\n"); - } - } - - up_read(maps__lock(maps)); - - return printed; + return args.printed; } int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index a689149be8c4..14ad95979257 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -81,6 +81,9 @@ static inline void __maps__zput(struct maps **map) #define maps__zput(map) __maps__zput(&map) +/* Iterate over map calling cb for each entry. */ +int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data); + static inline struct rb_root *maps__entries(struct maps *maps) { return &RC_CHK_ACCESS(maps)->entries; From bc4bc56d9d74f4593f253531edcea9ea8e30e7f1 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:37 -0800 Subject: [PATCH 121/179] perf events x86: Use function to add missing lock Switch from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/util/event.c | 109 +++++++++++++++++-------------- 1 file changed, 61 insertions(+), 48 deletions(-) diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c index 5741ffe47312..e65b7dbe27fb 100644 --- a/tools/perf/arch/x86/util/event.c +++ b/tools/perf/arch/x86/util/event.c @@ -14,66 +14,79 @@ #if defined(__x86_64__) +struct perf_event__synthesize_extra_kmaps_cb_args { + struct perf_tool *tool; + perf_event__handler_t process; + struct machine *machine; + union perf_event *event; +}; + +static int perf_event__synthesize_extra_kmaps_cb(struct map *map, void *data) +{ + struct perf_event__synthesize_extra_kmaps_cb_args *args = data; + union perf_event *event = args->event; + struct kmap *kmap; + size_t size; + + if (!__map__is_extra_kernel_map(map)) + return 0; + + kmap = map__kmap(map); + + size = sizeof(event->mmap) - sizeof(event->mmap.filename) + + PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + + args->machine->id_hdr_size; + + memset(event, 0, size); + + event->mmap.header.type = PERF_RECORD_MMAP; + + /* + * kernel uses 0 for user space maps, see kernel/perf_event.c + * __perf_event_mmap + */ + if (machine__is_host(args->machine)) + event->header.misc = PERF_RECORD_MISC_KERNEL; + else + event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + + event->mmap.header.size = size; + + event->mmap.start = map__start(map); + event->mmap.len = map__size(map); + event->mmap.pgoff = map__pgoff(map); + event->mmap.pid = args->machine->pid; + + strlcpy(event->mmap.filename, kmap->name, PATH_MAX); + + if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0) + return -1; + + return 0; +} + int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { - int rc = 0; - struct map_rb_node *pos; + int rc; struct maps *kmaps = machine__kernel_maps(machine); - union perf_event *event = zalloc(sizeof(event->mmap) + - machine->id_hdr_size); + struct perf_event__synthesize_extra_kmaps_cb_args args = { + .tool = tool, + .process = process, + .machine = machine, + .event = zalloc(sizeof(args.event->mmap) + machine->id_hdr_size), + }; - if (!event) { + if (!args.event) { pr_debug("Not enough memory synthesizing mmap event " "for extra kernel maps\n"); return -1; } - maps__for_each_entry(kmaps, pos) { - struct kmap *kmap; - size_t size; - struct map *map = pos->map; + rc = maps__for_each_map(kmaps, perf_event__synthesize_extra_kmaps_cb, &args); - if (!__map__is_extra_kernel_map(map)) - continue; - - kmap = map__kmap(map); - - size = sizeof(event->mmap) - sizeof(event->mmap.filename) + - PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) + - machine->id_hdr_size; - - memset(event, 0, size); - - event->mmap.header.type = PERF_RECORD_MMAP; - - /* - * kernel uses 0 for user space maps, see kernel/perf_event.c - * __perf_event_mmap - */ - if (machine__is_host(machine)) - event->header.misc = PERF_RECORD_MISC_KERNEL; - else - event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; - - event->mmap.header.size = size; - - event->mmap.start = map__start(map); - event->mmap.len = map__size(map); - event->mmap.pgoff = map__pgoff(map); - event->mmap.pid = machine->pid; - - strlcpy(event->mmap.filename, kmap->name, PATH_MAX); - - if (perf_tool__process_synth_event(tool, event, machine, - process) != 0) { - rc = -1; - 
break; - } - } - - free(event); + free(args.event); return rc; } From 431be14b193ad19946aeb26a6016dc5b8eb6a248 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:38 -0800 Subject: [PATCH 122/179] perf report: Use function to add missing maps lock Switch maps__fprintf_task from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-5-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 54 +++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 17fb171e898b..178fb602bc98 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -856,27 +856,47 @@ static struct task *tasks_list(struct task *task, struct machine *machine) return tasks_list(parent_task, machine); } +struct maps__fprintf_task_args { + int indent; + FILE *fp; + size_t printed; +}; + +static int maps__fprintf_task_cb(struct map *map, void *data) +{ + struct maps__fprintf_task_args *args = data; + const struct dso *dso = map__dso(map); + u32 prot = map__prot(map); + int ret; + + ret = fprintf(args->fp, + "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n", + args->indent, "", map__start(map), map__end(map), + prot & PROT_READ ? 'r' : '-', + prot & PROT_WRITE ? 'w' : '-', + prot & PROT_EXEC ? 'x' : '-', + map__flags(map) ? 's' : 'p', + map__pgoff(map), + dso->id.ino, dso->name); + + if (ret < 0) + return ret; + + args->printed += ret; + return 0; +} + static size_t maps__fprintf_task(struct maps *maps, int indent, FILE *fp) { - size_t printed = 0; - struct map_rb_node *rb_node; + struct maps__fprintf_task_args args = { + .indent = indent, + .fp = fp, + .printed = 0, + }; - maps__for_each_entry(maps, rb_node) { - struct map *map = rb_node->map; - const struct dso *dso = map__dso(map); - u32 prot = map__prot(map); + maps__for_each_map(maps, maps__fprintf_task_cb, &args); - printed += fprintf(fp, "%*s %" PRIx64 "-%" PRIx64 " %c%c%c%c %08" PRIx64 " %" PRIu64 " %s\n", - indent, "", map__start(map), map__end(map), - prot & PROT_READ ? 'r' : '-', - prot & PROT_WRITE ? 'w' : '-', - prot & PROT_EXEC ? 'x' : '-', - map__flags(map) ? 's' : 'p', - map__pgoff(map), - dso->id.ino, dso->name); - } - - return printed; + return args.printed; } static void task__print_level(struct task *task, FILE *fp, int level) From b1928ca950386729b3bcd555efe559eaa1e2c8cc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:39 -0800 Subject: [PATCH 123/179] perf tests: Use function to add missing maps lock Switch loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. 
The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-6-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/maps.c | 61 ++++++---- tools/perf/tests/vmlinux-kallsyms.c | 181 +++++++++++++++------------- 2 files changed, 136 insertions(+), 106 deletions(-) diff --git a/tools/perf/tests/maps.c b/tools/perf/tests/maps.c index 5bb1123a91a7..bb3fbfe5a73e 100644 --- a/tools/perf/tests/maps.c +++ b/tools/perf/tests/maps.c @@ -14,44 +14,59 @@ struct map_def { u64 end; }; +struct check_maps_cb_args { + struct map_def *merged; + unsigned int i; +}; + +static int check_maps_cb(struct map *map, void *data) +{ + struct check_maps_cb_args *args = data; + struct map_def *merged = &args->merged[args->i]; + + if (map__start(map) != merged->start || + map__end(map) != merged->end || + strcmp(map__dso(map)->name, merged->name) || + refcount_read(map__refcnt(map)) != 1) { + return 1; + } + args->i++; + return 0; +} + +static int failed_cb(struct map *map, void *data __maybe_unused) +{ + pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n", + map__start(map), + map__end(map), + map__dso(map)->name, + refcount_read(map__refcnt(map))); + + return 0; +} + static int check_maps(struct map_def *merged, unsigned int size, struct maps *maps) { - struct map_rb_node *rb_node; - unsigned int i = 0; bool failed = false; if (maps__nr_maps(maps) != size) { pr_debug("Expected %d maps, got %d", size, maps__nr_maps(maps)); failed = true; } else { - maps__for_each_entry(maps, rb_node) { - struct map *map = rb_node->map; - - if (map__start(map) != merged[i].start || - map__end(map) != merged[i].end || - strcmp(map__dso(map)->name, merged[i].name) || - refcount_read(map__refcnt(map)) != 1) { - failed = true; - } - i++; - } + struct check_maps_cb_args args = { + .merged = merged, + .i = 0, + }; + failed = maps__for_each_map(maps, check_maps_cb, &args); } if (failed) { pr_debug("Expected:\n"); - for (i = 0; i < size; i++) { + for (unsigned int i = 0; i < size; i++) { pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: 1\n", merged[i].start, merged[i].end, merged[i].name); } pr_debug("Got:\n"); - maps__for_each_entry(maps, rb_node) { - struct map *map = rb_node->map; - - pr_debug("\tstart: %" PRIu64 " end: %" PRIu64 " name: '%s' refcnt: %d\n", - map__start(map), - map__end(map), - map__dso(map)->name, - refcount_read(map__refcnt(map))); - } + maps__for_each_map(maps, failed_cb, NULL); } return failed ? 
TEST_FAIL : TEST_OK; } diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c index 1078a93b01aa..822f893e67d5 100644 --- a/tools/perf/tests/vmlinux-kallsyms.c +++ b/tools/perf/tests/vmlinux-kallsyms.c @@ -112,18 +112,92 @@ static bool is_ignored_symbol(const char *name, char type) return false; } +struct test__vmlinux_matches_kallsyms_cb_args { + struct machine kallsyms; + struct map *vmlinux_map; + bool header_printed; +}; + +static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data) +{ + struct test__vmlinux_matches_kallsyms_cb_args *args = data; + struct dso *dso = map__dso(map); + /* + * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while + * the kernel will have the path for the vmlinux file being used, so use + * the short name, less descriptive but the same ("[kernel]" in both + * cases. + */ + struct map *pair = maps__find_by_name(args->kallsyms.kmaps, + (dso->kernel ? dso->short_name : dso->name)); + + if (pair) + map__set_priv(pair, 1); + else { + if (!args->header_printed) { + pr_info("WARN: Maps only in vmlinux:\n"); + args->header_printed = true; + } + map__fprintf(map, stderr); + } + return 0; +} + +static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data) +{ + struct test__vmlinux_matches_kallsyms_cb_args *args = data; + struct map *pair; + u64 mem_start = map__unmap_ip(args->vmlinux_map, map__start(map)); + u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map)); + + pair = maps__find(args->kallsyms.kmaps, mem_start); + if (pair == NULL || map__priv(pair)) + return 0; + + if (map__start(pair) == mem_start) { + struct dso *dso = map__dso(map); + + if (!args->header_printed) { + pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n"); + args->header_printed = true; + } + + pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", + map__start(map), map__end(map), map__pgoff(map), dso->name); + if (mem_end != map__end(pair)) + pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64, + map__start(pair), map__end(pair), map__pgoff(pair)); + pr_info(" %s\n", dso->name); + map__set_priv(pair, 1); + } + return 0; +} + +static int test__vmlinux_matches_kallsyms_cb3(struct map *map, void *data) +{ + struct test__vmlinux_matches_kallsyms_cb_args *args = data; + + if (!map__priv(map)) { + if (!args->header_printed) { + pr_info("WARN: Maps only in kallsyms:\n"); + args->header_printed = true; + } + map__fprintf(map, stderr); + } + return 0; +} + static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused, int subtest __maybe_unused) { int err = TEST_FAIL; struct rb_node *nd; struct symbol *sym; - struct map *kallsyms_map, *vmlinux_map; - struct map_rb_node *rb_node; - struct machine kallsyms, vmlinux; + struct map *kallsyms_map; + struct machine vmlinux; struct maps *maps; u64 mem_start, mem_end; - bool header_printed; + struct test__vmlinux_matches_kallsyms_cb_args args; /* * Step 1: @@ -131,7 +205,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused * Init the machines that will hold kernel, modules obtained from * both vmlinux + .ko files and from /proc/kallsyms split by modules. */ - machine__init(&kallsyms, "", HOST_KERNEL_ID); + machine__init(&args.kallsyms, "", HOST_KERNEL_ID); machine__init(&vmlinux, "", HOST_KERNEL_ID); maps = machine__kernel_maps(&vmlinux); @@ -143,7 +217,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused * load /proc/kallsyms. 
Also create the modules maps from /proc/modules * and find the .ko files that match them in /lib/modules/`uname -r`/. */ - if (machine__create_kernel_maps(&kallsyms) < 0) { + if (machine__create_kernel_maps(&args.kallsyms) < 0) { pr_debug("machine__create_kernel_maps failed"); err = TEST_SKIP; goto out; @@ -160,7 +234,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused * be compacted against the list of modules found in the "vmlinux" * code and with the one got from /proc/modules from the "kallsyms" code. */ - if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) { + if (machine__load_kallsyms(&args.kallsyms, "/proc/kallsyms") <= 0) { pr_debug("machine__load_kallsyms failed"); err = TEST_SKIP; goto out; @@ -174,7 +248,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused * to see if the running kernel was relocated by checking if it has the * same value in the vmlinux file we load. */ - kallsyms_map = machine__kernel_map(&kallsyms); + kallsyms_map = machine__kernel_map(&args.kallsyms); /* * Step 5: @@ -186,7 +260,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused goto out; } - vmlinux_map = machine__kernel_map(&vmlinux); + args.vmlinux_map = machine__kernel_map(&vmlinux); /* * Step 6: @@ -213,7 +287,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused * in the kallsyms dso. For the ones that are in both, check its names and * end addresses too. */ - map__for_each_symbol(vmlinux_map, sym, nd) { + map__for_each_symbol(args.vmlinux_map, sym, nd) { struct symbol *pair, *first_pair; sym = rb_entry(nd, struct symbol, rb_node); @@ -221,10 +295,10 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused if (sym->start == sym->end) continue; - mem_start = map__unmap_ip(vmlinux_map, sym->start); - mem_end = map__unmap_ip(vmlinux_map, sym->end); + mem_start = map__unmap_ip(args.vmlinux_map, sym->start); + mem_end = map__unmap_ip(args.vmlinux_map, sym->end); - first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL); + first_pair = machine__find_kernel_symbol(&args.kallsyms, mem_start, NULL); pair = first_pair; if (pair && UM(pair->start) == mem_start) { @@ -253,7 +327,8 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused */ continue; } else { - pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL); + pair = machine__find_kernel_symbol_by_name(&args.kallsyms, + sym->name, NULL); if (pair) { if (UM(pair->start) == mem_start) goto next_pair; @@ -267,7 +342,7 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused continue; } - } else if (mem_start == map__end(kallsyms.vmlinux_map)) { + } else if (mem_start == map__end(args.kallsyms.vmlinux_map)) { /* * Ignore aliases to _etext, i.e. to the end of the kernel text area, * such as __indirect_thunk_end. @@ -289,78 +364,18 @@ static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused if (verbose <= 0) goto out; - header_printed = false; + args.header_printed = false; + maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb1, &args); - maps__for_each_entry(maps, rb_node) { - struct map *map = rb_node->map; - struct dso *dso = map__dso(map); - /* - * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while - * the kernel will have the path for the vmlinux file being used, - * so use the short name, less descriptive but the same ("[kernel]" in - * both cases. 
- */ - struct map *pair = maps__find_by_name(kallsyms.kmaps, (dso->kernel ? - dso->short_name : - dso->name)); - if (pair) { - map__set_priv(pair, 1); - } else { - if (!header_printed) { - pr_info("WARN: Maps only in vmlinux:\n"); - header_printed = true; - } - map__fprintf(map, stderr); - } - } + args.header_printed = false; + maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb2, &args); - header_printed = false; + args.header_printed = false; + maps = machine__kernel_maps(&args.kallsyms); + maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb3, &args); - maps__for_each_entry(maps, rb_node) { - struct map *pair, *map = rb_node->map; - - mem_start = map__unmap_ip(vmlinux_map, map__start(map)); - mem_end = map__unmap_ip(vmlinux_map, map__end(map)); - - pair = maps__find(kallsyms.kmaps, mem_start); - if (pair == NULL || map__priv(pair)) - continue; - - if (map__start(pair) == mem_start) { - struct dso *dso = map__dso(map); - - if (!header_printed) { - pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n"); - header_printed = true; - } - - pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as", - map__start(map), map__end(map), map__pgoff(map), dso->name); - if (mem_end != map__end(pair)) - pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64, - map__start(pair), map__end(pair), map__pgoff(pair)); - pr_info(" %s\n", dso->name); - map__set_priv(pair, 1); - } - } - - header_printed = false; - - maps = machine__kernel_maps(&kallsyms); - - maps__for_each_entry(maps, rb_node) { - struct map *map = rb_node->map; - - if (!map__priv(map)) { - if (!header_printed) { - pr_info("WARN: Maps only in kallsyms:\n"); - header_printed = true; - } - map__fprintf(map, stderr); - } - } out: - machine__exit(&kallsyms); + machine__exit(&args.kallsyms); machine__exit(&vmlinux); return err; } From 2dc549b1dd495e7cdaa822a0af97365a8a1de840 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:40 -0800 Subject: [PATCH 124/179] perf machine: Use function to add missing maps lock Switch machine__map_x86_64_entry_trampolines and machine__for_each_kernel_map from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-7-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 53 +++++++++++++++++++++------------------ 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index c5de5363b5e7..ca855fc435ac 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1285,33 +1285,46 @@ static u64 find_entry_trampoline(struct dso *dso) #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000 #define X86_64_ENTRY_TRAMPOLINE 0x6000 +struct machine__map_x86_64_entry_trampolines_args { + struct maps *kmaps; + bool found; +}; + +static int machine__map_x86_64_entry_trampolines_cb(struct map *map, void *data) +{ + struct machine__map_x86_64_entry_trampolines_args *args = data; + struct map *dest_map; + struct kmap *kmap = __map__kmap(map); + + if (!kmap || !is_entry_trampoline(kmap->name)) + return 0; + + dest_map = maps__find(args->kmaps, map__pgoff(map)); + if (dest_map != map) + map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); + + args->found = true; + return 0; +} + /* Map x86_64 PTI entry trampolines */ int machine__map_x86_64_entry_trampolines(struct machine *machine, struct dso *kernel) { - struct maps *kmaps = machine__kernel_maps(machine); + struct machine__map_x86_64_entry_trampolines_args args = { + .kmaps = machine__kernel_maps(machine), + .found = false, + }; int nr_cpus_avail, cpu; - bool found = false; - struct map_rb_node *rb_node; u64 pgoff; /* * In the vmlinux case, pgoff is a virtual address which must now be * mapped to a vmlinux offset. */ - maps__for_each_entry(kmaps, rb_node) { - struct map *dest_map, *map = rb_node->map; - struct kmap *kmap = __map__kmap(map); + maps__for_each_map(args.kmaps, machine__map_x86_64_entry_trampolines_cb, &args); - if (!kmap || !is_entry_trampoline(kmap->name)) - continue; - - dest_map = maps__find(kmaps, map__pgoff(map)); - if (dest_map != map) - map__set_pgoff(map, map__map_ip(dest_map, map__pgoff(map))); - found = true; - } - if (found || machine->trampolines_mapped) + if (args.found || machine->trampolines_mapped) return 0; pgoff = find_entry_trampoline(kernel); @@ -3398,16 +3411,8 @@ int machine__for_each_dso(struct machine *machine, machine__dso_t fn, void *priv int machine__for_each_kernel_map(struct machine *machine, machine__map_t fn, void *priv) { struct maps *maps = machine__kernel_maps(machine); - struct map_rb_node *pos; - int err = 0; - maps__for_each_entry(maps, pos) { - err = fn(pos->map, priv); - if (err != 0) { - break; - } - } - return err; + return maps__for_each_map(maps, fn, priv); } bool machine__is_lock_function(struct machine *machine, u64 addr) From 300b53d5b819a33e2d4567cc35e1d92b4b49cd9c Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:41 -0800 Subject: [PATCH 125/179] perf probe-event: Use function to add missing maps lock Switch kernel_get_module_map from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. 
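This conversion also shows the search-style use of the iterator: the callback stashes its result in an args struct, takes a reference while the lock is still held, and returns 1 to stop the walk. A minimal sketch with hypothetical names (only maps__for_each_map(), map__get()/map__put() and __map__is_kmodule() are existing interfaces):

struct find_first_kmodule_args {
	struct map *result;
};

static int find_first_kmodule_cb(struct map *map, void *data)
{
	struct find_first_kmodule_args *args = data;

	if (!__map__is_kmodule(map))
		return 0;

	/* Grab a reference before the iterator drops the maps lock. */
	args->result = map__get(map);
	return 1;	/* stop iterating */
}

static struct map *find_first_kmodule(struct maps *kmaps)
{
	struct find_first_kmodule_args args = { .result = NULL, };

	maps__for_each_map(kmaps, find_first_kmodule_cb, &args);
	return args.result;	/* caller owns the reference, map__put() when done */
}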
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-8-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/probe-event.c | 40 +++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 14 deletions(-) diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 1a5b7fa459b2..a1a796043691 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -149,10 +149,32 @@ static int kernel_get_symbol_address_by_name(const char *name, u64 *addr, return 0; } +struct kernel_get_module_map_cb_args { + const char *module; + struct map *result; +}; + +static int kernel_get_module_map_cb(struct map *map, void *data) +{ + struct kernel_get_module_map_cb_args *args = data; + struct dso *dso = map__dso(map); + const char *short_name = dso->short_name; /* short_name is "[module]" */ + u16 short_name_len = dso->short_name_len; + + if (strncmp(short_name + 1, args->module, short_name_len - 2) == 0 && + args->module[short_name_len - 2] == '\0') { + args->result = map__get(map); + return 1; + } + return 0; +} + static struct map *kernel_get_module_map(const char *module) { - struct maps *maps = machine__kernel_maps(host_machine); - struct map_rb_node *pos; + struct kernel_get_module_map_cb_args args = { + .module = module, + .result = NULL, + }; /* A file path -- this is an offline module */ if (module && strchr(module, '/')) @@ -164,19 +186,9 @@ static struct map *kernel_get_module_map(const char *module) return map__get(map); } - maps__for_each_entry(maps, pos) { - /* short_name is "[module]" */ - struct dso *dso = map__dso(pos->map); - const char *short_name = dso->short_name; - u16 short_name_len = dso->short_name_len; + maps__for_each_map(machine__kernel_maps(host_machine), kernel_get_module_map_cb, &args); - if (strncmp(short_name + 1, module, - short_name_len - 2) == 0 && - module[short_name_len - 2] == '\0') { - return map__get(pos->map); - } - } - return NULL; + return args.result; } struct map *get_target_map(const char *target, struct nsinfo *nsi, bool user) From 111350c67d15ffc284801509acdcf256d77876ca Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:42 -0800 Subject: [PATCH 126/179] perf symbol: Use function to add missing maps lock Switch do_validate_kcore_modules from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. 
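The callback here relies on another property of maps__for_each_map(): the first non-zero value returned by the callback aborts the walk and becomes the iterator's return value, so a negative errno propagates straight to the caller. A minimal sketch of that pattern with hypothetical names (assumes linux/errno.h and the map/maps headers):

static int require_dso_cb(struct map *map, void *data __maybe_unused)
{
	/* Any non-zero return stops the walk and becomes the result. */
	return map__dso(map) ? 0 : -EINVAL;
}

static int validate_all_maps_have_dsos(struct maps *maps)
{
	return maps__for_each_map(maps, require_dso_cb, NULL);
}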
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-9-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 1cc42b8d8afb..72f03b875478 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1114,33 +1114,35 @@ int compare_proc_modules(const char *from, const char *to) return ret; } +static int do_validate_kcore_modules_cb(struct map *old_map, void *data) +{ + struct rb_root *modules = data; + struct module_info *mi; + struct dso *dso; + + if (!__map__is_kmodule(old_map)) + return 0; + + dso = map__dso(old_map); + /* Module must be in memory at the same address */ + mi = find_module(dso->short_name, modules); + if (!mi || mi->start != map__start(old_map)) + return -EINVAL; + + return 0; +} + static int do_validate_kcore_modules(const char *filename, struct maps *kmaps) { struct rb_root modules = RB_ROOT; - struct map_rb_node *old_node; int err; err = read_proc_modules(filename, &modules); if (err) return err; - maps__for_each_entry(kmaps, old_node) { - struct map *old_map = old_node->map; - struct module_info *mi; - struct dso *dso; + err = maps__for_each_map(kmaps, do_validate_kcore_modules_cb, &modules); - if (!__map__is_kmodule(old_map)) { - continue; - } - dso = map__dso(old_map); - /* Module must be in memory at the same address */ - mi = find_module(dso->short_name, &modules); - if (!mi || mi->start != map__start(old_map)) { - err = -EINVAL; - goto out; - } - } -out: delete_modules(&modules); return err; } From 228493d0a83bc2aec7f4a25491621f9d3b6d7bf2 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:43 -0800 Subject: [PATCH 127/179] perf synthetic-events: Use function to add missing maps lock Switch perf_event__synthesize_modules from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-10-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/synthetic-events.c | 118 ++++++++++++++++------------- 1 file changed, 67 insertions(+), 51 deletions(-) diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c index a0579c7d7b9e..3712186353fb 100644 --- a/tools/perf/util/synthetic-events.c +++ b/tools/perf/util/synthetic-events.c @@ -665,18 +665,74 @@ int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused, } #endif +struct perf_event__synthesize_modules_maps_cb_args { + struct perf_tool *tool; + perf_event__handler_t process; + struct machine *machine; + union perf_event *event; +}; + +static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data) +{ + struct perf_event__synthesize_modules_maps_cb_args *args = data; + union perf_event *event = args->event; + struct dso *dso; + size_t size; + + if (!__map__is_kmodule(map)) + return 0; + + dso = map__dso(map); + if (symbol_conf.buildid_mmap2) { + size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); + event->mmap2.header.type = PERF_RECORD_MMAP2; + event->mmap2.header.size = (sizeof(event->mmap2) - + (sizeof(event->mmap2.filename) - size)); + memset(event->mmap2.filename + size, 0, args->machine->id_hdr_size); + event->mmap2.header.size += args->machine->id_hdr_size; + event->mmap2.start = map__start(map); + event->mmap2.len = map__size(map); + event->mmap2.pid = args->machine->pid; + + memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1); + + perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false); + } else { + size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); + event->mmap.header.type = PERF_RECORD_MMAP; + event->mmap.header.size = (sizeof(event->mmap) - + (sizeof(event->mmap.filename) - size)); + memset(event->mmap.filename + size, 0, args->machine->id_hdr_size); + event->mmap.header.size += args->machine->id_hdr_size; + event->mmap.start = map__start(map); + event->mmap.len = map__size(map); + event->mmap.pid = args->machine->pid; + + memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1); + } + + if (perf_tool__process_synth_event(args->tool, event, args->machine, args->process) != 0) + return -1; + + return 0; +} + int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { - int rc = 0; - struct map_rb_node *pos; + int rc; struct maps *maps = machine__kernel_maps(machine); - union perf_event *event; - size_t size = symbol_conf.buildid_mmap2 ? - sizeof(event->mmap2) : sizeof(event->mmap); + struct perf_event__synthesize_modules_maps_cb_args args = { + .tool = tool, + .process = process, + .machine = machine, + }; + size_t size = symbol_conf.buildid_mmap2 + ? 
sizeof(args.event->mmap2) + : sizeof(args.event->mmap); - event = zalloc(size + machine->id_hdr_size); - if (event == NULL) { + args.event = zalloc(size + machine->id_hdr_size); + if (args.event == NULL) { pr_debug("Not enough memory synthesizing mmap event " "for kernel modules\n"); return -1; @@ -687,53 +743,13 @@ int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t * __perf_event_mmap */ if (machine__is_host(machine)) - event->header.misc = PERF_RECORD_MISC_KERNEL; + args.event->header.misc = PERF_RECORD_MISC_KERNEL; else - event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; + args.event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL; - maps__for_each_entry(maps, pos) { - struct map *map = pos->map; - struct dso *dso; + rc = maps__for_each_map(maps, perf_event__synthesize_modules_maps_cb, &args); - if (!__map__is_kmodule(map)) - continue; - - dso = map__dso(map); - if (symbol_conf.buildid_mmap2) { - size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); - event->mmap2.header.type = PERF_RECORD_MMAP2; - event->mmap2.header.size = (sizeof(event->mmap2) - - (sizeof(event->mmap2.filename) - size)); - memset(event->mmap2.filename + size, 0, machine->id_hdr_size); - event->mmap2.header.size += machine->id_hdr_size; - event->mmap2.start = map__start(map); - event->mmap2.len = map__size(map); - event->mmap2.pid = machine->pid; - - memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1); - - perf_record_mmap2__read_build_id(&event->mmap2, machine, false); - } else { - size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64)); - event->mmap.header.type = PERF_RECORD_MMAP; - event->mmap.header.size = (sizeof(event->mmap) - - (sizeof(event->mmap.filename) - size)); - memset(event->mmap.filename + size, 0, machine->id_hdr_size); - event->mmap.header.size += machine->id_hdr_size; - event->mmap.start = map__start(map); - event->mmap.len = map__size(map); - event->mmap.pid = machine->pid; - - memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1); - } - - if (perf_tool__process_synth_event(tool, event, machine, process) != 0) { - rc = -1; - break; - } - } - - free(event); + free(args.event); return rc; } From 71225af17f611632226a4a2fae25235fd8dad268 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:44 -0800 Subject: [PATCH 128/179] perf thread: Use function to add missing maps lock Switch thread__prepare_access from loop macro maps__for_each_entry to maps__for_each_map function that takes a callback. The function holds the maps lock, which should be held during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-11-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/thread.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index b9c2039c4230..b6986a81aa6d 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -349,34 +349,33 @@ int thread__insert_map(struct thread *thread, struct map *map) return maps__insert(thread__maps(thread), map); } -static int __thread__prepare_access(struct thread *thread) +struct thread__prepare_access_maps_cb_args { + int err; + struct maps *maps; +}; + +static int thread__prepare_access_maps_cb(struct map *map, void *data) { bool initialized = false; - int err = 0; - struct maps *maps = thread__maps(thread); - struct map_rb_node *rb_node; + struct thread__prepare_access_maps_cb_args *args = data; - down_read(maps__lock(maps)); + args->err = unwind__prepare_access(args->maps, map, &initialized); - maps__for_each_entry(maps, rb_node) { - err = unwind__prepare_access(thread__maps(thread), rb_node->map, &initialized); - if (err || initialized) - break; - } - - up_read(maps__lock(maps)); - - return err; + return (args->err || initialized) ? 1 : 0; } static int thread__prepare_access(struct thread *thread) { - int err = 0; + struct thread__prepare_access_maps_cb_args args = { + .err = 0, + }; - if (dwarf_callchain_users) - err = __thread__prepare_access(thread); + if (dwarf_callchain_users) { + args.maps = thread__maps(thread); + maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args); + } - return err; + return args.err; } static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone) From 624dda101e03c3a3a155d51e37a7bb7607cb760b Mon Sep 17 00:00:00 2001 From: Veronika Molnarova Date: Tue, 12 Dec 2023 17:59:08 +0100 Subject: [PATCH 129/179] perf archive: Add new option '--all' to pack perf.data with DSOs 'perf archive' has limited functionality and people from Red Hat Global Support Services sent a request for a new feature that would pack perf.data file together with an archive with debug symbols created by the command 'perf archive' as customers were being confused and often would forget to send perf.data file with the debug symbols. With this patch 'perf archive' now accepts an option '--all' that generates archive 'perf.all-hostname-date-time.tar.bz2' that holds file 'perf.data' and a sub-tar 'perf.symbols.tar.bz2' with debug symbols. The functionality of the command 'perf archive' was not changed. Committer testing: Run 'perf record' on a Intel 14900K machine, hybrid: root@number:~# perf record -a sleep 5s [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 4.006 MB perf.data (15427 samples) ] root@number:~# perf archive --all Now please run: $ tar xvf perf.all-number-20231219-104854.tar.bz2 && tar xvf perf.symbols.tar.bz2 -C ~/.debug wherever you need to run 'perf report' on. 
root@number:~# root@number:~# perf report --header-only # ======== # captured on : Tue Dec 19 10:48:48 2023 # header version : 1 # data offset : 1008 # data size : 4199936 # feat offset : 4200944 # hostname : number # os release : 6.6.4-200.fc39.x86_64 # perf version : 6.7.rc6.gca90f8e17b84 # arch : x86_64 # nrcpus online : 28 # nrcpus avail : 28 # cpudesc : Intel(R) Core(TM) i7-14700K # cpuid : GenuineIntel,6,183,1 # total memory : 32610508 kB # cmdline : /home/acme/bin/perf (deleted) record -a sleep 5s # event : name = cpu_atom/cycles/P, , id = { 5088024, 5088025, 5088026, 5088027, 5088028, 5088029, 5088030, 5088031, 5088032, 5088033, 5088034, 5088035 }, type = 0 (PERF_TYPE_HARDWARE), size> # event : name = cpu_core/cycles/P, , id = { 5088036, 5088037, 5088038, 5088039, 5088040, 5088041, 5088042, 5088043, 5088044, 5088045, 5088046, 5088047, 5088048, 5088049, 5088050, 5088051 },> # event : name = dummy:u, , id = { 5088052, 5088053, 5088054, 5088055, 5088056, 5088057, 5088058, 5088059, 5088060, 5088061, 5088062, 5088063, 5088064, 5088065, 5088066, 5088067, 5088068, 50> # CPU_TOPOLOGY info available, use -I to display # NUMA_TOPOLOGY info available, use -I to display # pmu mappings: cpu_atom = 10, cpu_core = 4, breakpoint = 5, cstate_core = 34, cstate_pkg = 35, i915 = 14, intel_bts = 11, intel_pt = 12, kprobe = 8, msr = 13, power = 36, software = 1, trac> # CACHE info available, use -I to display # time of first sample : 124739.850375 # time of last sample : 124744.855181 # sample duration : 5004.806 ms # sample duration : 5004.806 ms # MEM_TOPOLOGY info available, use -I to display # bpf_prog_info 2: bpf_prog_7cc47bbf07148bfe_hid_tail_call addr 0xffffffffc0000978 size 113 # bpf_prog_info 47: bpf_prog_713a545fe0530ce7_restrict_filesystems addr 0xffffffffc0000748 size 305 # bpf_prog_info 163: bpf_prog_bd834b0730296056 addr 0xffffffffc000df14 size 331 # bpf_prog_info 258: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc001fc08 size 264 # bpf_prog_info 259: bpf_prog_40ddf486530245f5_sd_devices addr 0xffffffffc00204bc size 318 # bpf_prog_info 260: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc0020630 size 63 # bpf_prog_info 261: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc0020688 size 63 # bpf_prog_info 262: bpf_prog_b37200ab714f0e17_sd_devices addr 0xffffffffc002072c size 110 # bpf_prog_info 263: bpf_prog_b90a282ee45cfed9_sd_devices addr 0xffffffffc00207d8 size 393 # bpf_prog_info 264: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc002099c size 264 # bpf_prog_info 265: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc0020ad4 size 63 # bpf_prog_info 266: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc0020b50 size 63 # bpf_prog_info 267: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc002d98c size 264 # bpf_prog_info 268: bpf_prog_be31ae23198a0378_sd_devices addr 0xffffffffc002dac8 size 297 # bpf_prog_info 269: bpf_prog_ccbbf91f3c6979c7_sd_devices addr 0xffffffffc002dc54 size 360 # bpf_prog_info 270: bpf_prog_3a0ef5414c2f6fca_sd_devices addr 0xffffffffc002dde8 size 456 # bpf_prog_info 271: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc0020bd4 size 63 # bpf_prog_info 272: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc00299b4 size 63 # bpf_prog_info 273: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc002dfd0 size 264 # bpf_prog_info 274: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc0029a3c size 63 # bpf_prog_info 275: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc002d71c size 63 # bpf_prog_info 276: 
bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc002d7a8 size 63 # bpf_prog_info 277: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc002e13c size 63 # bpf_prog_info 278: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc002e1a8 size 63 # bpf_prog_info 279: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc002e234 size 63 # bpf_prog_info 280: bpf_prog_be31ae23198a0378_sd_devices addr 0xffffffffc002e2ac size 297 # bpf_prog_info 281: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc002e42c size 63 # bpf_prog_info 282: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc002e49c size 63 # bpf_prog_info 290: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc0004b18 size 264 # bpf_prog_info 294: bpf_prog_0b1566e4b83190c5_sd_devices addr 0xffffffffc0004c50 size 360 # bpf_prog_info 295: bpf_prog_ee0e253c78993a24_sd_devices addr 0xffffffffc001cfc8 size 264 # bpf_prog_info 296: bpf_prog_6deef7357e7b4530_sd_fw_egress addr 0xffffffffc0013abc size 63 # bpf_prog_info 297: bpf_prog_6deef7357e7b4530_sd_fw_ingress addr 0xffffffffc0013b24 size 63 # btf info of id 2 # btf info of id 52 # HYBRID_TOPOLOGY info available, use -I to display # cpu_atom pmu capabilities: branches=32, max_precise=3, pmu_name=alderlake_hybrid # cpu_core pmu capabilities: branches=32, max_precise=3, pmu_name=alderlake_hybrid # intel_pt pmu capabilities: topa_multiple_entries=1, psb_cyc=1, single_range_output=1, mtc_periods=249, ip_filtering=1, output_subsys=0, cr3_filtering=1, psb_periods=3f, event_trace=0, cycl> # missing features: TRACING_DATA BRANCH_STACK GROUP_DESC AUXTRACE STAT CLOCKID DIR_FORMAT COMPRESSED CPU_PMU_CAPS CLOCK_DATA # ======== # root@number:~# And then transferring it to a ARM64 machine, a Libre Computer RK3399-PC: root@number:~# scp perf.all-number-20231219-104854.tar.bz2 acme@192.168.86.114:. acme@192.168.86.114's password: perf.all-number-20231219-104854.tar.bz2 100% 145MB 85.4MB/s 00:01 root@number:~# root@number:~# ssh acme@192.168.86.114 acme@192.168.86.114's password: Welcome to Ubuntu 23.04 (GNU/Linux 6.1.68-12200-g1c40dda3081e aarch64) * Documentation: https://help.ubuntu.com * Management: https://landscape.canonical.com * Support: https://ubuntu.com/advantage Last login: Tue Dec 19 14:53:18 2023 from 192.168.86.42 acme@roc-rk3399-pc:~$ tar xvf perf.all-number-20231219-104854.tar.bz2 && tar xvf perf.symbols.tar.bz2 -C ~/.debug perf.data perf.symbols.tar.bz2 .build-id/ad/acc227f470409213308050b71f664322e2956c [kernel.kallsyms]/adacc227f470409213308050b71f664322e2956c/ [kernel.kallsyms]/adacc227f470409213308050b71f664322e2956c/kallsyms [kernel.kallsyms]/adacc227f470409213308050b71f664322e2956c/probes .build-id/76/c91f4d62baa06bb52e07e20aba36d21a8f9797 usr/lib64/libz.so.1.2.13/76c91f4d62baa06bb52e07e20aba36d21a8f9797/ .build-id/09/d7e96bc1e3f599d15ca28b36959124b2d74410 usr/lib64/librpm_sequoia.so.1/09d7e96bc1e3f599d15ca28b36959124b2d74410/ usr/lib64/librpm_sequoia.so.1/09d7e96bc1e3f599d15ca28b36959124b2d74410/elf usr/lib64/librpm_sequoia.so.1/09d7e96bc1e3f599d15ca28b36959124b2d74410/probes acme@roc-rk3399-pc:~$ acme@roc-rk3399-pc:~$ perf report --stdio | head -40 # To display the perf.data header info, please use --header/--header-only options. # # Total Lost Samples: 0 # # Samples: 6K of event 'cpu_atom/cycles/P' # Event count (approx.): 4519946621 # # Overhead Command Shared Object Symbol # ........ ............... .............................................. 
......................................................................................................................................................... # 1.73% swapper [kernel.kallsyms] [k] intel_idle 1.43% sh [kernel.kallsyms] [k] next_uptodate_folio 0.94% make ld-linux-x86-64.so.2 [.] do_lookup_x 0.90% sh ld-linux-x86-64.so.2 [.] do_lookup_x 0.82% sh [kernel.kallsyms] [k] perf_event_mmap_output 0.74% sh [kernel.kallsyms] [k] filemap_map_pages 0.72% sh ld-linux-x86-64.so.2 [.] _dl_relocate_object 0.69% cc1 [kernel.kallsyms] [k] clear_page_erms 0.61% sh [kernel.kallsyms] [k] unmap_page_range 0.56% swapper [kernel.kallsyms] [k] poll_idle 0.52% cc1 ld-linux-x86-64.so.2 [.] do_lookup_x 0.47% make ld-linux-x86-64.so.2 [.] _dl_relocate_object 0.44% cc1 cc1 [.] make_node(tree_code) 0.43% sh [kernel.kallsyms] [k] native_irq_return_iret 0.38% sh libc.so.6 [.] _int_malloc 0.38% cc1 cc1 [.] decl_attributes(tree_node**, tree_node*, int, tree_node*) 0.38% sh [kernel.kallsyms] [k] clear_page_erms 0.37% cc1 cc1 [.] ht_lookup_with_hash(ht*, unsigned char const*, unsigned long, unsigned int, ht_lookup_option) 0.37% make [kernel.kallsyms] [k] perf_event_mmap_output 0.37% make ld-linux-x86-64.so.2 [.] _dl_lookup_symbol_x 0.35% sh [kernel.kallsyms] [k] _compound_head 0.35% make make [.] hash_find_slot 0.33% sh libc.so.6 [.] __strlen_avx2 0.33% cc1 cc1 [.] ggc_internal_alloc(unsigned long, void (*)(void*), unsigned long, unsigned long) 0.33% sh [kernel.kallsyms] [k] perf_iterate_ctx 0.31% make make [.] jhash_string 0.31% sh [kernel.kallsyms] [k] page_remove_rmap 0.30% cc1 libc.so.6 [.] _int_malloc 0.30% make libc.so.6 [.] _int_malloc acme@roc-rk3399-pc:~$ Signed-off-by: Veronika Molnarova Tested-by: Arnaldo Carvalho de Melo Cc: Michael Petlan Link: https://lore.kernel.org/r/20231212165909.14459-1-vmolnaro@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/perf-archive.sh | 40 ++++++++++++++++++++++++++++++-------- 1 file changed, 32 insertions(+), 8 deletions(-) mode change 100644 => 100755 tools/perf/perf-archive.sh diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh old mode 100644 new mode 100755 index 133f0eddbcc4..a92042eae95a --- a/tools/perf/perf-archive.sh +++ b/tools/perf/perf-archive.sh @@ -4,9 +4,19 @@ # Arnaldo Carvalho de Melo PERF_DATA=perf.data -if [ $# -ne 0 ] ; then - PERF_DATA=$1 -fi +PERF_SYMBOLS=perf.symbols +PERF_ALL=perf.all +ALL=0 + +while [ $# -gt 0 ] ; do + if [ $1 == "--all" ]; then + ALL=1 + shift + else + PERF_DATA=$1 + shift + fi +done # # PERF_BUILDID_DIR environment variable set by perf @@ -39,9 +49,23 @@ while read build_id ; do echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST done -tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST -rm $MANIFEST $BUILDIDS || true -echo -e "Now please run:\n" -echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" -echo "wherever you need to run 'perf report' on." +if [ $ALL -eq 1 ]; then # pack perf.data file together with tar containing debug symbols + HOSTNAME=$(hostname) + DATE=$(date '+%Y%m%d-%H%M%S') + tar cjf $PERF_SYMBOLS.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST + tar cjf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 $PERF_DATA $PERF_SYMBOLS.tar.bz2 + rm $PERF_SYMBOLS.tar.bz2 $MANIFEST $BUILDIDS || true + + echo -e "Now please run:\n" + echo -e "$ tar xvf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 && tar xvf $PERF_SYMBOLS.tar.bz2 -C ~/.debug\n" + echo "wherever you need to run 'perf report' on." 
+else # pack only the debug symbols + tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST + rm $MANIFEST $BUILDIDS || true + + echo -e "Now please run:\n" + echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" + echo "wherever you need to run 'perf report' on." +fi + exit 0 From e43c64c971e48d11ee100c5a8b2eadbed056f924 Mon Sep 17 00:00:00 2001 From: Veronika Molnarova Date: Tue, 12 Dec 2023 17:59:09 +0100 Subject: [PATCH 130/179] perf archive: Add new option '--unpack' to expand tarballs Archives generated by the command 'perf archive' have to be unpacked manually. Following the addition of option '--all' now there also exist a nested structure of tars, and after further discussion with Red Hat Global Support Services, they found a feature correctly unpacking archives of 'perf archive' convenient. Option '--unpack' of 'perf archive' unpacks archives generated by the command 'perf archive' as well as archives generated when used with option '--all'. The 'perf.data' file is placed in the current directory, while debug symbols are unpacked in '~/.debug' directory. A tar filename can be passed as an argument, and if not provided the command tries to find a viable perf.tar file for unpacking. Signed-off-by: Veronika Molnarova Tested-by: Arnaldo Carvalho de Melo Cc: Michael Petlan Link: https://lore.kernel.org/r/20231212165909.14459-2-vmolnaro@redhat.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/perf-archive.sh | 66 +++++++++++++++++++++++++++++++++----- 1 file changed, 58 insertions(+), 8 deletions(-) diff --git a/tools/perf/perf-archive.sh b/tools/perf/perf-archive.sh index a92042eae95a..f94795794b36 100755 --- a/tools/perf/perf-archive.sh +++ b/tools/perf/perf-archive.sh @@ -7,17 +7,72 @@ PERF_DATA=perf.data PERF_SYMBOLS=perf.symbols PERF_ALL=perf.all ALL=0 +UNPACK=0 while [ $# -gt 0 ] ; do if [ $1 == "--all" ]; then ALL=1 shift + elif [ $1 == "--unpack" ]; then + UNPACK=1 + shift else PERF_DATA=$1 + UNPACK_TAR=$1 shift fi done +if [ $UNPACK -eq 1 ]; then + if [ ! -z "$UNPACK_TAR" ]; then # tar given as an argument + if [ ! -e "$UNPACK_TAR" ]; then + echo "Provided file $UNPACK_TAR does not exist" + exit 1 + fi + TARGET="$UNPACK_TAR" + else # search for perf tar in the current directory + TARGET=`find . -regex "\./perf.*\.tar\.bz2"` + TARGET_NUM=`echo -n "$TARGET" | grep -c '^'` + + if [ -z "$TARGET" -o $TARGET_NUM -gt 1 ]; then + echo -e "Error: $TARGET_NUM files found for unpacking:\n$TARGET" + echo "Provide the requested file as an argument" + exit 1 + else + echo "Found target file for unpacking: $TARGET" + fi + fi + + if [[ "$TARGET" =~ (\./)?$PERF_ALL.*.tar.bz2 ]]; then # perf tar generated by --all option + TAR_CONTENTS=`tar tvf "$TARGET" | tr -s " " | cut -d " " -f 6` + VALID_TAR=`echo "$TAR_CONTENTS" | grep "$PERF_SYMBOLS.tar.bz2" | wc -l` # check if it contains a sub-tar perf.symbols + if [ $VALID_TAR -ne 1 ]; then + echo "Error: $TARGET file is not valid (contains zero or multiple sub-tar files with debug symbols)" + exit 1 + fi + + INTERSECT=`comm -12 <(ls) <(echo "$TAR_CONTENTS") | tr "\n" " "` # check for overwriting + if [ ! -z "$INTERSECT" ]; then # prompt if file(s) already exist in the current directory + echo "File(s) ${INTERSECT::-1} already exist in the current directory." + while true; do + read -p 'Do you wish to overwrite them? 
' yn + case $yn in + [Yy]* ) break;; + [Nn]* ) exit 1;; + * ) echo "Please answer yes or no.";; + esac + done + fi + + # unzip the perf.data file in the current working directory and debug symbols in ~/.debug directory + tar xvf $TARGET && tar xvf $PERF_SYMBOLS.tar.bz2 -C ~/.debug + + else # perf tar generated by perf archive (contains only debug symbols) + tar xvf $TARGET -C ~/.debug + fi + exit 0 +fi + # # PERF_BUILDID_DIR environment variable set by perf # path to buildid directory, default to $HOME/.debug @@ -55,17 +110,12 @@ if [ $ALL -eq 1 ]; then # pack perf.data file together with tar containing tar cjf $PERF_SYMBOLS.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST tar cjf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 $PERF_DATA $PERF_SYMBOLS.tar.bz2 rm $PERF_SYMBOLS.tar.bz2 $MANIFEST $BUILDIDS || true - - echo -e "Now please run:\n" - echo -e "$ tar xvf $PERF_ALL-$HOSTNAME-$DATE.tar.bz2 && tar xvf $PERF_SYMBOLS.tar.bz2 -C ~/.debug\n" - echo "wherever you need to run 'perf report' on." else # pack only the debug symbols tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST rm $MANIFEST $BUILDIDS || true - - echo -e "Now please run:\n" - echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" - echo "wherever you need to run 'perf report' on." fi +echo -e "Now please run:\n" +echo -e "$ perf archive --unpack\n" +echo "or unpack the tar manually wherever you need to run 'perf report' on." exit 0 From c344675ad267040b1794b0b5d85147a5023c548d Mon Sep 17 00:00:00 2001 From: Ruidong Tian Date: Thu, 14 Dec 2023 20:33:03 +0800 Subject: [PATCH 131/179] perf scripts python arm-cs-trace-disasm.py: Set start vm addr of exectable file to 0 For exectable ELF file, which e_type is ET_EXEC, dso start address is a absolute address other than offset. Just set vm_start to zero when dso start is 0x400000, which means it is a exectable file. Reviewed-by: James Clark Signed-off-by: Ruidong Tian Cc: Adrian Hunter Cc: Al Grant Cc: Alexander Shishkin Cc: Leo Yan Cc: Mathieu Poirier Cc: Mike Leach Cc: Suzuki Poulouse Cc: Tor Jeremiassen Link: https://lore.kernel.org/r/20231214123304.34087-3-tianruidong@linux.alibaba.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/scripts/python/arm-cs-trace-disasm.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index de58991c78bb..e33aee5c78d7 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -258,8 +258,9 @@ def process_event(param_dict): if (options.objdump_name != None): # It doesn't need to decrease virtual memory offset for disassembly - # for kernel dso, so in this case we set vm_start to zero. - if (dso == "[kernel.kallsyms]"): + # for kernel dso and executable file dso, so in this case we set + # vm_start to zero. 
+ if (dso == "[kernel.kallsyms]" or dso_start == 0x400000): dso_vm_start = 0 else: dso_vm_start = int(dso_start) From 2d98dbb4c9c5b09c8d2411581ab17921f872dbd3 Mon Sep 17 00:00:00 2001 From: Ruidong Tian Date: Thu, 14 Dec 2023 20:33:04 +0800 Subject: [PATCH 132/179] perf scripts python arm-cs-trace-disasm.py: Do not ignore disam first sample arm-cs-trace-disasm ignore disam the first branch sample, For example as follow, the instructions beteween 0x0000ffffae878750 and 0x0000ffffae878754 is lose: ARM CoreSight Trace Data Assembler Dump Event type: branches:uH Sample = { cpu: 0000 addr: 0x0000ffffae878750 phys_addr: 0x0000000000000000 ip: 0x0000000000000000 pid: 4003489 tid: 4003489 period: 1 time: 26765151766034 } Event type: branches:uH Sample = { cpu: 0000 addr: 0x0000000000000000 phys_addr: 0x0000000000000000 ip: 0x0000ffffae878754 pid: 4003489 tid: 4003489 period: 1 time: 26765151766034 } Initialize cpu_data earlier to fix it: ARM CoreSight Trace Data Assembler Dump Event type: branches:uH Sample = { cpu: 0000 addr: 0x0000000000000000 phys_addr: 0x0000000000000000 ip: 0x0000ffffae878754 pid: 4003489 tid: 4003489 period: 1 time: 26765151766034 } 0000000000028740 : (base address is 0x0000ffffae850000) 28750: b13ffc1f cmn x0, #4095 28754: 54000042 b.hs 0x2875c test 4003489/4003489 [0000] 26765.151766034 __GI___ioctl+0x14 /usr/lib64/libc-2.32.so Event type: branches:uH Sample = { cpu: 0000 addr: 0x0000ffffa67535ac phys_addr: 0x0000000000000000 ip: 0x0000000000000000 pid: 4003489 tid: 4003489 period: 1 time: 26765151766034 } Reviewed-by: James Clark Signed-off-by: Ruidong Tian Cc: Adrian Hunter Cc: Al Grant Cc: Alexander Shishkin Cc: Leo Yan Cc: Mathieu Poirier Cc: Mike Leach Cc: Suzuki Poulouse Cc: Tor Jeremiassen Link: https://lore.kernel.org/r/20231214123304.34087-4-tianruidong@linux.alibaba.com Signed-off-by: Arnaldo Carvalho de Melo --- .../scripts/python/arm-cs-trace-disasm.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/perf/scripts/python/arm-cs-trace-disasm.py b/tools/perf/scripts/python/arm-cs-trace-disasm.py index e33aee5c78d7..d973c2baed1c 100755 --- a/tools/perf/scripts/python/arm-cs-trace-disasm.py +++ b/tools/perf/scripts/python/arm-cs-trace-disasm.py @@ -188,6 +188,17 @@ def process_event(param_dict): dso_end = get_optional(param_dict, "dso_map_end") symbol = get_optional(param_dict, "symbol") + cpu = sample["cpu"] + ip = sample["ip"] + addr = sample["addr"] + + # Initialize CPU data if it's empty, and directly return back + # if this is the first tracing event for this CPU. + if (cpu_data.get(str(cpu) + 'addr') == None): + cpu_data[str(cpu) + 'addr'] = addr + return + + if (options.verbose == True): print("Event type: %s" % name) print_sample(sample) @@ -209,16 +220,6 @@ def process_event(param_dict): if (name[0:8] != "branches"): return - cpu = sample["cpu"] - ip = sample["ip"] - addr = sample["addr"] - - # Initialize CPU data if it's empty, and directly return back - # if this is the first tracing event for this CPU. - if (cpu_data.get(str(cpu) + 'addr') == None): - cpu_data[str(cpu) + 'addr'] = addr - return - # The format for packet is: # # +------------+------------+------------+ From 16f533ade706d33e60324ff32e526bda20bccbd9 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:45 -0800 Subject: [PATCH 133/179] perf unwind: Use function to add missing maps lock Switch read_unwind_spec_eh_frame() from loop macro maps__for_each_entry() to maps__for_each_map() function that takes a callback. 
The function holds the maps lock, which should be held during iteration. Committer notes: Fixed up conflict with: 4fb54994b2360ab5 ("perf unwind-libunwind: Fix base address for .eh_frame") Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: German Gomez Cc: James Clark Cc: Jiri Olsa Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Steinar H. Gunderson Cc: Wenyu Liu Cc: Yang Jihong Cc: changbin du Cc: colin ian king Cc: dmitrii dolgov <9erthalion6@gmail.com> Cc: guilherme amadio Cc: huacai chen Cc: k prateek nayak Cc: li dong Cc: liam howlett Cc: miguel ojeda Cc: ming wang Cc: sean christopherson Cc: vincent whitchurch Link: http://lore.kernel.org/lkml/20231207011722.1220634-12-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/unwind-libunwind-local.c | 34 +++++++++++++++++------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index 5e5c3395a499..dac536e28360 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c @@ -302,12 +302,31 @@ static int unwind_spec_ehframe(struct dso *dso, struct machine *machine, return 0; } +struct read_unwind_spec_eh_frame_maps_cb_args { + struct dso *dso; + u64 base_addr; +}; + +static int read_unwind_spec_eh_frame_maps_cb(struct map *map, void *data) +{ + + struct read_unwind_spec_eh_frame_maps_cb_args *args = data; + + if (map__dso(map) == args->dso && map__start(map) - map__pgoff(map) < args->base_addr) + args->base_addr = map__start(map) - map__pgoff(map); + + return 0; +} + + static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, u64 *table_data, u64 *segbase, u64 *fde_count) { - struct map_rb_node *map_node; - u64 base_addr = UINT64_MAX; + struct read_unwind_spec_eh_frame_maps_cb_args args = { + .dso = dso, + .base_addr = UINT64_MAX, + }; int ret, fd; if (dso->data.eh_frame_hdr_offset == 0) { @@ -325,16 +344,11 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct unwind_info *ui, return -EINVAL; } - maps__for_each_entry(thread__maps(ui->thread), map_node) { - struct map *map = map_node->map; - u64 start = map__start(map) - map__pgoff(map); + maps__for_each_map(thread__maps(ui->thread), read_unwind_spec_eh_frame_maps_cb, &args); - if (map__dso(map) == dso && start < base_addr) - base_addr = start; - } - base_addr -= dso->data.elf_base_addr; + args.base_addr -= dso->data.elf_base_addr; /* Address of .eh_frame_hdr */ - *segbase = base_addr + dso->data.eh_frame_hdr_addr; + *segbase = args.base_addr + dso->data.eh_frame_hdr_addr; ret = unwind_spec_ehframe(dso, ui->machine, dso->data.eh_frame_hdr_offset, table_data, fde_count); if (ret) From 51ab715e2bf037cd0525494b7ed496c46e4013c6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:46 -0800 Subject: [PATCH 134/179] perf vdso: Use function to add missing maps lock Switch machine__thread_dso_type() from loop macro maps__for_each_entry() to maps__for_each_map() function that takes a callback. The function holds the maps lock, which should be held during iteration. 
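The conversion pattern is the same across this series: per-iteration state moves into a small args struct, the old loop body becomes a callback that returns non-zero to stop early, and maps__for_each_map() takes the maps lock around the whole walk. A minimal caller-side sketch, assuming only the maps__for_each_map() prototype shown in these diffs (the callback and args struct below are hypothetical, not code from the series):

  struct count_maps_args {
          unsigned int nr;        /* number of maps visited so far */
  };

  /* Hypothetical callback: visit every map, return 0 to keep iterating. */
  static int count_maps_cb(struct map *map __maybe_unused, void *data)
  {
          struct count_maps_args *args = data;

          args->nr++;
          return 0;
  }

  static unsigned int count_maps(struct maps *maps)
  {
          struct count_maps_args args = { .nr = 0 };

          /* The helper holds the maps lock for the whole walk. */
          maps__for_each_map(maps, count_maps_cb, &args);
          return args.nr;
  }
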
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-13-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/vdso.c | 35 +++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/tools/perf/util/vdso.c b/tools/perf/util/vdso.c index ae3eee69b659..df8963796187 100644 --- a/tools/perf/util/vdso.c +++ b/tools/perf/util/vdso.c @@ -140,23 +140,34 @@ static struct dso *__machine__addnew_vdso(struct machine *machine, const char *s return dso; } +struct machine__thread_dso_type_maps_cb_args { + struct machine *machine; + enum dso_type dso_type; +}; + +static int machine__thread_dso_type_maps_cb(struct map *map, void *data) +{ + struct machine__thread_dso_type_maps_cb_args *args = data; + struct dso *dso = map__dso(map); + + if (!dso || dso->long_name[0] != '/') + return 0; + + args->dso_type = dso__type(dso, args->machine); + return (args->dso_type != DSO__TYPE_UNKNOWN) ? 1 : 0; +} + static enum dso_type machine__thread_dso_type(struct machine *machine, struct thread *thread) { - enum dso_type dso_type = DSO__TYPE_UNKNOWN; - struct map_rb_node *rb_node; + struct machine__thread_dso_type_maps_cb_args args = { + .machine = machine, + .dso_type = DSO__TYPE_UNKNOWN, + }; - maps__for_each_entry(thread__maps(thread), rb_node) { - struct dso *dso = map__dso(rb_node->map); + maps__for_each_map(thread__maps(thread), machine__thread_dso_type_maps_cb, &args); - if (!dso || dso->long_name[0] != '/') - continue; - dso_type = dso__type(dso, machine); - if (dso_type != DSO__TYPE_UNKNOWN) - break; - } - - return dso_type; + return args.dso_type; } #if BITS_PER_LONG == 64 From 9cce3a161e17b5c1d50de75e85c1aeb7452d17e6 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:47 -0800 Subject: [PATCH 135/179] perf maps: Reduce scope of maps__for_each_entry() Reduce scope of maps__for_each_entry() as maps__for_each_map() is a safer alternative holding the maps lock during iteration. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-14-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 3 +++ tools/perf/util/maps.h | 3 --- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 160a6dce54bb..00e6589bba10 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -10,6 +10,9 @@ #include "ui/ui.h" #include "unwind.h" +#define maps__for_each_entry(maps, map) \ + for (map = maps__first(maps); map; map = map_rb_node__next(map)) + static void maps__init(struct maps *maps, struct machine *machine) { refcount_set(maps__refcnt(maps), 1); diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index 14ad95979257..8ac30cdaf5bd 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -36,9 +36,6 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node); struct map_rb_node *maps__find_node(struct maps *maps, struct map *map); struct map *maps__find(struct maps *maps, u64 addr); -#define maps__for_each_entry(maps, map) \ - for (map = maps__first(maps); map; map = map_rb_node__next(map)) - #define maps__for_each_entry_safe(maps, map, next) \ for (map = maps__first(maps), next = map_rb_node__next(map); map; \ map = next, next = map_rb_node__next(map)) From 8d5847a61723b4dbb3da3b356925c4928ed14de2 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:48 -0800 Subject: [PATCH 136/179] perf maps: Add remove maps function to remove a map based on callback Removing maps wasn't being done under the write lock. Similar to maps__for_each_map(), iterate the entries but in this case remove the entry based on the result of the callback. If an entry was removed then maps_by_name() also needs updating, so add missed flush. In dso__load_kcore(), the test of map to save would always be false with REFCNT_CHECKING because of a missing RC_CHK_ACCESS/RC_CHK_EQUAL. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-15-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 24 ++++++++++++++++++++++++ tools/perf/util/maps.h | 6 ++---- tools/perf/util/symbol.c | 24 ++++++++++++------------ 3 files changed, 38 insertions(+), 16 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 00e6589bba10..f13fd3a9686b 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -13,6 +13,10 @@ #define maps__for_each_entry(maps, map) \ for (map = maps__first(maps); map; map = map_rb_node__next(map)) +#define maps__for_each_entry_safe(maps, map, next) \ + for (map = maps__first(maps), next = map_rb_node__next(map); map; \ + map = next, next = map_rb_node__next(map)) + static void maps__init(struct maps *maps, struct machine *machine) { refcount_set(maps__refcnt(maps), 1); @@ -214,6 +218,26 @@ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data) return ret; } +void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data) +{ + struct map_rb_node *pos, *next; + unsigned int start_nr_maps; + + down_write(maps__lock(maps)); + + start_nr_maps = maps__nr_maps(maps); + maps__for_each_entry_safe(maps, pos, next) { + if (cb(pos->map, data)) { + __maps__remove(maps, pos); + --RC_CHK_ACCESS(maps)->nr_maps; + } + } + if (maps__maps_by_name(maps) && start_nr_maps != maps__nr_maps(maps)) + __maps__free_maps_by_name(maps); + + up_write(maps__lock(maps)); +} + struct symbol *maps__find_symbol(struct maps *maps, u64 addr, struct map **mapp) { struct map *map = maps__find(maps, addr); diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index 8ac30cdaf5bd..b94ad5c8fea7 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -36,10 +36,6 @@ struct map_rb_node *map_rb_node__next(struct map_rb_node *node); struct map_rb_node *maps__find_node(struct maps *maps, struct map *map); struct map *maps__find(struct maps *maps, u64 addr); -#define maps__for_each_entry_safe(maps, map, next) \ - for (map = maps__first(maps), next = map_rb_node__next(map); map; \ - map = next, next = map_rb_node__next(map)) - DECLARE_RC_STRUCT(maps) { struct rb_root entries; struct rw_semaphore lock; @@ -80,6 +76,8 @@ static inline void __maps__zput(struct maps **map) /* Iterate over map calling cb for each entry. */ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data); +/* Iterate over map removing an entry if cb returns true. */ +void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data); static inline struct rb_root *maps__entries(struct maps *maps) { diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 72f03b875478..be212ba157dc 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1239,13 +1239,23 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data) return 0; } +static bool remove_old_maps(struct map *map, void *data) +{ + const struct map *map_to_save = data; + + /* + * We need to preserve eBPF maps even if they are covered by kcore, + * because we need to access eBPF dso for source data. 
+ */ + return !RC_CHK_EQUAL(map, map_to_save) && !__map__is_bpf_prog(map); +} + static int dso__load_kcore(struct dso *dso, struct map *map, const char *kallsyms_filename) { struct maps *kmaps = map__kmaps(map); struct kcore_mapfn_data md; struct map *replacement_map = NULL; - struct map_rb_node *old_node, *next; struct machine *machine; bool is_64_bit; int err, fd; @@ -1292,17 +1302,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map, } /* Remove old maps */ - maps__for_each_entry_safe(kmaps, old_node, next) { - struct map *old_map = old_node->map; - - /* - * We need to preserve eBPF maps even if they are - * covered by kcore, because we need to access - * eBPF dso for source data. - */ - if (old_map != map && !__map__is_bpf_prog(old_map)) - maps__remove(kmaps, old_map); - } + maps__remove_maps(kmaps, remove_old_maps, map); machine->trampolines_mapped = false; /* Find the kernel map using the '_stext' symbol */ From ec49230cf6dda70437bf878281566dd82af9affa Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:49 -0800 Subject: [PATCH 137/179] perf debug: Expose debug file Some dumping call backs need to be passed a FILE*. Expose debug file via an accessor API for a consistent way to do this. Catch the unlikely failure of it not being set. Switch two cases where stderr was being used instead of debug_file. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-16-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/debug.c | 22 +++++++++++++++------- tools/perf/util/debug.h | 1 + 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index 88378c4c5dd9..e282b4ceb4d2 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -38,12 +38,21 @@ bool dump_trace = false, quiet = false; int debug_ordered_events; static int redirect_to_stderr; int debug_data_convert; -static FILE *debug_file; +static FILE *_debug_file; bool debug_display_time; +FILE *debug_file(void) +{ + if (!_debug_file) { + pr_warning_once("debug_file not set"); + debug_set_file(stderr); + } + return _debug_file; +} + void debug_set_file(FILE *file) { - debug_file = file; + _debug_file = file; } void debug_set_display_time(bool set) @@ -78,8 +87,8 @@ int veprintf(int level, int var, const char *fmt, va_list args) if (use_browser >= 1 && !redirect_to_stderr) { ui_helpline__vshow(fmt, args); } else { - ret = fprintf_time(debug_file); - ret += vfprintf(debug_file, fmt, args); + ret = fprintf_time(debug_file()); + ret += vfprintf(debug_file(), fmt, args); } } @@ -107,9 +116,8 @@ static int veprintf_time(u64 t, const char *fmt, va_list args) nsecs -= secs * NSEC_PER_SEC; usecs = nsecs / NSEC_PER_USEC; - ret = fprintf(stderr, "[%13" PRIu64 ".%06" PRIu64 "] ", - secs, usecs); - ret += vfprintf(stderr, fmt, args); + ret = fprintf(debug_file(), "[%13" PRIu64 ".%06" PRIu64 "] ", secs, usecs); + ret += vfprintf(debug_file(), fmt, args); return ret; } diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h index f99468a7f681..de8870980d44 100644 --- a/tools/perf/util/debug.h +++ b/tools/perf/util/debug.h @@ -77,6 +77,7 @@ int eprintf_time(int level, int var, u64 t, const char *fmt, ...) __printf(4, 5) int veprintf(int level, int var, const char *fmt, va_list args); int perf_debug_option(const char *str); +FILE *debug_file(void); void debug_set_file(FILE *file); void debug_set_display_time(bool set); void perf_debug_setup(void); From 07ef14d50cf1af1caa49cd183216a517e75e8a93 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:50 -0800 Subject: [PATCH 138/179] perf maps: Refactor maps__fixup_overlappings() Rename to maps__fixup_overlap_and_insert() as the given mapping is always inserted. Factor out first_ending_after() as a utility function. Minor variable name changes. Switch to using debug_file() rather than passing a debug FILE*. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-17-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 57 ++++++++++++++++++++++++---------------- tools/perf/util/maps.h | 2 +- tools/perf/util/thread.c | 3 +-- 3 files changed, 37 insertions(+), 25 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index f13fd3a9686b..94a97923527d 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -334,20 +334,16 @@ size_t maps__fprintf(struct maps *maps, FILE *fp) return args.printed; } -int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) +/* + * Find first map where end > map->start. + * Same as find_vma() in kernel. + */ +static struct rb_node *first_ending_after(struct maps *maps, const struct map *map) { struct rb_root *root; struct rb_node *next, *first; - int err = 0; - - down_write(maps__lock(maps)); root = maps__entries(maps); - - /* - * Find first map where end > map->start. - * Same as find_vma() in kernel. - */ next = root->rb_node; first = NULL; while (next) { @@ -361,8 +357,23 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) } else next = next->rb_right; } + return first; +} - next = first; +/* + * Adds new to maps, if new overlaps existing entries then the existing maps are + * adjusted or removed so that new fits without overlapping any entries. + */ +int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) +{ + + struct rb_node *next; + int err = 0; + FILE *fp = debug_file(); + + down_write(maps__lock(maps)); + + next = first_ending_after(maps, new); while (next && !err) { struct map_rb_node *pos = rb_entry(next, struct map_rb_node, rb_node); next = rb_next(&pos->rb_node); @@ -371,27 +382,27 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) * Stop if current map starts after map->end. * Maps are ordered by start: next will not overlap for sure. 
*/ - if (map__start(pos->map) >= map__end(map)) + if (map__start(pos->map) >= map__end(new)) break; if (verbose >= 2) { if (use_browser) { pr_debug("overlapping maps in %s (disable tui for more info)\n", - map__dso(map)->name); + map__dso(new)->name); } else { - fputs("overlapping maps:\n", fp); - map__fprintf(map, fp); + pr_debug("overlapping maps:\n"); + map__fprintf(new, fp); map__fprintf(pos->map, fp); } } - rb_erase_init(&pos->rb_node, root); + rb_erase_init(&pos->rb_node, maps__entries(maps)); /* * Now check if we need to create new maps for areas not * overlapped by the new map: */ - if (map__start(map) > map__start(pos->map)) { + if (map__start(new) > map__start(pos->map)) { struct map *before = map__clone(pos->map); if (before == NULL) { @@ -399,7 +410,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) goto put_map; } - map__set_end(before, map__start(map)); + map__set_end(before, map__start(new)); err = __maps__insert(maps, before); if (err) { map__put(before); @@ -411,7 +422,7 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) map__put(before); } - if (map__end(map) < map__end(pos->map)) { + if (map__end(new) < map__end(pos->map)) { struct map *after = map__clone(pos->map); if (after == NULL) { @@ -419,10 +430,10 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) goto put_map; } - map__set_start(after, map__end(map)); - map__add_pgoff(after, map__end(map) - map__start(pos->map)); - assert(map__map_ip(pos->map, map__end(map)) == - map__map_ip(after, map__end(map))); + map__set_start(after, map__end(new)); + map__add_pgoff(after, map__end(new) - map__start(pos->map)); + assert(map__map_ip(pos->map, map__end(new)) == + map__map_ip(after, map__end(new))); err = __maps__insert(maps, after); if (err) { map__put(after); @@ -436,6 +447,8 @@ int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) map__put(pos->map); free(pos); } + /* Add the map. */ + err = __maps__insert(maps, new); up_write(maps__lock(maps)); return err; } diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index b94ad5c8fea7..62e94d443c02 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -133,7 +133,7 @@ struct addr_map_symbol; int maps__find_ams(struct maps *maps, struct addr_map_symbol *ams); -int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp); +int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new); struct map *maps__find_by_name(struct maps *maps, const char *name); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index b6986a81aa6d..3d47b5c5528b 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -345,8 +345,7 @@ int thread__insert_map(struct thread *thread, struct map *map) if (ret) return ret; - maps__fixup_overlappings(thread__maps(thread), map, stderr); - return maps__insert(thread__maps(thread), map); + return maps__fixup_overlap_and_insert(thread__maps(thread), map); } struct thread__prepare_access_maps_cb_args { From 980d7927213a57c9a31ff4ea685eb93fbe4b31ab Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:51 -0800 Subject: [PATCH 139/179] perf maps: Do simple merge if given map doesn't overlap Simplify merge in for the simple case of a non-overlapping map. 
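The fast path added below rests on a single interval check: locate the first existing map whose end lies after the new map's start, and if even that map begins at or after the new map's end, nothing can overlap and a plain maps__insert() is enough. A small standalone sketch of that test with made-up addresses (illustrative only, not the patch's code):

  #include <linux/types.h>
  #include <stdbool.h>

  /* Maps are ordered by start address. Given the first existing map whose
   * end lies after the new map's start, the two can only overlap if that
   * existing map starts before the new map ends. */
  static bool overlaps(u64 next_start, u64 new_end)
  {
          return next_start < new_end;
  }

  /* e.g. a new map [0x1000, 0x2000) against an existing map starting at
   * 0x3000: 0x3000 < 0x2000 is false, so the new map is simply inserted. */
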
Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-18-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 94a97923527d..f305a4834cf0 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -697,9 +697,20 @@ void maps__fixup_end(struct maps *maps) int maps__merge_in(struct maps *kmaps, struct map *new_map) { struct map_rb_node *rb_node; + struct rb_node *first; + bool overlaps; LIST_HEAD(merged); int err = 0; + down_read(maps__lock(kmaps)); + first = first_ending_after(kmaps, new_map); + rb_node = first ? rb_entry(first, struct map_rb_node, rb_node) : NULL; + overlaps = rb_node && map__start(rb_node->map) < map__end(new_map); + up_read(maps__lock(kmaps)); + + if (!overlaps) + return maps__insert(kmaps, new_map); + maps__for_each_entry(kmaps, rb_node) { struct map *old_map = rb_node->map; From 9084952704ba075de28684301ec282b6626b5e7a Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:52 -0800 Subject: [PATCH 140/179] perf maps: Rename clone to copy from Rename maps__clone() to maps__copy_from() to be more intention revealing of its behavior. Pass the underlying maps rather than the thread. Signed-off-by: Ian Rogers Acked-by: Namhyung Kim Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-19-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 2 +- tools/perf/util/maps.c | 6 +----- tools/perf/util/maps.h | 3 +-- tools/perf/util/thread.c | 2 +- 4 files changed, 4 insertions(+), 9 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index ca855fc435ac..38bf7108821d 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -453,7 +453,7 @@ static struct thread *findnew_guest_code(struct machine *machine, * Guest code can be found in hypervisor process at the same address * so copy host maps. 
*/ - err = maps__clone(thread, thread__maps(host_thread)); + err = maps__copy_from(thread__maps(thread), thread__maps(host_thread)); thread__put(host_thread); if (err) goto out_err; diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index f305a4834cf0..986daa1b0497 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -453,12 +453,8 @@ int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new) return err; } -/* - * XXX This should not really _copy_ te maps, but refcount them. - */ -int maps__clone(struct thread *thread, struct maps *parent) +int maps__copy_from(struct maps *maps, struct maps *parent) { - struct maps *maps = thread__maps(thread); int err; struct map_rb_node *rb_node; diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index 62e94d443c02..e4a49d6ff5cf 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -14,7 +14,6 @@ struct ref_reloc_sym; struct machine; struct map; struct maps; -struct thread; struct map_rb_node { struct rb_node rb_node; @@ -61,7 +60,7 @@ struct kmap { struct maps *maps__new(struct machine *machine); bool maps__empty(struct maps *maps); -int maps__clone(struct thread *thread, struct maps *parent); +int maps__copy_from(struct maps *maps, struct maps *parent); struct maps *maps__get(struct maps *maps); void maps__put(struct maps *maps); diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3d47b5c5528b..89c47a5098e2 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -390,7 +390,7 @@ static int thread__clone_maps(struct thread *thread, struct thread *parent, bool return 0; } /* But this one is new process, copy maps. */ - return do_maps_clone ? maps__clone(thread, thread__maps(parent)) : 0; + return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0; } int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone) From e77b0236cd0cd1572c6a9b25097b207eab799e74 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:53 -0800 Subject: [PATCH 141/179] perf maps: Add maps__load_first() Avoid bpf_lock_contention_read touching the internal maps data structure by adding a helper function. As access is done directly on the map in maps, hold the read lock to stop it being removed. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-20-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf_lock_contention.c | 2 +- tools/perf/util/maps.c | 13 +++++++++++++ tools/perf/util/maps.h | 2 ++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c index f1716c089c99..31ff19afc20c 100644 --- a/tools/perf/util/bpf_lock_contention.c +++ b/tools/perf/util/bpf_lock_contention.c @@ -318,7 +318,7 @@ int lock_contention_read(struct lock_contention *con) } /* make sure it loads the kernel map */ - map__load(maps__first(machine->kmaps)->map); + maps__load_first(machine->kmaps); prev_key = NULL; while (!bpf_map_get_next_key(fd, prev_key, &key)) { diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 986daa1b0497..024a6c9f72c4 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -793,3 +793,16 @@ int maps__merge_in(struct maps *kmaps, struct map *new_map) } return err; } + +void maps__load_first(struct maps *maps) +{ + struct map_rb_node *first; + + down_read(maps__lock(maps)); + + first = maps__first(maps); + if (first) + map__load(first->map); + + up_read(maps__lock(maps)); +} diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index e4a49d6ff5cf..b7ab3ec61b7c 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -142,4 +142,6 @@ void __maps__sort_by_name(struct maps *maps); void maps__fixup_end(struct maps *maps); +void maps__load_first(struct maps *maps); + #endif // __PERF_MAPS_H From 75858007d101cf38e9a79d682d0361bb6493d7cc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:54 -0800 Subject: [PATCH 142/179] perf maps: Add find next entry to give entry after the given map Use to remove map_rb_node use from machine.c. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-21-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/machine.c | 7 +++---- tools/perf/util/maps.c | 11 +++++++++++ tools/perf/util/maps.h | 2 ++ 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 38bf7108821d..b397a769006f 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1762,12 +1762,11 @@ int machine__create_kernel_maps(struct machine *machine) if (end == ~0ULL) { /* update end address of the kernel map using adjacent module address */ - struct map_rb_node *rb_node = maps__find_node(machine__kernel_maps(machine), - machine__kernel_map(machine)); - struct map_rb_node *next = map_rb_node__next(rb_node); + struct map *next = maps__find_next_entry(machine__kernel_maps(machine), + machine__kernel_map(machine)); if (next) - machine__set_kernel_mmap(machine, start, map__start(next->map)); + machine__set_kernel_mmap(machine, start, map__start(next)); } out_put: diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 024a6c9f72c4..5b898a0e97b2 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -663,6 +663,17 @@ struct map *maps__find_by_name(struct maps *maps, const char *name) return map; } +struct map *maps__find_next_entry(struct maps *maps, struct map *map) +{ + struct map_rb_node *rb_node = maps__find_node(maps, map); + struct map_rb_node *next = map_rb_node__next(rb_node); + + if (next) + return next->map; + + return NULL; +} + void maps__fixup_end(struct maps *maps) { struct map_rb_node *prev = NULL, *curr; diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index b7ab3ec61b7c..84b42c8456e8 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -136,6 +136,8 @@ int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new); struct map *maps__find_by_name(struct maps *maps, const char *name); +struct map *maps__find_next_entry(struct maps *maps, struct map *map); + int maps__merge_in(struct maps *kmaps, struct map *new_map); void __maps__sort_by_name(struct maps *maps); From 631bb236aa6f306fd2ba16aee9ae96083453eebc Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:55 -0800 Subject: [PATCH 143/179] perf maps: Reduce scope of map_rb_node and maps internals Avoid exposing the implementation of maps so that the internals can be refactored. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-22-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 90 ++++++++++++++++++++++++++---------------- tools/perf/util/maps.h | 23 ----------- 2 files changed, 55 insertions(+), 58 deletions(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index 5b898a0e97b2..dcd67384d877 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -10,6 +10,11 @@ #include "ui/ui.h" #include "unwind.h" +struct map_rb_node { + struct rb_node rb_node; + struct map *map; +}; + #define maps__for_each_entry(maps, map) \ for (map = maps__first(maps); map; map = map_rb_node__next(map)) @@ -17,6 +22,56 @@ for (map = maps__first(maps), next = map_rb_node__next(map); map; \ map = next, next = map_rb_node__next(map)) +static struct rb_root *maps__entries(struct maps *maps) +{ + return &RC_CHK_ACCESS(maps)->entries; +} + +static struct rw_semaphore *maps__lock(struct maps *maps) +{ + return &RC_CHK_ACCESS(maps)->lock; +} + +static struct map **maps__maps_by_name(struct maps *maps) +{ + return RC_CHK_ACCESS(maps)->maps_by_name; +} + +static struct map_rb_node *maps__first(struct maps *maps) +{ + struct rb_node *first = rb_first(maps__entries(maps)); + + if (first) + return rb_entry(first, struct map_rb_node, rb_node); + return NULL; +} + +static struct map_rb_node *map_rb_node__next(struct map_rb_node *node) +{ + struct rb_node *next; + + if (!node) + return NULL; + + next = rb_next(&node->rb_node); + + if (!next) + return NULL; + + return rb_entry(next, struct map_rb_node, rb_node); +} + +static struct map_rb_node *maps__find_node(struct maps *maps, struct map *map) +{ + struct map_rb_node *rb_node; + + maps__for_each_entry(maps, rb_node) { + if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)) + return rb_node; + } + return NULL; +} + static void maps__init(struct maps *maps, struct machine *machine) { refcount_set(maps__refcnt(maps), 1); @@ -485,17 +540,6 @@ int maps__copy_from(struct maps *maps, struct maps *parent) return err; } -struct map_rb_node *maps__find_node(struct maps *maps, struct map *map) -{ - struct map_rb_node *rb_node; - - maps__for_each_entry(maps, rb_node) { - if (rb_node->RC_CHK_ACCESS(map) == RC_CHK_ACCESS(map)) - return rb_node; - } - return NULL; -} - struct map *maps__find(struct maps *maps, u64 ip) { struct rb_node *p; @@ -521,30 +565,6 @@ struct map *maps__find(struct maps *maps, u64 ip) return m ? 
m->map : NULL; } -struct map_rb_node *maps__first(struct maps *maps) -{ - struct rb_node *first = rb_first(maps__entries(maps)); - - if (first) - return rb_entry(first, struct map_rb_node, rb_node); - return NULL; -} - -struct map_rb_node *map_rb_node__next(struct map_rb_node *node) -{ - struct rb_node *next; - - if (!node) - return NULL; - - next = rb_next(&node->rb_node); - - if (!next) - return NULL; - - return rb_entry(next, struct map_rb_node, rb_node); -} - static int map__strcmp(const void *a, const void *b) { const struct map *map_a = *(const struct map **)a; diff --git a/tools/perf/util/maps.h b/tools/perf/util/maps.h index 84b42c8456e8..d836d04c9402 100644 --- a/tools/perf/util/maps.h +++ b/tools/perf/util/maps.h @@ -15,11 +15,6 @@ struct machine; struct map; struct maps; -struct map_rb_node { - struct rb_node rb_node; - struct map *map; -}; - struct map_list_node { struct list_head node; struct map *map; @@ -30,9 +25,6 @@ static inline struct map_list_node *map_list_node__new(void) return malloc(sizeof(struct map_list_node)); } -struct map_rb_node *maps__first(struct maps *maps); -struct map_rb_node *map_rb_node__next(struct map_rb_node *node); -struct map_rb_node *maps__find_node(struct maps *maps, struct map *map); struct map *maps__find(struct maps *maps, u64 addr); DECLARE_RC_STRUCT(maps) { @@ -78,26 +70,11 @@ int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data) /* Iterate over map removing an entry if cb returns true. */ void maps__remove_maps(struct maps *maps, bool (*cb)(struct map *map, void *data), void *data); -static inline struct rb_root *maps__entries(struct maps *maps) -{ - return &RC_CHK_ACCESS(maps)->entries; -} - static inline struct machine *maps__machine(struct maps *maps) { return RC_CHK_ACCESS(maps)->machine; } -static inline struct rw_semaphore *maps__lock(struct maps *maps) -{ - return &RC_CHK_ACCESS(maps)->lock; -} - -static inline struct map **maps__maps_by_name(struct maps *maps) -{ - return RC_CHK_ACCESS(maps)->maps_by_name; -} - static inline unsigned int maps__nr_maps(const struct maps *maps) { return RC_CHK_ACCESS(maps)->nr_maps; From 7887097c65446ff229099221975f6fc9ad9e378b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:16:56 -0800 Subject: [PATCH 144/179] perf maps: Fix up overlaps during fixup_end Maps are sometimes made overlapping, in particular kernel maps. If the end of a map overlaps the start of the next, shorten the overlapping map. This should remove potential non-determinism in maps__find, ie finding maps by address. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Andi Kleen Cc: Athira Rajeev Cc: Changbin Du Cc: Colin Ian King Cc: Dmitrii Dolgov <9erthalion6@gmail.com> Cc: German Gomez Cc: Guilherme Amadio Cc: Huacai Chen Cc: Ingo Molnar Cc: James Clark Cc: Jiri Olsa Cc: K Prateek Nayak Cc: Kajol Jain Cc: Kan Liang Cc: Leo Yan Cc: Li Dong Cc: Liam Howlett Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Miguel Ojeda Cc: Ming Wang Cc: Namhyung Kim Cc: Nick Terrell Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Sean Christopherson Cc: Steinar H. 
Gunderson Cc: Vincent Whitchurch Cc: Wenyu Liu Cc: Yang Jihong Link: https://lore.kernel.org/r/20231207011722.1220634-23-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/maps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c index dcd67384d877..0334fc18d9c6 100644 --- a/tools/perf/util/maps.c +++ b/tools/perf/util/maps.c @@ -701,7 +701,7 @@ void maps__fixup_end(struct maps *maps) down_write(maps__lock(maps)); maps__for_each_entry(maps, curr) { - if (prev != NULL && !map__end(prev->map)) + if (prev && (!map__end(prev->map) || map__end(prev->map) > map__start(curr->map))) map__set_end(prev->map, map__start(curr->map)); prev = curr; From 457caadce7ab71a54ee2d4f032ee4a55b4a28776 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Thu, 21 Dec 2023 14:03:13 +0800 Subject: [PATCH 145/179] perf vendor events: Remove UTF-8 characters from cmn.json cmn.json contains UTF-8 characters in brief description which could break the perf build on some distros. Fix this issue by removing the UTF-8 characters from cmn.json. without this fix: $find tools/perf/pmu-events/ -name "*.json" | xargs file -i | grep -v us-ascii tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json: application/json; charset=utf-8 with it: $ file -i tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json: text/plain; charset=us-ascii Fixes: 0b4de7bdf46c5215 ("perf jevents: Add support for Arm CMN PMU aliasing") Reported-by: Arnaldo Carvalho de Melo Signed-off-by: Jing Zhang Cc: Adrian Hunter Cc: Heiko Carstens Cc: Ian Rogers Cc: Jing Zhang Cc: Jiri Olsa Cc: Kajol Jain Cc: Namhyung Kim Cc: Sukadev Bhattiprolu Cc: Thomas Richter Link: https://lore.kernel.org/r/1703138593-50486-1-git-send-email-renyu.zj@linux.alibaba.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json index 428605c37d10..5ec157c39f0d 100644 --- a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json +++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json @@ -107,7 +107,7 @@ "EventName": "hnf_qos_hh_retry", "EventidCode": "0xe", "NodeType": "0x5", - "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.", + "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN-F.", "Unit": "arm_cmn", "Compat": "(434|436|43c|43a).*" }, From ac254dfb983deb7840bc7267418d1ae231f5694f Mon Sep 17 00:00:00 2001 From: "JiaLong.Yang" Date: Thu, 21 Dec 2023 14:02:41 +0800 Subject: [PATCH 146/179] perf vendor events powerpc: Add PVN for HX-C2000 CPU with Power8 Architecture HX-C2000 is a new CPU made by HEXIN Technologies Co., Ltd. And a new PVN 0x0066 has been applied from the OpenPower Community for this CPU. Here is a patch to make perf tool run in the CPU. 
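The first column of mapfile.csv is treated as an extended regular expression matched against the CPU identifier derived from the PVR, which is why a single row can cover every HX-C2000 stepping. A standalone sketch of that match using POSIX regexec(), with a made-up PVR value (0x00660105 is hypothetical; the pattern is the row added below):

  #include <regex.h>
  #include <stdio.h>

  int main(void)
  {
          regex_t re;
          const char *mapcpuid = "0x0066[[:xdigit:]]{4}";  /* new mapfile.csv row */
          const char *cpuid = "0x00660105";                /* hypothetical HX-C2000 PVR */
          int match;

          if (regcomp(&re, mapcpuid, REG_EXTENDED | REG_NOSUB))
                  return 1;
          match = regexec(&re, cpuid, 0, NULL, 0) == 0;
          printf("%s %s the power8 event tables\n", cpuid,
                 match ? "selects" : "does not select");
          regfree(&re);
          return 0;
  }
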
Reviewed-by: Madhavan Srinivasan Signed-off-by: JiaLong.Yang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: shenghui.qu@shingroup.cn Cc: Zhao Ke Cc: zhijie.ren@shingroup.cn Link: https://lore.kernel.org/r/20231221060242.4532-1-jialong.yang@shingroup.cn Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/powerpc/mapfile.csv | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/pmu-events/arch/powerpc/mapfile.csv b/tools/perf/pmu-events/arch/powerpc/mapfile.csv index f4908af7ad66..599a588dbeb4 100644 --- a/tools/perf/pmu-events/arch/powerpc/mapfile.csv +++ b/tools/perf/pmu-events/arch/powerpc/mapfile.csv @@ -11,8 +11,7 @@ # # Multiple PVRs could map to a single JSON file. # - -# Power8 entries 0x004[bcd][[:xdigit:]]{4},1,power8,core +0x0066[[:xdigit:]]{4},1,power8,core 0x004e[[:xdigit:]]{4},1,power9,core 0x0080[[:xdigit:]]{4},1,power10,core From 60cb19b485a534a896431393a877d853bbe51b67 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:07 -0800 Subject: [PATCH 147/179] perf dwarf-aux: Factor out die_get_typename_from_type() The die_get_typename_from_type() is to get the name of the given DIE in C-style type name. The difference from die_get_typename() is that it does not retrieve the DW_AT_type and use the given DIE directly. This will be used when users know the type DIE already. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-2-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/dwarf-aux.c | 38 ++++++++++++++++++++++++++----------- tools/perf/util/dwarf-aux.h | 3 +++ 2 files changed, 30 insertions(+), 11 deletions(-) diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c index edd9e407bc74..7aa5fee0da19 100644 --- a/tools/perf/util/dwarf-aux.c +++ b/tools/perf/util/dwarf-aux.c @@ -1051,32 +1051,28 @@ Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, } /** - * die_get_typename - Get the name of given variable DIE - * @vr_die: a variable DIE + * die_get_typename_from_type - Get the name of given type DIE + * @type_die: a type DIE * @buf: a strbuf for result type name * - * Get the name of @vr_die and stores it to @buf. Return 0 if succeeded. + * Get the name of @type_die and stores it to @buf. Return 0 if succeeded. * and Return -ENOENT if failed to find type name. * Note that the result will stores typedef name if possible, and stores * "*(function_type)" if the type is a function pointer. 
*/ -int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) +int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf) { - Dwarf_Die type; int tag, ret; const char *tmp = ""; - if (__die_get_real_type(vr_die, &type) == NULL) - return -ENOENT; - - tag = dwarf_tag(&type); + tag = dwarf_tag(type_die); if (tag == DW_TAG_array_type || tag == DW_TAG_pointer_type) tmp = "*"; else if (tag == DW_TAG_subroutine_type) { /* Function pointer */ return strbuf_add(buf, "(function_type)", 15); } else { - const char *name = dwarf_diename(&type); + const char *name = dwarf_diename(type_die); if (tag == DW_TAG_union_type) tmp = "union "; @@ -1089,7 +1085,7 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) /* Write a base name */ return strbuf_addf(buf, "%s%s", tmp, name ?: ""); } - ret = die_get_typename(&type, buf); + ret = die_get_typename(type_die, buf); if (ret < 0) { /* void pointer has no type attribute */ if (tag == DW_TAG_pointer_type && ret == -ENOENT) @@ -1100,6 +1096,26 @@ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) return strbuf_addstr(buf, tmp); } +/** + * die_get_typename - Get the name of given variable DIE + * @vr_die: a variable DIE + * @buf: a strbuf for result type name + * + * Get the name of @vr_die and stores it to @buf. Return 0 if succeeded. + * and Return -ENOENT if failed to find type name. + * Note that the result will stores typedef name if possible, and stores + * "*(function_type)" if the type is a function pointer. + */ +int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf) +{ + Dwarf_Die type; + + if (__die_get_real_type(vr_die, &type) == NULL) + return -ENOENT; + + return die_get_typename_from_type(&type, buf); +} + /** * die_get_varname - Get the name and type of given variable DIE * @vr_die: a variable DIE diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h index 0ddf61fd3f8b..4e64caac6df8 100644 --- a/tools/perf/util/dwarf-aux.h +++ b/tools/perf/util/dwarf-aux.h @@ -116,6 +116,9 @@ Dwarf_Die *die_find_variable_at(Dwarf_Die *sp_die, const char *name, Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name, Dwarf_Die *die_mem); +/* Get the name of given type DIE */ +int die_get_typename_from_type(Dwarf_Die *type_die, struct strbuf *buf); + /* Get the name of given variable DIE */ int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf); From 3eee606757ad82f32332a2da174aa032bfd9cc32 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:08 -0800 Subject: [PATCH 148/179] perf dwarf-regs: Add get_dwarf_regnum() The get_dwarf_regnum() returns a DWARF register number from a register name string according to the psABI. Also add two pseudo encodings of DWARF_REG_PC which is a register that are used by PC-relative addressing and DWARF_REG_FB which is a frame base register. They need to be handled in a special way. 
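As a rough usage sketch (relying on the perf-internal "dwarf-regs.h" extended here, so illustrative rather than standalone code): register names as printed by objdump start with '%' on x86, trailing ' ', ',' or ')' characters are stripped, and %rip maps to the DWARF_REG_PC pseudo number.

  /* Illustrative only -- assumes the perf-internal header and the x86 table from this patch. */
  #include <elf.h>              /* EM_NONE */
  #include "dwarf-regs.h"

  static void regnum_examples(void)
  {
          int rax = get_dwarf_regnum("%rax", EM_NONE);  /* 0 on x86-64 */
          int r8d = get_dwarf_regnum("%r8d,", EM_NONE); /* 8, trailing ',' stripped */
          int rip = get_dwarf_regnum("%rip)", EM_NONE); /* DWARF_REG_PC */
          int bad = get_dwarf_regnum("%xmm0", EM_NONE); /* -ENOENT, not in the table */

          (void)rax; (void)r8d; (void)rip; (void)bad;
  }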
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-3-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/util/dwarf-regs.c | 38 +++++++++++++++++++++++++++ tools/perf/util/dwarf-regs.c | 34 ++++++++++++++++++++++++ tools/perf/util/include/dwarf-regs.h | 19 ++++++++++++++ 3 files changed, 91 insertions(+) diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c index 530934805710..399c4a0a29d8 100644 --- a/tools/perf/arch/x86/util/dwarf-regs.c +++ b/tools/perf/arch/x86/util/dwarf-regs.c @@ -113,3 +113,41 @@ int regs_query_register_offset(const char *name) return roff->offset; return -EINVAL; } + +struct dwarf_regs_idx { + const char *name; + int idx; +}; + +static const struct dwarf_regs_idx x86_regidx_table[] = { + { "rax", 0 }, { "eax", 0 }, { "ax", 0 }, { "al", 0 }, + { "rdx", 1 }, { "edx", 1 }, { "dx", 1 }, { "dl", 1 }, + { "rcx", 2 }, { "ecx", 2 }, { "cx", 2 }, { "cl", 2 }, + { "rbx", 3 }, { "edx", 3 }, { "bx", 3 }, { "bl", 3 }, + { "rsi", 4 }, { "esi", 4 }, { "si", 4 }, { "sil", 4 }, + { "rdi", 5 }, { "edi", 5 }, { "di", 5 }, { "dil", 5 }, + { "rbp", 6 }, { "ebp", 6 }, { "bp", 6 }, { "bpl", 6 }, + { "rsp", 7 }, { "esp", 7 }, { "sp", 7 }, { "spl", 7 }, + { "r8", 8 }, { "r8d", 8 }, { "r8w", 8 }, { "r8b", 8 }, + { "r9", 9 }, { "r9d", 9 }, { "r9w", 9 }, { "r9b", 9 }, + { "r10", 10 }, { "r10d", 10 }, { "r10w", 10 }, { "r10b", 10 }, + { "r11", 11 }, { "r11d", 11 }, { "r11w", 11 }, { "r11b", 11 }, + { "r12", 12 }, { "r12d", 12 }, { "r12w", 12 }, { "r12b", 12 }, + { "r13", 13 }, { "r13d", 13 }, { "r13w", 13 }, { "r13b", 13 }, + { "r14", 14 }, { "r14d", 14 }, { "r14w", 14 }, { "r14b", 14 }, + { "r15", 15 }, { "r15d", 15 }, { "r15w", 15 }, { "r15b", 15 }, + { "rip", DWARF_REG_PC }, +}; + +int get_arch_regnum(const char *name) +{ + unsigned int i; + + if (*name != '%') + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(x86_regidx_table); i++) + if (!strcmp(x86_regidx_table[i].name, name + 1)) + return x86_regidx_table[i].idx; + return -ENOENT; +} diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c index 69cfaa5953bf..5b7f86c0063f 100644 --- a/tools/perf/util/dwarf-regs.c +++ b/tools/perf/util/dwarf-regs.c @@ -5,9 +5,12 @@ * Written by: Masami Hiramatsu */ +#include +#include #include #include #include +#include #include #ifndef EM_AARCH64 @@ -68,3 +71,34 @@ const char *get_dwarf_regstr(unsigned int n, unsigned int machine) } return NULL; } + +__weak int get_arch_regnum(const char *name __maybe_unused) +{ + return -ENOTSUP; +} + +/* Return DWARF register number from architecture register name */ +int get_dwarf_regnum(const char *name, unsigned int machine) +{ + char *regname = strdup(name); + int reg = -1; + char *p; + + if (regname == NULL) + return -EINVAL; + + /* For convenience, remove trailing characters */ + p = strpbrk(regname, " ,)"); + if (p) + *p = '\0'; + + switch (machine) { + case EM_NONE: /* Generic arch - use host arch */ + reg = get_arch_regnum(regname); + break; + default: + pr_err("ELF MACHINE %x is not supported.\n", machine); + } + free(regname); + return reg; +} diff --git a/tools/perf/util/include/dwarf-regs.h b/tools/perf/util/include/dwarf-regs.h index 7d99a084e82d..01fb25a1150a 100644 --- a/tools/perf/util/include/dwarf-regs.h +++ 
b/tools/perf/util/include/dwarf-regs.h @@ -2,6 +2,9 @@ #ifndef _PERF_DWARF_REGS_H_ #define _PERF_DWARF_REGS_H_ +#define DWARF_REG_PC 0xd3af9c /* random number */ +#define DWARF_REG_FB 0xd3affb /* random number */ + #ifdef HAVE_DWARF_SUPPORT const char *get_arch_regstr(unsigned int n); /* @@ -10,6 +13,22 @@ const char *get_arch_regstr(unsigned int n); * machine: ELF machine signature (EM_*) */ const char *get_dwarf_regstr(unsigned int n, unsigned int machine); + +int get_arch_regnum(const char *name); +/* + * get_dwarf_regnum - Returns DWARF regnum from register name + * name: architecture register name + * machine: ELF machine signature (EM_*) + */ +int get_dwarf_regnum(const char *name, unsigned int machine); + +#else /* HAVE_DWARF_SUPPORT */ + +static inline int get_dwarf_regnum(const char *name __maybe_unused, + unsigned int machine __maybe_unused) +{ + return -1; +} #endif #ifdef HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET From b9c87f536c6f28c75ace8a014646faad00f0e1ec Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:09 -0800 Subject: [PATCH 149/179] perf annotate-data: Add find_data_type() to get type from memory access The find_data_type() is to get a data type from the memory access at the given address (IP) using a register and an offset. It requires DWARF debug info in the DSO and searches the list of variables and function parameters in the scope. In a pseudo code, it does basically the following: find_data_type(dso, ip, reg, offset) { pc = map__rip_2objdump(ip); CU = dwarf_addrdie(dso->dwarf, pc); scopes = die_get_scopes(CU, pc); for_each_scope(S, scopes) { V = die_find_variable_by_reg(S, pc, reg); if (V && V.type == pointer_type) { T = die_get_real_type(V); if (offset < T.size) return T; } } return NULL; } Committer notes: The 'size' variable in check_variable() is 64-bit, so use PRIu64 and inttypes.h to debug it. Ditto at find_data_type_die(). Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-4-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/Build | 1 + tools/perf/util/annotate-data.c | 164 ++++++++++++++++++++++++++++++++ tools/perf/util/annotate-data.h | 40 ++++++++ 3 files changed, 205 insertions(+) create mode 100644 tools/perf/util/annotate-data.c create mode 100644 tools/perf/util/annotate-data.h diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 132508ebe125..8027f450fa3e 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -196,6 +196,7 @@ perf-$(CONFIG_DWARF) += probe-finder.o perf-$(CONFIG_DWARF) += dwarf-aux.o perf-$(CONFIG_DWARF) += dwarf-regs.o perf-$(CONFIG_DWARF) += debuginfo.o +perf-$(CONFIG_DWARF) += annotate-data.o perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c new file mode 100644 index 000000000000..9739ecc841ee --- /dev/null +++ b/tools/perf/util/annotate-data.c @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Convert sample address to data type using DWARF debug info. 
+ * + * Written by Namhyung Kim + */ + +#include +#include +#include + +#include "annotate-data.h" +#include "debuginfo.h" +#include "debug.h" +#include "dso.h" +#include "map.h" +#include "map_symbol.h" +#include "strbuf.h" +#include "symbol.h" + +static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die) +{ + Dwarf_Off off, next_off; + size_t header_size; + + if (dwarf_addrdie(di->dbg, pc, cu_die) != NULL) + return cu_die; + + /* + * There are some kernels don't have full aranges and contain only a few + * aranges entries. Fallback to iterate all CU entries in .debug_info + * in case it's missing. + */ + off = 0; + while (dwarf_nextcu(di->dbg, off, &next_off, &header_size, + NULL, NULL, NULL) == 0) { + if (dwarf_offdie(di->dbg, off + header_size, cu_die) && + dwarf_haspc(cu_die, pc)) + return true; + + off = next_off; + } + return false; +} + +/* The type info will be saved in @type_die */ +static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset) +{ + Dwarf_Word size; + + /* Get the type of the variable */ + if (die_get_real_type(var_die, type_die) == NULL) { + pr_debug("variable has no type\n"); + return -1; + } + + /* + * It expects a pointer type for a memory access. + * Convert to a real type it points to. + */ + if (dwarf_tag(type_die) != DW_TAG_pointer_type || + die_get_real_type(type_die, type_die) == NULL) { + pr_debug("no pointer or no type\n"); + return -1; + } + + /* Get the size of the actual type */ + if (dwarf_aggregate_size(type_die, &size) < 0) { + pr_debug("type size is unknown\n"); + return -1; + } + + /* Minimal sanity check */ + if ((unsigned)offset >= size) { + pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size); + return -1; + } + + return 0; +} + +/* The result will be saved in @type_die */ +static int find_data_type_die(struct debuginfo *di, u64 pc, + int reg, int offset, Dwarf_Die *type_die) +{ + Dwarf_Die cu_die, var_die; + Dwarf_Die *scopes = NULL; + int ret = -1; + int i, nr_scopes; + + /* Get a compile_unit for this address */ + if (!find_cu_die(di, pc, &cu_die)) { + pr_debug("cannot find CU for address %" PRIx64 "\n", pc); + return -1; + } + + /* Get a list of nested scopes - i.e. (inlined) functions and blocks. */ + nr_scopes = die_get_scopes(&cu_die, pc, &scopes); + + /* Search from the inner-most scope to the outer */ + for (i = nr_scopes - 1; i >= 0; i--) { + /* Look up variables/parameters in this scope */ + if (!die_find_variable_by_reg(&scopes[i], pc, reg, &var_die)) + continue; + + /* Found a variable, see if it's correct */ + ret = check_variable(&var_die, type_die, offset); + break; + } + + free(scopes); + return ret; +} + +/** + * find_data_type - Return a data type at the location + * @ms: map and symbol at the location + * @ip: instruction address of the memory access + * @reg: register that holds the base address + * @offset: offset from the base address + * + * This functions searches the debug information of the binary to get the data + * type it accesses. The exact location is expressed by (ip, reg, offset). + * It return %NULL if not found. 
+ */ +struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, + int reg, int offset) +{ + struct annotated_data_type *result = NULL; + struct dso *dso = map__dso(ms->map); + struct debuginfo *di; + Dwarf_Die type_die; + struct strbuf sb; + u64 pc; + + di = debuginfo__new(dso->long_name); + if (di == NULL) { + pr_debug("cannot get the debug info\n"); + return NULL; + } + + /* + * IP is a relative instruction address from the start of the map, as + * it can be randomized/relocated, it needs to translate to PC which is + * a file address for DWARF processing. + */ + pc = map__rip_2objdump(ms->map, ip); + if (find_data_type_die(di, pc, reg, offset, &type_die) < 0) + goto out; + + result = zalloc(sizeof(*result)); + if (result == NULL) + goto out; + + strbuf_init(&sb, 32); + if (die_get_typename_from_type(&type_die, &sb) < 0) + strbuf_add(&sb, "(unknown type)", 14); + + result->type_name = strbuf_detach(&sb, NULL); + +out: + debuginfo__delete(di); + return result; +} diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h new file mode 100644 index 000000000000..633147f78ca5 --- /dev/null +++ b/tools/perf/util/annotate-data.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _PERF_ANNOTATE_DATA_H +#define _PERF_ANNOTATE_DATA_H + +#include +#include +#include + +struct map_symbol; + +/** + * struct annotated_data_type - Data type to profile + * @type_name: Name of the data type + * @type_size: Size of the data type + * + * This represents a data type accessed by samples in the profile data. + */ +struct annotated_data_type { + char *type_name; + int type_size; +}; + +#ifdef HAVE_DWARF_SUPPORT + +/* Returns data type at the location (ip, reg, offset) */ +struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, + int reg, int offset); + +#else /* HAVE_DWARF_SUPPORT */ + +static inline struct annotated_data_type * +find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused, + int reg __maybe_unused, int offset __maybe_unused) +{ + return NULL; +} + +#endif /* HAVE_DWARF_SUPPORT */ + +#endif /* _PERF_ANNOTATE_DATA_H */ From fc044c53b99fad039ac30b95b289992ebf7dd6b4 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:10 -0800 Subject: [PATCH 150/179] perf annotate-data: Add dso->data_types tree To aggregate accesses to the same data type, add 'data_types' tree in DSO to maintain data types and find it by name and size. It might have different data types that happen to have the same name, so it also compares the size of the type. Even if it doesn't 100% guarantee, it reduces the possibility of mis-handling of such conflicts. And I don't think it's common to have different types with the same name. Committer notes: Very few cases on the Linux kernel, but there are some different types with the same name, unsure if there is a debug mode in libbpf dedup that warns about such cases, but there are provisions in pahole for that, see: "emit: Notice type shadowing, i.e. 
multiple types with the same name (enum, struct, union, etc)" https://git.kernel.org/pub/scm/devel/pahole/pahole.git/commit/?id=4f332dbfd02072e4f410db7bdcda8d6e3422974b $ pahole --compile > vmlinux.h $ rm -f a ; make a cc a.c -o a $ grep __[0-9] vmlinux.h union irte__1 { struct map_info__1; struct map_info__1 { struct map_info__1 * next; /* 0 8 */ $ drivers/iommu/amd/amd_iommu_types.h 'union irte' include/linux/dmar.h 'struct irte' include/linux/device-mapper.h: union map_info { void *ptr; }; include/linux/mtd/map.h: struct map_info { const char *name; unsigned long size; resource_size_t phys; kernel/events/uprobes.c: struct map_info { struct map_info *next; struct mm_struct *mm; unsigned long vaddr; }; Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-5-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate-data.c | 95 +++++++++++++++++++++++++++++---- tools/perf/util/annotate-data.h | 9 ++++ tools/perf/util/dso.c | 4 ++ tools/perf/util/dso.h | 2 + 4 files changed, 100 insertions(+), 10 deletions(-) diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index 9739ecc841ee..1f921971174d 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -18,6 +18,76 @@ #include "strbuf.h" #include "symbol.h" +/* + * Compare type name and size to maintain them in a tree. + * I'm not sure if DWARF would have information of a single type in many + * different places (compilation units). If not, it could compare the + * offset of the type entry in the .debug_info section. 
+ */ +static int data_type_cmp(const void *_key, const struct rb_node *node) +{ + const struct annotated_data_type *key = _key; + struct annotated_data_type *type; + + type = rb_entry(node, struct annotated_data_type, node); + + if (key->type_size != type->type_size) + return key->type_size - type->type_size; + return strcmp(key->type_name, type->type_name); +} + +static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b) +{ + struct annotated_data_type *a, *b; + + a = rb_entry(node_a, struct annotated_data_type, node); + b = rb_entry(node_b, struct annotated_data_type, node); + + if (a->type_size != b->type_size) + return a->type_size < b->type_size; + return strcmp(a->type_name, b->type_name) < 0; +} + +static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, + Dwarf_Die *type_die) +{ + struct annotated_data_type *result = NULL; + struct annotated_data_type key; + struct rb_node *node; + struct strbuf sb; + char *type_name; + Dwarf_Word size; + + strbuf_init(&sb, 32); + if (die_get_typename_from_type(type_die, &sb) < 0) + strbuf_add(&sb, "(unknown type)", 14); + type_name = strbuf_detach(&sb, NULL); + dwarf_aggregate_size(type_die, &size); + + /* Check existing nodes in dso->data_types tree */ + key.type_name = type_name; + key.type_size = size; + node = rb_find(&key, &dso->data_types, data_type_cmp); + if (node) { + result = rb_entry(node, struct annotated_data_type, node); + free(type_name); + return result; + } + + /* If not, add a new one */ + result = zalloc(sizeof(*result)); + if (result == NULL) { + free(type_name); + return NULL; + } + + result->type_name = type_name; + result->type_size = size; + + rb_add(&result->node, &dso->data_types, data_type_less); + return result; +} + static bool find_cu_die(struct debuginfo *di, u64 pc, Dwarf_Die *cu_die) { Dwarf_Off off, next_off; @@ -130,7 +200,6 @@ struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, struct dso *dso = map__dso(ms->map); struct debuginfo *di; Dwarf_Die type_die; - struct strbuf sb; u64 pc; di = debuginfo__new(dso->long_name); @@ -148,17 +217,23 @@ struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, if (find_data_type_die(di, pc, reg, offset, &type_die) < 0) goto out; - result = zalloc(sizeof(*result)); - if (result == NULL) - goto out; - - strbuf_init(&sb, 32); - if (die_get_typename_from_type(&type_die, &sb) < 0) - strbuf_add(&sb, "(unknown type)", 14); - - result->type_name = strbuf_detach(&sb, NULL); + result = dso__findnew_data_type(dso, &type_die); out: debuginfo__delete(di); return result; } + +void annotated_data_type__tree_delete(struct rb_root *root) +{ + struct annotated_data_type *pos; + + while (!RB_EMPTY_ROOT(root)) { + struct rb_node *node = rb_first(root); + + rb_erase(node, root); + pos = rb_entry(node, struct annotated_data_type, node); + free(pos->type_name); + free(pos); + } +} diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h index 633147f78ca5..ab9f187bd7f1 100644 --- a/tools/perf/util/annotate-data.h +++ b/tools/perf/util/annotate-data.h @@ -4,6 +4,7 @@ #include #include +#include #include struct map_symbol; @@ -16,6 +17,7 @@ struct map_symbol; * This represents a data type accessed by samples in the profile data. 
*/ struct annotated_data_type { + struct rb_node node; char *type_name; int type_size; }; @@ -26,6 +28,9 @@ struct annotated_data_type { struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, int reg, int offset); +/* Release all data type information in the tree */ +void annotated_data_type__tree_delete(struct rb_root *root); + #else /* HAVE_DWARF_SUPPORT */ static inline struct annotated_data_type * @@ -35,6 +40,10 @@ find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused, return NULL; } +static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe_unused) +{ +} + #endif /* HAVE_DWARF_SUPPORT */ #endif /* _PERF_ANNOTATE_DATA_H */ diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index 1f629b6fb7cf..22fd5fa806ed 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -31,6 +31,7 @@ #include "debug.h" #include "string2.h" #include "vdso.h" +#include "annotate-data.h" static const char * const debuglink_paths[] = { "%.0s%s", @@ -1327,6 +1328,7 @@ struct dso *dso__new_id(const char *name, struct dso_id *id) dso->data.cache = RB_ROOT; dso->inlined_nodes = RB_ROOT_CACHED; dso->srclines = RB_ROOT_CACHED; + dso->data_types = RB_ROOT; dso->data.fd = -1; dso->data.status = DSO_DATA_STATUS_UNKNOWN; dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND; @@ -1370,6 +1372,8 @@ void dso__delete(struct dso *dso) symbols__delete(&dso->symbols); dso->symbol_names_len = 0; zfree(&dso->symbol_names); + annotated_data_type__tree_delete(&dso->data_types); + if (dso->short_name_allocated) { zfree((char **)&dso->short_name); dso->short_name_allocated = false; diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index 3759de8c2267..ce9f3849a773 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -154,6 +154,8 @@ struct dso { size_t symbol_names_len; struct rb_root_cached inlined_nodes; struct rb_root_cached srclines; + struct rb_root data_types; + struct { u64 addr; struct symbol *symbol; From 0669729eb0afb0cf55fe1b97d7a8b1315354910f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:11 -0800 Subject: [PATCH 151/179] perf annotate: Factor out evsel__get_arch() The evsel__get_arch() is to get architecture info from the environment. It'll be used by other places later so let's factor it out. Also add arch__is() to check the arch info by name. Committer notes: "get" is usually associated with refcounting, so we better rename this at some point to a better name. 
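As an illustrative sketch only (evsel__get_arch() stays static in util/annotate.c, so this shows the kind of in-file caller the factoring enables; arch__is() itself is exported via annotate.h):

  /* Hypothetical in-file user in util/annotate.c, not from the patch. */
  static int example_needs_x86_handling(struct evsel *evsel)
  {
          struct arch *arch;

          if (evsel__get_arch(evsel, &arch) < 0)
                  return 0;

          /* arch__is() is a strcmp() against arch->name ("x86", "arm64", ...) */
          return arch__is(arch, "x86");
  }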
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-6-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 56 +++++++++++++++++++++++++------------- tools/perf/util/annotate.h | 2 ++ 2 files changed, 39 insertions(+), 19 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index c81fa0791918..27b2a9961cd5 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -843,6 +843,11 @@ static struct arch *arch__find(const char *name) return bsearch(name, architectures, nmemb, sizeof(struct arch), arch__key_cmp); } +bool arch__is(struct arch *arch, const char *name) +{ + return !strcmp(arch->name, name); +} + static struct annotated_source *annotated_source__new(void) { struct annotated_source *src = zalloc(sizeof(*src)); @@ -2378,6 +2383,33 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel) annotation__calc_percent(notes, evsel, symbol__size(sym)); } +static int evsel__get_arch(struct evsel *evsel, struct arch **parch) +{ + struct perf_env *env = evsel__env(evsel); + const char *arch_name = perf_env__arch(env); + struct arch *arch; + int err; + + if (!arch_name) + return errno; + + *parch = arch = arch__find(arch_name); + if (arch == NULL) { + pr_err("%s: unsupported arch %s\n", __func__, arch_name); + return ENOTSUP; + } + + if (arch->init) { + err = arch->init(arch, env ? env->cpuid : NULL); + if (err) { + pr_err("%s: failed to initialize %s arch priv area\n", + __func__, arch->name); + return err; + } + } + return 0; +} + int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, struct arch **parch) { @@ -2387,31 +2419,17 @@ int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, .evsel = evsel, .options = &annotate_opts, }; - struct perf_env *env = evsel__env(evsel); - const char *arch_name = perf_env__arch(env); - struct arch *arch; + struct arch *arch = NULL; int err; - if (!arch_name) - return errno; - - args.arch = arch = arch__find(arch_name); - if (arch == NULL) { - pr_err("%s: unsupported arch %s\n", __func__, arch_name); - return ENOTSUP; - } + err = evsel__get_arch(evsel, &arch); + if (err < 0) + return err; if (parch) *parch = arch; - if (arch->init) { - err = arch->init(arch, env ? env->cpuid : NULL); - if (err) { - pr_err("%s: failed to initialize %s arch priv area\n", __func__, arch->name); - return err; - } - } - + args.arch = arch; args.ms = *ms; if (annotate_opts.full_addr) notes->start = map__objdump_2mem(ms->map, ms->sym->start); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 589f8aaf0236..2ef7e7dda7bd 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -61,6 +61,8 @@ struct ins_operands { struct arch; +bool arch__is(struct arch *arch, const char *name); + struct ins_ops { void (*free)(struct ins_operands *ops); int (*parse)(struct arch *arch, struct ins_operands *ops, struct map_symbol *ms); From 3a0c26edc3d2f39da3a91eb6eae404253e7ccbaa Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:12 -0800 Subject: [PATCH 152/179] perf annotate: Add annotate_get_insn_location() The annotate_get_insn_location() is to get the detailed information of instruction locations like registers and offset. 
It has source and target operands locations in an array. Each operand can have a register and an offset. The offset is meaningful when mem_ref flag is set. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-7-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 107 +++++++++++++++++++++++++++++++++++++ tools/perf/util/annotate.h | 36 +++++++++++++ 2 files changed, 143 insertions(+) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 27b2a9961cd5..7c597440dc2e 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -31,6 +31,7 @@ #include "bpf-utils.h" #include "block-range.h" #include "string2.h" +#include "dwarf-regs.h" #include "util/event.h" #include "util/sharded_mutex.h" #include "arch/common.h" @@ -3518,3 +3519,109 @@ int annotate_check_args(void) } return 0; } + +/* + * Get register number and access offset from the given instruction. + * It assumes AT&T x86 asm format like OFFSET(REG). Maybe it needs + * to revisit the format when it handles different architecture. + * Fills @reg and @offset when return 0. + */ +static int extract_reg_offset(struct arch *arch, const char *str, + struct annotated_op_loc *op_loc) +{ + char *p; + char *regname; + + if (arch->objdump.register_char == 0) + return -1; + + /* + * It should start from offset, but it's possible to skip 0 + * in the asm. So 0(%rax) should be same as (%rax). + * + * However, it also start with a segment select register like + * %gs:0x18(%rbx). In that case it should skip the part. + */ + if (*str == arch->objdump.register_char) { + while (*str && !isdigit(*str) && + *str != arch->objdump.memory_ref_char) + str++; + } + + op_loc->offset = strtol(str, &p, 0); + + p = strchr(p, arch->objdump.register_char); + if (p == NULL) + return -1; + + regname = strdup(p); + if (regname == NULL) + return -1; + + op_loc->reg = get_dwarf_regnum(regname, 0); + free(regname); + return 0; +} + +/** + * annotate_get_insn_location - Get location of instruction + * @arch: the architecture info + * @dl: the target instruction + * @loc: a buffer to save the data + * + * Get detailed location info (register and offset) in the instruction. + * It needs both source and target operand and whether it accesses a + * memory location. The offset field is meaningful only when the + * corresponding mem flag is set. 
+ * + * Some examples on x86: + * + * mov (%rax), %rcx # src_reg = rax, src_mem = 1, src_offset = 0 + * # dst_reg = rcx, dst_mem = 0 + * + * mov 0x18, %r8 # src_reg = -1, dst_reg = r8 + */ +int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, + struct annotated_insn_loc *loc) +{ + struct ins_operands *ops; + struct annotated_op_loc *op_loc; + int i; + + if (!strcmp(dl->ins.name, "lock")) + ops = dl->ops.locked.ops; + else + ops = &dl->ops; + + if (ops == NULL) + return -1; + + memset(loc, 0, sizeof(*loc)); + + for_each_insn_op_loc(loc, i, op_loc) { + const char *insn_str = ops->source.raw; + + if (i == INSN_OP_TARGET) + insn_str = ops->target.raw; + + /* Invalidate the register by default */ + op_loc->reg = -1; + + if (insn_str == NULL) + continue; + + if (strchr(insn_str, arch->objdump.memory_ref_char)) { + op_loc->mem_ref = true; + extract_reg_offset(arch, insn_str, op_loc); + } else { + char *s = strdup(insn_str); + + if (s) { + op_loc->reg = get_dwarf_regnum(s, 0); + free(s); + } + } + } + + return 0; +} diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 2ef7e7dda7bd..25ae8893d4f9 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -439,4 +439,40 @@ int annotate_parse_percent_type(const struct option *opt, const char *_str, int annotate_check_args(void); +/** + * struct annotated_op_loc - Location info of instruction operand + * @reg: Register in the operand + * @offset: Memory access offset in the operand + * @mem_ref: Whether the operand accesses memory + */ +struct annotated_op_loc { + int reg; + int offset; + bool mem_ref; +}; + +enum annotated_insn_ops { + INSN_OP_SOURCE = 0, + INSN_OP_TARGET = 1, + + INSN_OP_MAX, +}; + +/** + * struct annotated_insn_loc - Location info of instruction + * @ops: Array of location info for source and target operands + */ +struct annotated_insn_loc { + struct annotated_op_loc ops[INSN_OP_MAX]; +}; + +#define for_each_insn_op_loc(insn_loc, i, op_loc) \ + for (i = INSN_OP_SOURCE, op_loc = &(insn_loc)->ops[i]; \ + i < INSN_OP_MAX; \ + i++, op_loc++) + +/* Get detailed location info in the instruction */ +int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, + struct annotated_insn_loc *loc); + #endif /* __PERF_ANNOTATE_H */ From 67bc54bbc5a25c0488cc488558a11c14c10f5f14 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:13 -0800 Subject: [PATCH 153/179] perf annotate: Implement hist_entry__get_data_type() It's the function to find out the type info from the given sample data and will be called from the hist_entry sort logic when 'type' sort key is used. It first calls objdump to disassemble the instructions and figure out information about memory access at the location. Maybe we can do it better by analyzing the instruction directly, but I'll leave it for later work. The memory access is determined by checking instruction operands to have "(" and then extract register name and offset. It'll return NULL if no data type is found. 
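For illustration, a standalone sketch of that operand check (simplified; the real extract_reg_offset() in annotate.c also copes with segment prefixes like %gs:0x18(%rbx)):

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Split an AT&T-syntax operand "OFFSET(%REG)" into offset and register name. */
  static int parse_mem_operand(const char *str, long *offset, char *reg, size_t reglen)
  {
          char *p;
          const char *r;

          if (strchr(str, '(') == NULL)
                  return -1;                    /* not a memory reference */

          *offset = strtol(str, &p, 0);         /* "(%rax)" parses as offset 0 */

          r = strchr(p, '%');
          if (r == NULL)
                  return -1;
          snprintf(reg, reglen, "%.*s", (int)strcspn(r, ")"), r);
          return 0;
  }

  int main(void)
  {
          long off;
          char reg[16];

          if (parse_mem_operand("0x18(%rbx)", &off, reg, sizeof(reg)) == 0)
                  printf("reg=%s offset=%ld\n", reg, off);   /* reg=%rbx offset=24 */
          return 0;
  }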
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-8-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 88 ++++++++++++++++++++++++++++++++++++++ tools/perf/util/annotate.h | 4 ++ 2 files changed, 92 insertions(+) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 7c597440dc2e..8673eac4b9df 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -25,6 +25,7 @@ #include "units.h" #include "debug.h" #include "annotate.h" +#include "annotate-data.h" #include "evsel.h" #include "evlist.h" #include "bpf-event.h" @@ -3625,3 +3626,90 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, return 0; } + +static void symbol__ensure_annotate(struct map_symbol *ms, struct evsel *evsel) +{ + struct disasm_line *dl, *tmp_dl; + struct annotation *notes; + + notes = symbol__annotation(ms->sym); + if (!list_empty(¬es->src->source)) + return; + + if (symbol__annotate(ms, evsel, NULL) < 0) + return; + + /* remove non-insn disasm lines for simplicity */ + list_for_each_entry_safe(dl, tmp_dl, ¬es->src->source, al.node) { + if (dl->al.offset == -1) { + list_del(&dl->al.node); + free(dl); + } + } +} + +static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip) +{ + struct disasm_line *dl; + struct annotation *notes; + + notes = symbol__annotation(sym); + + list_for_each_entry(dl, ¬es->src->source, al.node) { + if (sym->start + dl->al.offset == ip) + return dl; + } + return NULL; +} + +/** + * hist_entry__get_data_type - find data type for given hist entry + * @he: hist entry + * + * This function first annotates the instruction at @he->ip and extracts + * register and offset info from it. Then it searches the DWARF debug + * info to get a variable and type information using the address, register, + * and offset. + */ +struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) +{ + struct map_symbol *ms = &he->ms; + struct evsel *evsel = hists_to_evsel(he->hists); + struct arch *arch; + struct disasm_line *dl; + struct annotated_insn_loc loc; + struct annotated_op_loc *op_loc; + u64 ip = he->ip; + int i; + + if (ms->map == NULL || ms->sym == NULL) + return NULL; + + if (!symbol_conf.init_annotation) + return NULL; + + if (evsel__get_arch(evsel, &arch) < 0) + return NULL; + + /* Make sure it runs objdump to get disasm of the function */ + symbol__ensure_annotate(ms, evsel); + + /* + * Get a disasm to extract the location from the insn. + * This is too slow... 
+ */ + dl = find_disasm_line(ms->sym, ip); + if (dl == NULL) + return NULL; + + if (annotate_get_insn_location(arch, dl, &loc) < 0) + return NULL; + + for_each_insn_op_loc(&loc, i, op_loc) { + if (!op_loc->mem_ref) + continue; + + return find_data_type(ms, ip, op_loc->reg, op_loc->offset); + } + return NULL; +} diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 25ae8893d4f9..6c75b2832286 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -23,6 +23,7 @@ struct option; struct perf_sample; struct evsel; struct symbol; +struct annotated_data_type; struct ins { const char *name; @@ -475,4 +476,7 @@ struct annotated_insn_loc { int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, struct annotated_insn_loc *loc); +/* Returns a data type from the sample instruction (if any) */ +struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he); + #endif /* __PERF_ANNOTATE_H */ From 2f2c41bdd87f450a6a71c5d090d42c248ca4bf1e Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:14 -0800 Subject: [PATCH 154/179] perf report: Add 'type' sort key The 'type' sort key is to aggregate hist entries by data type they access. Add mem_type field to hist_entry struct to save the type. If hist_entry__get_data_type() returns NULL, it'd use the 'unknown_type' instance. Committer testing: Before: # perf mem record sleep 2s [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.037 MB perf.data (4 samples) ] root@number:/home/acme/Downloads# perf report --stdio -s type Error: Unknown --sort key: `type' Usage: perf report [] -s, --sort sort by key(s): overhead overhead_sys overhead_us overhead_guest_sys overhead_guest_us overhead_children sample period pid comm dso symbol parent cpu socket srcline srcfile local_weight weight transaction trace symbol_size dso_size cgroup cgroup_id ipc_null time code_page_size local_ins_lat ins_lat local_p_stage_cyc p_stage_cyc addr local_retire_lat retire_lat simd dso_from dso_to symbol_from symbol_to mispredict abort in_tx cycles srcline_from srcline_to ipc_lbr addr_from addr_to symbol_daddr dso_daddr locked tlb mem snoop dcacheline symbol_iaddr phys_daddr data_page_size blocked # After: # perf report --stdio -s type # To display the perf.data header info, please use --header/--header-only options. # # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Data Type # ........ ......... 
# 100.00% (unknown) # # (Tip: Print event counts in CSV format with: perf stat -x,) # # rpm -q kernel-debuginfo kernel-debuginfo-6.6.4-200.fc39.x86_64 # uname -r 6.6.4-200.fc39.x86_64 # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org> Cc: linux-trace-devel@vger.kernel.org> Link: https://lore.kernel.org/r/20231213001323.718046-9-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-report.txt | 1 + tools/perf/util/annotate-data.h | 2 + tools/perf/util/hist.h | 1 + tools/perf/util/sort.c | 69 +++++++++++++++++++++++- tools/perf/util/sort.h | 4 ++ 5 files changed, 75 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index af068b4f1e5a..aec34417090b 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -118,6 +118,7 @@ OPTIONS - retire_lat: On X86, this reports pipeline stall of this instruction compared to the previous instruction in cycles. And currently supported only on X86 - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate + - type: Data type of sample memory access. By default, comm, dso and symbol keys are used. (i.e. --sort comm,dso,symbol) diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h index ab9f187bd7f1..6efdd7e21b28 100644 --- a/tools/perf/util/annotate-data.h +++ b/tools/perf/util/annotate-data.h @@ -22,6 +22,8 @@ struct annotated_data_type { int type_size; }; +extern struct annotated_data_type unknown_type; + #ifdef HAVE_DWARF_SUPPORT /* Returns data type at the location (ip, reg, offset) */ diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 5d0db96609df..7ebbf427b1ea 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -82,6 +82,7 @@ enum hist_column { HISTC_ADDR_TO, HISTC_ADDR, HISTC_SIMD, + HISTC_TYPE, HISTC_NR_COLS, /* Last entry */ }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 27b123ccd2d1..e647f0117bb5 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -24,6 +24,7 @@ #include "strbuf.h" #include "mem-events.h" #include "annotate.h" +#include "annotate-data.h" #include "event.h" #include "time-utils.h" #include "cgroup.h" @@ -2094,7 +2095,7 @@ struct sort_entry sort_dso_size = { .se_width_idx = HISTC_DSO_SIZE, }; -/* --sort dso_size */ +/* --sort addr */ static int64_t sort__addr_cmp(struct hist_entry *left, struct hist_entry *right) @@ -2131,6 +2132,69 @@ struct sort_entry sort_addr = { .se_width_idx = HISTC_ADDR, }; +/* --sort type */ + +struct annotated_data_type unknown_type = { + .type_name = (char *)"(unknown)", +}; + +static int64_t +sort__type_cmp(struct hist_entry *left, struct hist_entry *right) +{ + return sort__addr_cmp(left, right); +} + +static void sort__type_init(struct hist_entry *he) +{ + if (he->mem_type) + return; + + he->mem_type = hist_entry__get_data_type(he); + if (he->mem_type == NULL) + he->mem_type = &unknown_type; +} + +static int64_t +sort__type_collapse(struct hist_entry *left, struct hist_entry *right) +{ + struct annotated_data_type *left_type = left->mem_type; + struct annotated_data_type *right_type = right->mem_type; + + if (!left_type) { + sort__type_init(left); + left_type = left->mem_type; + } + + if (!right_type) { + 
sort__type_init(right); + right_type = right->mem_type; + } + + return strcmp(left_type->type_name, right_type->type_name); +} + +static int64_t +sort__type_sort(struct hist_entry *left, struct hist_entry *right) +{ + return sort__type_collapse(left, right); +} + +static int hist_entry__type_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->type_name); +} + +struct sort_entry sort_type = { + .se_header = "Data Type", + .se_cmp = sort__type_cmp, + .se_collapse = sort__type_collapse, + .se_sort = sort__type_sort, + .se_init = sort__type_init, + .se_snprintf = hist_entry__type_snprintf, + .se_width_idx = HISTC_TYPE, +}; + struct sort_dimension { const char *name; @@ -2185,7 +2249,8 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_ADDR, "addr", sort_addr), DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc), DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), - DIM(SORT_SIMD, "simd", sort_simd) + DIM(SORT_SIMD, "simd", sort_simd), + DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), }; #undef DIM diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index ecfb7f1359d5..aabf0b8331a3 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -15,6 +15,7 @@ struct option; struct thread; +struct annotated_data_type; extern regex_t parent_regex; extern const char *sort_order; @@ -34,6 +35,7 @@ extern struct sort_entry sort_dso_to; extern struct sort_entry sort_sym_from; extern struct sort_entry sort_sym_to; extern struct sort_entry sort_srcline; +extern struct sort_entry sort_type; extern const char default_mem_sort_order[]; extern bool chk_double_cl; @@ -154,6 +156,7 @@ struct hist_entry { struct perf_hpp_list *hpp_list; struct hist_entry *parent_he; struct hist_entry_ops *ops; + struct annotated_data_type *mem_type; union { /* this is for hierarchical entry structure */ struct { @@ -243,6 +246,7 @@ enum sort_type { SORT_LOCAL_RETIRE_LAT, SORT_GLOBAL_RETIRE_LAT, SORT_SIMD, + SORT_ANNOTATE_DATA_TYPE, /* branch stack specific sort keys */ __SORT_BRANCH_STACK, From 81e57deec32594b124d777b1d3ca8a1415410230 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:15 -0800 Subject: [PATCH 155/179] perf report: Support data type profiling Enable type annotation when the 'type' sort key is used. It shows type of variables the samples access at the moment. Users can see which types are accessed frequently. $ perf report -s dso,type --stdio ... # Overhead Shared Object Data Type # ........ ................. ......... # 35.47% [kernel.kallsyms] (unknown) 1.62% [kernel.kallsyms] struct sched_entry 1.23% [kernel.kallsyms] struct cfs_rq 0.83% [kernel.kallsyms] struct task_struct 0.34% [kernel.kallsyms] struct list_head 0.30% [kernel.kallsyms] struct mem_cgroup ... Committer testing: With the perf.data file collected in the previous cset: # perf report --stdio -s type # To display the perf.data header info, please use --header/--header-only options. # # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Data Type # ........ ......... # 42.86% struct list_head 42.86% (unknown) 14.29% char # # (Tip: To record callchains for each sample: perf record -g) # # perf report --stdio -s dso,type # To display the perf.data header info, please use --header/--header-only options. 
# # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Shared Object Data Type # ........ .................... ......... # 42.86% [kernel.kallsyms] struct list_head 28.57% libc.so.6 (unknown) 14.29% [kernel.kallsyms] char 14.29% ld-linux-x86-64.so.2 (unknown) # # (Tip: Save output of perf stat using: perf stat record ) # # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-10-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-report.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 178fb602bc98..f2ed2b7e80a3 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -96,6 +96,7 @@ struct report { bool stitch_lbr; bool disable_order; bool skip_empty; + bool data_type; int max_stack; struct perf_read_values show_threads_values; const char *pretty_printing_style; @@ -170,7 +171,7 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter, struct mem_info *mi; struct branch_info *bi; - if (!ui__has_annotation() && !rep->symbol_ipc) + if (!ui__has_annotation() && !rep->symbol_ipc && !rep->data_type) return 0; if (sort__mode == SORT_MODE__BRANCH) { @@ -1639,6 +1640,16 @@ int cmd_report(int argc, const char **argv) sort_order = NULL; } + if (sort_order && strstr(sort_order, "type")) { + report.data_type = true; + annotate_opts.annotate_src = false; + +#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT + pr_err("Error: Data type profiling is disabled due to missing DWARF support\n"); + goto error; +#endif + } + if (strcmp(input_name, "-") != 0) setup_browser(true); else @@ -1697,7 +1708,7 @@ int cmd_report(int argc, const char **argv) * so don't allocate extra space that won't be used in the stdio * implementation. */ - if (ui__has_annotation() || report.symbol_ipc || + if (ui__has_annotation() || report.symbol_ipc || report.data_type || report.total_cycles_mode) { ret = symbol__annotation_init(); if (ret < 0) From 4a111cadac85362ed9476737d7a36e8dd3a8e476 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:16 -0800 Subject: [PATCH 156/179] perf annotate-data: Add member field in the data type Add child member field if the current type is a composite type like a struct or union. The member fields are linked in the children list and do the same recursively if the child itself is a composite type. Add 'self' member to the annotated_data_type to handle the members in the same way. 
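As a standalone illustration in plain C (made-up types, not perf code): the recursive member walk records cumulative offsets from the outermost type, much like what offsetof() reports for a nested struct:

  #include <stdio.h>
  #include <stddef.h>

  /* Illustrates the offsets __add_member_cb() stores via
   * member->offset = loc + parent->offset. */
  struct inner {
          long weight;
          long runnable;
  };

  struct outer {
          struct inner load;      /* composite member -> recursed into */
          int throttle_count;
          void *rq;
  };

  int main(void)
  {
          printf("load           offset=%zu size=%zu\n",
                 offsetof(struct outer, load), sizeof(struct inner));
          printf("load.weight    offset=%zu\n",
                 offsetof(struct outer, load) + offsetof(struct inner, weight));
          printf("load.runnable  offset=%zu\n",
                 offsetof(struct outer, load) + offsetof(struct inner, runnable));
          printf("throttle_count offset=%zu\n", offsetof(struct outer, throttle_count));
          printf("rq             offset=%zu\n", offsetof(struct outer, rq));
          return 0;
  }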
Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-11-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate-data.c | 101 ++++++++++++++++++++++++++++---- tools/perf/util/annotate-data.h | 27 +++++++-- tools/perf/util/sort.c | 9 ++- 3 files changed, 119 insertions(+), 18 deletions(-) diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index 1f921971174d..5269c6630e04 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -31,9 +31,9 @@ static int data_type_cmp(const void *_key, const struct rb_node *node) type = rb_entry(node, struct annotated_data_type, node); - if (key->type_size != type->type_size) - return key->type_size - type->type_size; - return strcmp(key->type_name, type->type_name); + if (key->self.size != type->self.size) + return key->self.size - type->self.size; + return strcmp(key->self.type_name, type->self.type_name); } static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b) @@ -43,9 +43,80 @@ static bool data_type_less(struct rb_node *node_a, const struct rb_node *node_b) a = rb_entry(node_a, struct annotated_data_type, node); b = rb_entry(node_b, struct annotated_data_type, node); - if (a->type_size != b->type_size) - return a->type_size < b->type_size; - return strcmp(a->type_name, b->type_name) < 0; + if (a->self.size != b->self.size) + return a->self.size < b->self.size; + return strcmp(a->self.type_name, b->self.type_name) < 0; +} + +/* Recursively add new members for struct/union */ +static int __add_member_cb(Dwarf_Die *die, void *arg) +{ + struct annotated_member *parent = arg; + struct annotated_member *member; + Dwarf_Die member_type, die_mem; + Dwarf_Word size, loc; + Dwarf_Attribute attr; + struct strbuf sb; + int tag; + + if (dwarf_tag(die) != DW_TAG_member) + return DIE_FIND_CB_SIBLING; + + member = zalloc(sizeof(*member)); + if (member == NULL) + return DIE_FIND_CB_END; + + strbuf_init(&sb, 32); + die_get_typename(die, &sb); + + die_get_real_type(die, &member_type); + if (dwarf_aggregate_size(&member_type, &size) < 0) + size = 0; + + if (!dwarf_attr_integrate(die, DW_AT_data_member_location, &attr)) + loc = 0; + else + dwarf_formudata(&attr, &loc); + + member->type_name = strbuf_detach(&sb, NULL); + /* member->var_name can be NULL */ + if (dwarf_diename(die)) + member->var_name = strdup(dwarf_diename(die)); + member->size = size; + member->offset = loc + parent->offset; + INIT_LIST_HEAD(&member->children); + list_add_tail(&member->node, &parent->children); + + tag = dwarf_tag(&member_type); + switch (tag) { + case DW_TAG_structure_type: + case DW_TAG_union_type: + die_find_child(&member_type, __add_member_cb, member, &die_mem); + break; + default: + break; + } + return DIE_FIND_CB_SIBLING; +} + +static void add_member_types(struct annotated_data_type *parent, Dwarf_Die *type) +{ + Dwarf_Die die_mem; + + die_find_child(type, __add_member_cb, &parent->self, &die_mem); +} + +static void delete_members(struct annotated_member *member) +{ + struct annotated_member *child, *tmp; + + list_for_each_entry_safe(child, tmp, &member->children, node) { + list_del(&child->node); + delete_members(child); + free(child->type_name); + free(child->var_name); + free(child); + } } static struct annotated_data_type 
*dso__findnew_data_type(struct dso *dso, @@ -65,8 +136,8 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, dwarf_aggregate_size(type_die, &size); /* Check existing nodes in dso->data_types tree */ - key.type_name = type_name; - key.type_size = size; + key.self.type_name = type_name; + key.self.size = size; node = rb_find(&key, &dso->data_types, data_type_cmp); if (node) { result = rb_entry(node, struct annotated_data_type, node); @@ -81,8 +152,15 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, return NULL; } - result->type_name = type_name; - result->type_size = size; + result->self.type_name = type_name; + result->self.size = size; + INIT_LIST_HEAD(&result->self.children); + + /* + * Fill member info unconditionally for now, + * later perf annotate would need it. + */ + add_member_types(result, type_die); rb_add(&result->node, &dso->data_types, data_type_less); return result; @@ -233,7 +311,8 @@ void annotated_data_type__tree_delete(struct rb_root *root) rb_erase(node, root); pos = rb_entry(node, struct annotated_data_type, node); - free(pos->type_name); + delete_members(&pos->self); + free(pos->self.type_name); free(pos); } } diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h index 6efdd7e21b28..33748222e6aa 100644 --- a/tools/perf/util/annotate-data.h +++ b/tools/perf/util/annotate-data.h @@ -9,17 +9,36 @@ struct map_symbol; +/** + * struct annotated_member - Type of member field + * @node: List entry in the parent list + * @children: List head for child nodes + * @type_name: Name of the member type + * @var_name: Name of the member variable + * @offset: Offset from the outer data type + * @size: Size of the member field + * + * This represents a member type in a data type. + */ +struct annotated_member { + struct list_head node; + struct list_head children; + char *type_name; + char *var_name; + int offset; + int size; +}; + /** * struct annotated_data_type - Data type to profile - * @type_name: Name of the data type - * @type_size: Size of the data type + * @node: RB-tree node for dso->type_tree + * @self: Actual type information * * This represents a data type accessed by samples in the profile data. 
*/ struct annotated_data_type { struct rb_node node; - char *type_name; - int type_size; + struct annotated_member self; }; extern struct annotated_data_type unknown_type; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index e647f0117bb5..a41209e242ae 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2135,7 +2135,10 @@ struct sort_entry sort_addr = { /* --sort type */ struct annotated_data_type unknown_type = { - .type_name = (char *)"(unknown)", + .self = { + .type_name = (char *)"(unknown)", + .children = LIST_HEAD_INIT(unknown_type.self.children), + }, }; static int64_t @@ -2170,7 +2173,7 @@ sort__type_collapse(struct hist_entry *left, struct hist_entry *right) right_type = right->mem_type; } - return strcmp(left_type->type_name, right_type->type_name); + return strcmp(left_type->self.type_name, right_type->self.type_name); } static int64_t @@ -2182,7 +2185,7 @@ sort__type_sort(struct hist_entry *left, struct hist_entry *right) static int hist_entry__type_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) { - return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->type_name); + return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name); } struct sort_entry sort_type = { From 9bd7ddd1576164fbb8d369c6a7b018af9544e202 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:17 -0800 Subject: [PATCH 157/179] perf annotate-data: Update sample histogram for type The annotated_data_type__update_samples() to get histogram for data type access. It'll be called by perf annotate to show which fields in the data type are accessed frequently. Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-12-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate-data.c | 81 +++++++++++++++++++++++++++++++++ tools/perf/util/annotate-data.h | 42 +++++++++++++++++ tools/perf/util/annotate.c | 9 +++- 3 files changed, 131 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index 5269c6630e04..868f52ccdb74 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -13,6 +13,8 @@ #include "debuginfo.h" #include "debug.h" #include "dso.h" +#include "evsel.h" +#include "evlist.h" #include "map.h" #include "map_symbol.h" #include "strbuf.h" @@ -302,6 +304,44 @@ struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, return result; } +static int alloc_data_type_histograms(struct annotated_data_type *adt, int nr_entries) +{ + int i; + size_t sz = sizeof(struct type_hist); + + sz += sizeof(struct type_hist_entry) * adt->self.size; + + /* Allocate a table of pointers for each event */ + adt->nr_histograms = nr_entries; + adt->histograms = calloc(nr_entries, sizeof(*adt->histograms)); + if (adt->histograms == NULL) + return -ENOMEM; + + /* + * Each histogram is allocated for the whole size of the type. + * TODO: Probably we can move the histogram to members. 
+ */ + for (i = 0; i < nr_entries; i++) { + adt->histograms[i] = zalloc(sz); + if (adt->histograms[i] == NULL) + goto err; + } + return 0; + +err: + while (--i >= 0) + free(adt->histograms[i]); + free(adt->histograms); + return -ENOMEM; +} + +static void delete_data_type_histograms(struct annotated_data_type *adt) +{ + for (int i = 0; i < adt->nr_histograms; i++) + free(adt->histograms[i]); + free(adt->histograms); +} + void annotated_data_type__tree_delete(struct rb_root *root) { struct annotated_data_type *pos; @@ -312,7 +352,48 @@ void annotated_data_type__tree_delete(struct rb_root *root) rb_erase(node, root); pos = rb_entry(node, struct annotated_data_type, node); delete_members(&pos->self); + delete_data_type_histograms(pos); free(pos->self.type_name); free(pos); } } + +/** + * annotated_data_type__update_samples - Update histogram + * @adt: Data type to update + * @evsel: Event to update + * @offset: Offset in the type + * @nr_samples: Number of samples at this offset + * @period: Event count at this offset + * + * This function updates type histogram at @ofs for @evsel. Samples are + * aggregated before calling this function so it can be called with more + * than one samples at a certain offset. + */ +int annotated_data_type__update_samples(struct annotated_data_type *adt, + struct evsel *evsel, int offset, + int nr_samples, u64 period) +{ + struct type_hist *h; + + if (adt == NULL) + return 0; + + if (adt->histograms == NULL) { + int nr = evsel->evlist->core.nr_entries; + + if (alloc_data_type_histograms(adt, nr) < 0) + return -1; + } + + if (offset < 0 || offset >= adt->self.size) + return -1; + + h = adt->histograms[evsel->core.idx]; + + h->nr_samples += nr_samples; + h->addr[offset].nr_samples += nr_samples; + h->period += period; + h->addr[offset].period += period; + return 0; +} diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h index 33748222e6aa..d2dc025b1934 100644 --- a/tools/perf/util/annotate-data.h +++ b/tools/perf/util/annotate-data.h @@ -7,6 +7,7 @@ #include #include +struct evsel; struct map_symbol; /** @@ -29,16 +30,42 @@ struct annotated_member { int size; }; +/** + * struct type_hist_entry - Histogram entry per offset + * @nr_samples: Number of samples + * @period: Count of event + */ +struct type_hist_entry { + int nr_samples; + u64 period; +}; + +/** + * struct type_hist - Type histogram for each event + * @nr_samples: Total number of samples in this data type + * @period: Total count of the event in this data type + * @offset: Array of histogram entry + */ +struct type_hist { + u64 nr_samples; + u64 period; + struct type_hist_entry addr[]; +}; + /** * struct annotated_data_type - Data type to profile * @node: RB-tree node for dso->type_tree * @self: Actual type information + * @nr_histogram: Number of histogram entries + * @histograms: An array of pointers to histograms * * This represents a data type accessed by samples in the profile data. 
*/ struct annotated_data_type { struct rb_node node; struct annotated_member self; + int nr_histograms; + struct type_hist **histograms; }; extern struct annotated_data_type unknown_type; @@ -49,6 +76,11 @@ extern struct annotated_data_type unknown_type; struct annotated_data_type *find_data_type(struct map_symbol *ms, u64 ip, int reg, int offset); +/* Update type access histogram at the given offset */ +int annotated_data_type__update_samples(struct annotated_data_type *adt, + struct evsel *evsel, int offset, + int nr_samples, u64 period); + /* Release all data type information in the tree */ void annotated_data_type__tree_delete(struct rb_root *root); @@ -61,6 +93,16 @@ find_data_type(struct map_symbol *ms __maybe_unused, u64 ip __maybe_unused, return NULL; } +static inline int +annotated_data_type__update_samples(struct annotated_data_type *adt __maybe_unused, + struct evsel *evsel __maybe_unused, + int offset __maybe_unused, + int nr_samples __maybe_unused, + u64 period __maybe_unused) +{ + return -1; +} + static inline void annotated_data_type__tree_delete(struct rb_root *root __maybe_unused) { } diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 8673eac4b9df..6747779ecef8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -3679,6 +3679,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) struct disasm_line *dl; struct annotated_insn_loc loc; struct annotated_op_loc *op_loc; + struct annotated_data_type *mem_type; u64 ip = he->ip; int i; @@ -3709,7 +3710,13 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) if (!op_loc->mem_ref) continue; - return find_data_type(ms, ip, op_loc->reg, op_loc->offset); + mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset); + + annotated_data_type__update_samples(mem_type, evsel, + op_loc->offset, + he->stat.nr_events, + he->stat.period); + return mem_type; } return NULL; } From 871304a79f755b2ab594bbd21857ecb4c4aa57c9 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:18 -0800 Subject: [PATCH 158/179] perf report: Add 'typeoff' sort key The typeoff sort key shows the data type name, offset and the name of the field. This is useful to see which field in the struct is accessed most frequently. $ perf report -s type,typeoff --hierarchy --stdio ... # Overhead Data Type / Data Type Offset # ............ ............................ # ... 1.23% struct cfs_rq 0.19% struct cfs_rq +404 (throttle_count) 0.19% struct cfs_rq +0 (load.weight) 0.19% struct cfs_rq +336 (leaf_cfs_rq_list.next) 0.09% struct cfs_rq +272 (propagate) 0.09% struct cfs_rq +196 (removed.nr) 0.09% struct cfs_rq +80 (curr) 0.09% struct cfs_rq +544 (lt_b_children_throttled) 0.06% struct cfs_rq +320 (rq) Committer testing: Again with the perf.data from the previous csets: # perf report --stdio -s type,typeoff # To display the perf.data header info, please use --header/--header-only options. # # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Data Type Data Type Offset # ........ ......... ................ # 42.86% struct list_head struct list_head +8 (prev) 42.86% (unknown) (unknown) +0 (no field) 14.29% char char +0 (no field) # # (Tip: To see callchains in a more compact form: perf report -g folded) # # perf report --stdio -s dso,type,typeoff # To display the perf.data header info, please use --header/--header-only options. 
# # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Shared Object Data Type Data Type Offset # ........ .................... ......... ................ # 42.86% [kernel.kallsyms] struct list_head struct list_head +8 (prev) 28.57% libc.so.6 (unknown) (unknown) +0 (no field) 14.29% [kernel.kallsyms] char char +0 (no field) 14.29% ld-linux-x86-64.so.2 (unknown) (unknown) +0 (no field) # # (Tip: If you have debuginfo enabled, try: perf report -s sym,srcline) # # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-13-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-report.txt | 1 + tools/perf/util/annotate.c | 1 + tools/perf/util/hist.h | 1 + tools/perf/util/sort.c | 83 +++++++++++++++++++++++- tools/perf/util/sort.h | 2 + 5 files changed, 87 insertions(+), 1 deletion(-) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index aec34417090b..b57eb51b47aa 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -119,6 +119,7 @@ OPTIONS to the previous instruction in cycles. And currently supported only on X86 - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate - type: Data type of sample memory access. + - typeoff: Offset in the data type of sample memory access. By default, comm, dso and symbol keys are used. (i.e. 
--sort comm,dso,symbol) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 6747779ecef8..f966e8f83c5e 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -3716,6 +3716,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) op_loc->offset, he->stat.nr_events, he->stat.period); + he->mem_type_off = op_loc->offset; return mem_type; } return NULL; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 7ebbf427b1ea..18128a49309e 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -83,6 +83,7 @@ enum hist_column { HISTC_ADDR, HISTC_SIMD, HISTC_TYPE, + HISTC_TYPE_OFFSET, HISTC_NR_COLS, /* Last entry */ }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index a41209e242ae..d78e680d3988 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -2153,8 +2153,10 @@ static void sort__type_init(struct hist_entry *he) return; he->mem_type = hist_entry__get_data_type(he); - if (he->mem_type == NULL) + if (he->mem_type == NULL) { he->mem_type = &unknown_type; + he->mem_type_off = 0; + } } static int64_t @@ -2198,6 +2200,84 @@ struct sort_entry sort_type = { .se_width_idx = HISTC_TYPE, }; +/* --sort typeoff */ + +static int64_t +sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right) +{ + struct annotated_data_type *left_type = left->mem_type; + struct annotated_data_type *right_type = right->mem_type; + int64_t ret; + + if (!left_type) { + sort__type_init(left); + left_type = left->mem_type; + } + + if (!right_type) { + sort__type_init(right); + right_type = right->mem_type; + } + + ret = strcmp(left_type->self.type_name, right_type->self.type_name); + if (ret) + return ret; + return left->mem_type_off - right->mem_type_off; +} + +static void fill_member_name(char *buf, size_t sz, struct annotated_member *m, + int offset, bool first) +{ + struct annotated_member *child; + + if (list_empty(&m->children)) + return; + + list_for_each_entry(child, &m->children, node) { + if (child->offset <= offset && offset < child->offset + child->size) { + int len = 0; + + /* It can have anonymous struct/union members */ + if (child->var_name) { + len = scnprintf(buf, sz, "%s%s", + first ? 
"" : ".", child->var_name); + first = false; + } + + fill_member_name(buf + len, sz - len, child, offset, first); + return; + } + } +} + +static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width __maybe_unused) +{ + struct annotated_data_type *he_type = he->mem_type; + char buf[4096]; + + buf[0] = '\0'; + if (list_empty(&he_type->self.children)) + snprintf(buf, sizeof(buf), "no field"); + else + fill_member_name(buf, sizeof(buf), &he_type->self, + he->mem_type_off, true); + buf[4095] = '\0'; + + return repsep_snprintf(bf, size, "%s %+d (%s)", he_type->self.type_name, + he->mem_type_off, buf); +} + +struct sort_entry sort_type_offset = { + .se_header = "Data Type Offset", + .se_cmp = sort__type_cmp, + .se_collapse = sort__typeoff_sort, + .se_sort = sort__typeoff_sort, + .se_init = sort__type_init, + .se_snprintf = hist_entry__typeoff_snprintf, + .se_width_idx = HISTC_TYPE_OFFSET, +}; + struct sort_dimension { const char *name; @@ -2254,6 +2334,7 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc), DIM(SORT_SIMD, "simd", sort_simd), DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), + DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset), }; #undef DIM diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index aabf0b8331a3..d806adcc1e1e 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -113,6 +113,7 @@ struct hist_entry { u64 p_stage_cyc; u8 cpumode; u8 depth; + int mem_type_off; struct simd_flags simd_flags; /* We are added by hists__add_dummy_entry. */ @@ -247,6 +248,7 @@ enum sort_type { SORT_GLOBAL_RETIRE_LAT, SORT_SIMD, SORT_ANNOTATE_DATA_TYPE, + SORT_ANNOTATE_DATA_TYPE_OFFSET, /* branch stack specific sort keys */ __SORT_BRANCH_STACK, From e2c1c8ff2d2ffec340b8fc73ee13b8fb516d1c6d Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:19 -0800 Subject: [PATCH 159/179] perf report: Add 'symoff' sort key The symoff sort key is to print symbol and offset of sample. This is useful for data type profiling to show exact instruction in the function which refers the data. $ perf report -s type,sym,typeoff,symoff --hierarchy ... # Overhead Data Type / Symbol / Data Type Offset / Symbol Offset # .............. ..................................................... # 1.23% struct cfs_rq 0.84% update_blocked_averages 0.19% struct cfs_rq +336 (leaf_cfs_rq_list.next) 0.19% [k] update_blocked_averages+0x96 0.19% struct cfs_rq +0 (load.weight) 0.14% [k] update_blocked_averages+0x104 0.04% [k] update_blocked_averages+0x31c 0.17% struct cfs_rq +404 (throttle_count) 0.12% [k] update_blocked_averages+0x9d 0.05% [k] update_blocked_averages+0x1f9 0.08% struct cfs_rq +272 (propagate) 0.07% [k] update_blocked_averages+0x3d3 0.02% [k] update_blocked_averages+0x45b ... Committer testing: # perf report --stdio -s type,typeoff,symoff # To display the perf.data header info, please use --header/--header-only options. # # # Total Lost Samples: 0 # # Samples: 4 of event 'cpu_atom/mem-loads,ldlat=30/P' # Event count (approx.): 7 # # Overhead Data Type Data Type Offset Symbol Offset # ........ ......... ................ ............. # 42.86% struct list_head struct list_head +8 (prev) [k] __list_del_entry_valid_or_report+0x7 28.57% (unknown) (unknown) +0 (no field) [.] _nl_intern_locale_data+0x25 14.29% char char +0 (no field) [k] strncpy_from_user+0xa5 14.29% (unknown) (unknown) +0 (no field) [.] 
_dl_lookup_symbol_x+0x50 # # (Tip: To change sampling frequency to 100 Hz: perf record -F 100) # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-14-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-report.txt | 1 + tools/perf/util/hist.h | 1 + tools/perf/util/sort.c | 47 ++++++++++++++++++++++++ tools/perf/util/sort.h | 1 + 4 files changed, 50 insertions(+) diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index b57eb51b47aa..38f59ac064f7 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt @@ -120,6 +120,7 @@ OPTIONS - simd: Flags describing a SIMD operation. "e" for empty Arm SVE predicate. "p" for partial Arm SVE predicate - type: Data type of sample memory access. - typeoff: Offset in the data type of sample memory access. + - symoff: Offset in the symbol. By default, comm, dso and symbol keys are used. (i.e. --sort comm,dso,symbol) diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 18128a49309e..4a0aea0c9e00 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -84,6 +84,7 @@ enum hist_column { HISTC_SIMD, HISTC_TYPE, HISTC_TYPE_OFFSET, + HISTC_SYMBOL_OFFSET, HISTC_NR_COLS, /* Last entry */ }; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index d78e680d3988..0cbbd5ba8175 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -419,6 +419,52 @@ struct sort_entry sort_sym = { .se_width_idx = HISTC_SYMBOL, }; +/* --sort symoff */ + +static int64_t +sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right) +{ + int64_t ret; + + ret = sort__sym_cmp(left, right); + if (ret) + return ret; + + return left->ip - right->ip; +} + +static int64_t +sort__symoff_sort(struct hist_entry *left, struct hist_entry *right) +{ + int64_t ret; + + ret = sort__sym_sort(left, right); + if (ret) + return ret; + + return left->ip - right->ip; +} + +static int +hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width) +{ + struct symbol *sym = he->ms.sym; + + if (sym == NULL) + return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip); + + return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start); +} + +struct sort_entry sort_sym_offset = { + .se_header = "Symbol Offset", + .se_cmp = sort__symoff_cmp, + .se_sort = sort__symoff_sort, + .se_snprintf = hist_entry__symoff_snprintf, + .se_filter = hist_entry__sym_filter, + .se_width_idx = HISTC_SYMBOL_OFFSET, +}; + /* --sort srcline */ char *hist_entry__srcline(struct hist_entry *he) @@ -2335,6 +2381,7 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_SIMD, "simd", sort_simd), DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type), DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset), + DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset), }; #undef DIM diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index d806adcc1e1e..6f6b4189a389 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -249,6 +249,7 @@ enum sort_type { SORT_SIMD, SORT_ANNOTATE_DATA_TYPE, SORT_ANNOTATE_DATA_TYPE_OFFSET, + SORT_SYM_OFFSET, /* branch stack specific sort keys */ 
__SORT_BRANCH_STACK, From 263925bf843f5ca8257317d74992f8e6b7d222e3 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:20 -0800 Subject: [PATCH 160/179] perf annotate: Add --data-type option Support data type annotation with new --data-type option. It internally uses type sort key to collect sample histogram for the type and display every members like below. $ perf annotate --data-type ... Annotate type: 'struct cfs_rq' in [kernel.kallsyms] (13 samples): ============================================================================ samples offset size field 13 0 640 struct cfs_rq { 2 0 16 struct load_weight load { 2 0 8 unsigned long weight; 0 8 4 u32 inv_weight; }; 0 16 8 unsigned long runnable_weight; 0 24 4 unsigned int nr_running; 1 28 4 unsigned int h_nr_running; ... For simplicity it prints the number of samples per field for now. But it should be easy to show the overhead percentage instead. The number at the outer struct is a sum of the numbers of the inner members. For example, struct cfs_rq got total 13 samples, and 2 came from the load (struct load_weight) and 1 from h_nr_running. Similarly, the struct load_weight got total 2 samples and they all came from the weight field. I've added two new flags in the symbol_conf for this. The annotate_data_member is to get the members of the type. This is also needed for perf report with typeoff sort key. The annotate_data_sample is to update sample stats for each offset and used only in annotate. Currently it only support stdio output mode, TUI support can be added later. Committer testing: With the perf.data from the previous csets, a very simple, short duration one: # perf annotate --data-type Annotate type: 'struct list_head' in [kernel.kallsyms] (1 samples): ============================================================================ samples offset size field 1 0 16 struct list_head { 0 0 8 struct list_head* next; 1 8 8 struct list_head* prev; }; Annotate type: 'char' in [kernel.kallsyms] (1 samples): ============================================================================ samples offset size field 1 0 1 char ; # Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-15-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-annotate.txt | 8 ++ tools/perf/builtin-annotate.c | 97 +++++++++++++++++++++- tools/perf/util/annotate-data.c | 8 +- tools/perf/util/annotate.c | 10 ++- tools/perf/util/sort.c | 2 + tools/perf/util/symbol_conf.h | 4 +- 6 files changed, 118 insertions(+), 11 deletions(-) diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index fe168e8165c8..0e6a49b7795c 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -155,6 +155,14 @@ include::itrace.txt[] stdio or stdio2 (Default: 0). Note that this is about selection of functions to display, not about lines within the function. +--data-type[=TYPE_NAME]:: + Display data type annotation instead of code. It infers data type of + samples (if they are memory accessing instructions) using DWARF debug + information. It can take an optional argument of data type name. 
In + that case it'd show annotation for the type only, otherwise it'd show + all data types it finds. + + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index d880f1b039fd..8acfbbc1b9c2 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -20,6 +20,7 @@ #include "util/evlist.h" #include "util/evsel.h" #include "util/annotate.h" +#include "util/annotate-data.h" #include "util/event.h" #include #include "util/parse-events.h" @@ -55,9 +56,11 @@ struct perf_annotate { bool skip_missing; bool has_br_stack; bool group_set; + bool data_type; float min_percent; const char *sym_hist_filter; const char *cpu_list; + const char *target_data_type; DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); }; @@ -322,6 +325,32 @@ static int hist_entry__tty_annotate(struct hist_entry *he, return symbol__tty_annotate2(&he->ms, evsel); } +static void print_annotated_data_type(struct annotated_data_type *mem_type, + struct annotated_member *member, + struct evsel *evsel, int indent) +{ + struct annotated_member *child; + struct type_hist *h = mem_type->histograms[evsel->core.idx]; + int i, samples = 0; + + for (i = 0; i < member->size; i++) + samples += h->addr[member->offset + i].nr_samples; + + printf(" %10d %10d %10d %*s%s\t%s", + samples, member->offset, member->size, indent, "", member->type_name, + member->var_name ?: ""); + + if (!list_empty(&member->children)) + printf(" {\n"); + + list_for_each_entry(child, &member->children, node) + print_annotated_data_type(mem_type, child, evsel, indent + 4); + + if (!list_empty(&member->children)) + printf("%*s}", 35 + indent, ""); + printf(";\n"); +} + static void hists__find_annotations(struct hists *hists, struct evsel *evsel, struct perf_annotate *ann) @@ -361,6 +390,40 @@ static void hists__find_annotations(struct hists *hists, continue; } + if (ann->data_type) { + struct dso *dso = map__dso(he->ms.map); + + /* skip unknown type */ + if (he->mem_type->histograms == NULL) + goto find_next; + + if (ann->target_data_type) { + const char *type_name = he->mem_type->self.type_name; + + /* skip 'struct ' prefix in the type name */ + if (strncmp(ann->target_data_type, "struct ", 7) && + !strncmp(type_name, "struct ", 7)) + type_name += 7; + + /* skip 'union ' prefix in the type name */ + if (strncmp(ann->target_data_type, "union ", 6) && + !strncmp(type_name, "union ", 6)) + type_name += 6; + + if (strcmp(ann->target_data_type, type_name)) + goto find_next; + } + + printf("Annotate type: '%s' in %s (%d samples):\n", + he->mem_type->self.type_name, dso->name, he->stat.nr_events); + printf("============================================================================\n"); + printf(" %10s %10s %10s %s\n", "samples", "offset", "size", "field"); + + print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0); + printf("\n"); + goto find_next; + } + if (use_browser == 2) { int ret; int (*annotate)(struct hist_entry *he, @@ -496,6 +559,17 @@ static int parse_percent_limit(const struct option *opt, const char *str, return 0; } +static int parse_data_type(const struct option *opt, const char *str, int unset) +{ + struct perf_annotate *ann = opt->value; + + ann->data_type = !unset; + if (str) + ann->target_data_type = strdup(str); + + return 0; +} + static const char * const annotate_usage[] = { "perf annotate []", NULL @@ -607,6 +681,9 @@ int cmd_annotate(int argc, const char **argv) OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts", 
"Instruction Tracing options\n" ITRACE_HELP, itrace_parse_synth_opts), + OPT_CALLBACK_OPTARG(0, "data-type", &annotate, NULL, "name", + "Show data type annotate for the memory accesses", + parse_data_type), OPT_END() }; @@ -661,6 +738,13 @@ int cmd_annotate(int argc, const char **argv) } #endif +#ifndef HAVE_DWARF_GETLOCATIONS_SUPPORT + if (annotate.data_type) { + pr_err("Error: Data type profiling is disabled due to missing DWARF support\n"); + return -ENOTSUP; + } +#endif + ret = symbol__validate_sym_arguments(); if (ret) return ret; @@ -703,6 +787,14 @@ int cmd_annotate(int argc, const char **argv) use_browser = 2; #endif + /* FIXME: only support stdio for now */ + if (annotate.data_type) { + use_browser = 0; + annotate_opts.annotate_src = false; + symbol_conf.annotate_data_member = true; + symbol_conf.annotate_data_sample = true; + } + setup_browser(true); /* @@ -710,7 +802,10 @@ int cmd_annotate(int argc, const char **argv) * symbol, we do not care about the processes in annotate, * set sort order to avoid repeated output. */ - sort_order = "dso,symbol"; + if (annotate.data_type) + sort_order = "dso,type"; + else + sort_order = "dso,symbol"; /* * Set SORT_MODE__BRANCH so that annotate display IPC/Cycle diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index 868f52ccdb74..df9689f46619 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -19,6 +19,7 @@ #include "map_symbol.h" #include "strbuf.h" #include "symbol.h" +#include "symbol_conf.h" /* * Compare type name and size to maintain them in a tree. @@ -158,11 +159,8 @@ static struct annotated_data_type *dso__findnew_data_type(struct dso *dso, result->self.size = size; INIT_LIST_HEAD(&result->self.children); - /* - * Fill member info unconditionally for now, - * later perf annotate would need it. 
- */ - add_member_types(result, type_die); + if (symbol_conf.annotate_data_member) + add_member_types(result, type_die); rb_add(&result->node, &dso->data_types, data_type_less); return result; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index f966e8f83c5e..68424ee0215e 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -3712,10 +3712,12 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset); - annotated_data_type__update_samples(mem_type, evsel, - op_loc->offset, - he->stat.nr_events, - he->stat.period); + if (symbol_conf.annotate_data_sample) { + annotated_data_type__update_samples(mem_type, evsel, + op_loc->offset, + he->stat.nr_events, + he->stat.period); + } he->mem_type_off = op_loc->offset; return mem_type; } diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 0cbbd5ba8175..30254eb63709 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -3401,6 +3401,8 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok, list->thread = 1; } else if (sd->entry == &sort_comm) { list->comm = 1; + } else if (sd->entry == &sort_type_offset) { + symbol_conf.annotate_data_member = true; } return __sort_dimension__add(sd, list, level); diff --git a/tools/perf/util/symbol_conf.h b/tools/perf/util/symbol_conf.h index 6040286e07a6..c114bbceef40 100644 --- a/tools/perf/util/symbol_conf.h +++ b/tools/perf/util/symbol_conf.h @@ -44,7 +44,9 @@ struct symbol_conf { buildid_mmap2, guest_code, lazy_load_kernel_maps, - keep_exited_threads; + keep_exited_threads, + annotate_data_member, + annotate_data_sample; const char *vmlinux_name, *kallsyms_name, *source_prefix, From 227ad323854ab48d45966461d7a0a94e03f19368 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:21 -0800 Subject: [PATCH 161/179] perf annotate: Support event group display When events are grouped together, it'd be natural to show them at once like in other mode. Handle group leaders with members to collect the number of samples together and display like below: $ perf annotate --data-type --group ... 
Annotate type: 'struct page' in vmlinux (1 samples): event[0] = cpu/mem-loads,ldlat=30/P event[1] = cpu/mem-stores/P event[2] = dummy:u ============================================================================ samples offset size field 1 0 0 0 64 struct page { 0 0 0 0 8 long unsigned int flags; 0 0 0 8 40 union { 0 0 0 8 40 struct { 0 0 0 8 16 union { 0 0 0 8 16 struct list_head lru { 0 0 0 8 8 struct list_head* next; 0 0 0 16 8 struct list_head* prev; }; 0 0 0 8 16 struct { 0 0 0 8 8 void* __filler; 0 0 0 16 4 unsigned int mlock_count; }; 0 0 0 8 16 struct list_head buddy_list { 0 0 0 8 8 struct list_head* next; 0 0 0 16 8 struct list_head* prev; }; Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-16-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 89 ++++++++++++++++++++++++++++++----- 1 file changed, 77 insertions(+), 12 deletions(-) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 8acfbbc1b9c2..3956ea1334cc 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -325,19 +325,64 @@ static int hist_entry__tty_annotate(struct hist_entry *he, return symbol__tty_annotate2(&he->ms, evsel); } +static void print_annotated_data_header(struct hist_entry *he, struct evsel *evsel) +{ + struct dso *dso = map__dso(he->ms.map); + int nr_members = 1; + int nr_samples = he->stat.nr_events; + + if (evsel__is_group_event(evsel)) { + struct hist_entry *pair; + + list_for_each_entry(pair, &he->pairs.head, pairs.node) + nr_samples += pair->stat.nr_events; + } + + printf("Annotate type: '%s' in %s (%d samples):\n", + he->mem_type->self.type_name, dso->name, nr_samples); + + if (evsel__is_group_event(evsel)) { + struct evsel *pos; + int i = 0; + + for_each_group_evsel(pos, evsel) + printf(" event[%d] = %s\n", i++, pos->name); + + nr_members = evsel->core.nr_members; + } + + printf("============================================================================\n"); + printf("%*s %10s %10s %s\n", 11 * nr_members, "samples", "offset", "size", "field"); +} + static void print_annotated_data_type(struct annotated_data_type *mem_type, struct annotated_member *member, struct evsel *evsel, int indent) { struct annotated_member *child; struct type_hist *h = mem_type->histograms[evsel->core.idx]; - int i, samples = 0; + int i, nr_events = 1, samples = 0; for (i = 0; i < member->size; i++) samples += h->addr[member->offset + i].nr_samples; + printf(" %10d", samples); - printf(" %10d %10d %10d %*s%s\t%s", - samples, member->offset, member->size, indent, "", member->type_name, + if (evsel__is_group_event(evsel)) { + struct evsel *pos; + + for_each_group_member(pos, evsel) { + h = mem_type->histograms[pos->core.idx]; + + samples = 0; + for (i = 0; i < member->size; i++) + samples += h->addr[member->offset + i].nr_samples; + printf(" %10d", samples); + } + nr_events = evsel->core.nr_members; + } + + printf(" %10d %10d %*s%s\t%s", + member->offset, member->size, indent, "", member->type_name, member->var_name ?: ""); if (!list_empty(&member->children)) @@ -347,7 +392,7 @@ static void print_annotated_data_type(struct annotated_data_type *mem_type, print_annotated_data_type(mem_type, child, evsel, indent + 4); if 
(!list_empty(&member->children)) - printf("%*s}", 35 + indent, ""); + printf("%*s}", 11 * nr_events + 24 + indent, ""); printf(";\n"); } @@ -391,8 +436,6 @@ static void hists__find_annotations(struct hists *hists, } if (ann->data_type) { - struct dso *dso = map__dso(he->ms.map); - /* skip unknown type */ if (he->mem_type->histograms == NULL) goto find_next; @@ -414,11 +457,7 @@ static void hists__find_annotations(struct hists *hists, goto find_next; } - printf("Annotate type: '%s' in %s (%d samples):\n", - he->mem_type->self.type_name, dso->name, he->stat.nr_events); - printf("============================================================================\n"); - printf(" %10s %10s %10s %s\n", "samples", "offset", "size", "field"); - + print_annotated_data_header(he, evsel); print_annotated_data_type(he->mem_type, &he->mem_type->self, evsel, 0); printf("\n"); goto find_next; @@ -521,8 +560,20 @@ static int __cmd_annotate(struct perf_annotate *ann) evsel__reset_sample_bit(pos, CALLCHAIN); evsel__output_resort(pos, NULL); - if (symbol_conf.event_group && !evsel__is_group_leader(pos)) + /* + * An event group needs to display other events too. + * Let's delay printing until other events are processed. + */ + if (symbol_conf.event_group) { + if (!evsel__is_group_leader(pos)) { + struct hists *leader_hists; + + leader_hists = evsel__hists(evsel__leader(pos)); + hists__match(leader_hists, hists); + hists__link(leader_hists, hists); + } continue; + } hists__find_annotations(hists, pos, ann); } @@ -533,6 +584,20 @@ static int __cmd_annotate(struct perf_annotate *ann) goto out; } + /* Display group events together */ + evlist__for_each_entry(session->evlist, pos) { + struct hists *hists = evsel__hists(pos); + u32 nr_samples = hists->stats.nr_samples; + + if (nr_samples == 0) + continue; + + if (!symbol_conf.event_group || !evsel__is_group_leader(pos)) + continue; + + hists__find_annotations(hists, pos, ann); + } + if (use_browser == 2) { void (*show_annotations)(void); From 61a9741e9f78c64c5178e4ae9d405eeceff04c8f Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:22 -0800 Subject: [PATCH 162/179] perf annotate: Add --type-stat option for debugging The --type-stat option is to be used with --data-type and to print detailed failure reasons for the data type annotation. 
$ perf annotate --data-type --type-stat Annotate data type stats: total 294, ok 116 (39.5%), bad 178 (60.5%) ----------------------------------------------------------- 30 : no_sym 40 : no_insn_ops 33 : no_mem_ops 63 : no_var 4 : no_typeinfo 8 : bad_offset Signed-off-by: Namhyung Kim Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-17-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-annotate.txt | 3 ++ tools/perf/builtin-annotate.c | 44 +++++++++++++++++++++- tools/perf/util/annotate-data.c | 10 ++++- tools/perf/util/annotate-data.h | 31 +++++++++++++++ tools/perf/util/annotate.c | 29 +++++++++++--- 5 files changed, 109 insertions(+), 8 deletions(-) diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index 0e6a49b7795c..b95524bea021 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt @@ -162,6 +162,9 @@ include::itrace.txt[] that case it'd show annotation for the type only, otherwise it'd show all data types it finds. +--type-stat:: + Show stats for the data type annotation. + SEE ALSO -------- diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 3956ea1334cc..55f97ab1395b 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -57,6 +57,7 @@ struct perf_annotate { bool has_br_stack; bool group_set; bool data_type; + bool type_stat; float min_percent; const char *sym_hist_filter; const char *cpu_list; @@ -396,6 +397,43 @@ static void print_annotated_data_type(struct annotated_data_type *mem_type, printf(";\n"); } +static void print_annotate_data_stat(struct annotated_data_stat *s) +{ +#define PRINT_STAT(fld) if (s->fld) printf("%10d : %s\n", s->fld, #fld) + + int bad = s->no_sym + + s->no_insn + + s->no_insn_ops + + s->no_mem_ops + + s->no_reg + + s->no_dbginfo + + s->no_cuinfo + + s->no_var + + s->no_typeinfo + + s->invalid_size + + s->bad_offset; + int ok = s->total - bad; + + printf("Annotate data type stats:\n"); + printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n", + s->total, ok, 100.0 * ok / (s->total ?: 1), bad, 100.0 * bad / (s->total ?: 1)); + printf("-----------------------------------------------------------\n"); + PRINT_STAT(no_sym); + PRINT_STAT(no_insn); + PRINT_STAT(no_insn_ops); + PRINT_STAT(no_mem_ops); + PRINT_STAT(no_reg); + PRINT_STAT(no_dbginfo); + PRINT_STAT(no_cuinfo); + PRINT_STAT(no_var); + PRINT_STAT(no_typeinfo); + PRINT_STAT(invalid_size); + PRINT_STAT(bad_offset); + printf("\n"); + +#undef PRINT_STAT +} + static void hists__find_annotations(struct hists *hists, struct evsel *evsel, struct perf_annotate *ann) @@ -403,6 +441,9 @@ static void hists__find_annotations(struct hists *hists, struct rb_node *nd = rb_first_cached(&hists->entries), *next; int key = K_RIGHT; + if (ann->type_stat) + print_annotate_data_stat(&ann_data_stat); + while (nd) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); struct annotation *notes; @@ -749,7 +790,8 @@ int cmd_annotate(int argc, const char **argv) OPT_CALLBACK_OPTARG(0, "data-type", &annotate, NULL, "name", "Show data type annotate for the memory accesses", parse_data_type), - + OPT_BOOLEAN(0, "type-stat", &annotate.type_stat, + "Show stats for the data 
type annotation"), OPT_END() }; int ret; diff --git a/tools/perf/util/annotate-data.c b/tools/perf/util/annotate-data.c index df9689f46619..f22b4f18271c 100644 --- a/tools/perf/util/annotate-data.c +++ b/tools/perf/util/annotate-data.c @@ -199,6 +199,7 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset) /* Get the type of the variable */ if (die_get_real_type(var_die, type_die) == NULL) { pr_debug("variable has no type\n"); + ann_data_stat.no_typeinfo++; return -1; } @@ -209,18 +210,21 @@ static int check_variable(Dwarf_Die *var_die, Dwarf_Die *type_die, int offset) if (dwarf_tag(type_die) != DW_TAG_pointer_type || die_get_real_type(type_die, type_die) == NULL) { pr_debug("no pointer or no type\n"); + ann_data_stat.no_typeinfo++; return -1; } /* Get the size of the actual type */ if (dwarf_aggregate_size(type_die, &size) < 0) { pr_debug("type size is unknown\n"); + ann_data_stat.invalid_size++; return -1; } /* Minimal sanity check */ if ((unsigned)offset >= size) { pr_debug("offset: %d is bigger than size: %" PRIu64 "\n", offset, size); + ann_data_stat.bad_offset++; return -1; } @@ -239,6 +243,7 @@ static int find_data_type_die(struct debuginfo *di, u64 pc, /* Get a compile_unit for this address */ if (!find_cu_die(di, pc, &cu_die)) { pr_debug("cannot find CU for address %" PRIx64 "\n", pc); + ann_data_stat.no_cuinfo++; return -1; } @@ -253,9 +258,12 @@ static int find_data_type_die(struct debuginfo *di, u64 pc, /* Found a variable, see if it's correct */ ret = check_variable(&var_die, type_die, offset); - break; + goto out; } + if (ret < 0) + ann_data_stat.no_var++; +out: free(scopes); return ret; } diff --git a/tools/perf/util/annotate-data.h b/tools/perf/util/annotate-data.h index d2dc025b1934..8e73096c01d1 100644 --- a/tools/perf/util/annotate-data.h +++ b/tools/perf/util/annotate-data.h @@ -70,6 +70,37 @@ struct annotated_data_type { extern struct annotated_data_type unknown_type; +/** + * struct annotated_data_stat - Debug statistics + * @total: Total number of entry + * @no_sym: No symbol or map found + * @no_insn: Failed to get disasm line + * @no_insn_ops: The instruction has no operands + * @no_mem_ops: The instruction has no memory operands + * @no_reg: Failed to extract a register from the operand + * @no_dbginfo: The binary has no debug information + * @no_cuinfo: Failed to find a compile_unit + * @no_var: Failed to find a matching variable + * @no_typeinfo: Failed to get a type info for the variable + * @invalid_size: Failed to get a size info of the type + * @bad_offset: The access offset is out of the type + */ +struct annotated_data_stat { + int total; + int no_sym; + int no_insn; + int no_insn_ops; + int no_mem_ops; + int no_reg; + int no_dbginfo; + int no_cuinfo; + int no_var; + int no_typeinfo; + int invalid_size; + int bad_offset; +}; +extern struct annotated_data_stat ann_data_stat; + #ifdef HAVE_DWARF_SUPPORT /* Returns data type at the location (ip, reg, offset) */ diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 68424ee0215e..9870257ce21e 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -103,6 +103,9 @@ static struct ins_ops nop_ops; static struct ins_ops lock_ops; static struct ins_ops ret_ops; +/* Data type collection debug statistics */ +struct annotated_data_stat ann_data_stat; + static int arch__grow_instructions(struct arch *arch) { struct ins *new_instructions; @@ -3683,14 +3686,22 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) u64 ip = he->ip; 
int i; - if (ms->map == NULL || ms->sym == NULL) - return NULL; + ann_data_stat.total++; - if (!symbol_conf.init_annotation) + if (ms->map == NULL || ms->sym == NULL) { + ann_data_stat.no_sym++; return NULL; + } - if (evsel__get_arch(evsel, &arch) < 0) + if (!symbol_conf.init_annotation) { + ann_data_stat.no_sym++; return NULL; + } + + if (evsel__get_arch(evsel, &arch) < 0) { + ann_data_stat.no_insn++; + return NULL; + } /* Make sure it runs objdump to get disasm of the function */ symbol__ensure_annotate(ms, evsel); @@ -3700,11 +3711,15 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) * This is too slow... */ dl = find_disasm_line(ms->sym, ip); - if (dl == NULL) + if (dl == NULL) { + ann_data_stat.no_insn++; return NULL; + } - if (annotate_get_insn_location(arch, dl, &loc) < 0) + if (annotate_get_insn_location(arch, dl, &loc) < 0) { + ann_data_stat.no_insn_ops++; return NULL; + } for_each_insn_op_loc(&loc, i, op_loc) { if (!op_loc->mem_ref) @@ -3721,5 +3736,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) he->mem_type_off = op_loc->offset; return mem_type; } + + ann_data_stat.no_mem_ops++; return NULL; } From 58824fa0087e1cb732edbf1f112a5ea0b2205c8b Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 12 Dec 2023 16:13:23 -0800 Subject: [PATCH 163/179] perf annotate: Add --insn-stat option for debugging This is for a debugging purpose. It'd be useful to see per-instrucion level success/failure stats. $ perf annotate --data-type --insn-stat Annotate Instruction stats total 264, ok 143 (54.2%), bad 121 (45.8%) Name : Good Bad ----------------------------------------------------------- movq : 45 31 movl : 22 11 popq : 0 19 cmpl : 16 3 addq : 8 7 cmpq : 11 3 cmpxchgl : 3 7 cmpxchgq : 8 0 incl : 3 3 movzbl : 4 2 incq : 4 2 decl : 6 0 ... 
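As a rough, illustrative-only sketch of how such a Good/Bad table can be accumulated, the snippet below keeps one counter pair per instruction mnemonic in a plain linked list, looked up (or created) by name. The insn_stat type and its helpers are hypothetical stand-ins written for this note, not perf API; the patch itself adds the equivalent annotated_item_stat list in tools/perf/util/annotate.c, as the diff further down shows.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* One Good/Bad counter pair per instruction mnemonic. */
struct insn_stat {
	struct insn_stat *next;
	char name[16];
	int good;
	int bad;
};

static struct insn_stat *insn_stats;

/* Find the entry for @name, creating it on first use. */
static struct insn_stat *insn_stat__get(const char *name)
{
	struct insn_stat *s;

	for (s = insn_stats; s; s = s->next) {
		if (!strcmp(s->name, name))
			return s;
	}

	s = calloc(1, sizeof(*s));
	if (s == NULL)
		return NULL;

	snprintf(s->name, sizeof(s->name), "%s", name);
	s->next = insn_stats;
	insn_stats = s;
	return s;
}

/* Record whether data-type resolution succeeded for one instruction. */
static void insn_stat__update(const char *name, bool ok)
{
	struct insn_stat *s = insn_stat__get(name);

	if (s == NULL)
		return;
	if (ok)
		s->good++;
	else
		s->bad++;
}

Printing the table is then just a walk over the list plus the ok/bad percentages, which is essentially what the new print_annotate_item_stat() in builtin-annotate.c does.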
Committer notes: So these are about being able to find the type for accesses from these instructions, we should improve the naming, but it is for debugging, we can improve this later: @@ -3726,6 +3759,10 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) continue; mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset); + if (mem_type) + istat->good++; + else + istat->bad++; Signed-off-by: Namhyung Kim Cc: Adrian Hunter Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: Peter Zijlstra Cc: Stephane Eranian Cc: linux-toolchains@vger.kernel.org Cc: linux-trace-devel@vger.kernel.org Link: https://lore.kernel.org/r/20231213001323.718046-18-namhyung@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-annotate.c | 41 +++++++++++++++++++++++++++++++++++ tools/perf/util/annotate.c | 38 ++++++++++++++++++++++++++++++++ tools/perf/util/annotate.h | 8 +++++++ 3 files changed, 87 insertions(+) diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index 55f97ab1395b..6c1cc797692d 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -58,6 +58,7 @@ struct perf_annotate { bool group_set; bool data_type; bool type_stat; + bool insn_stat; float min_percent; const char *sym_hist_filter; const char *cpu_list; @@ -434,6 +435,42 @@ static void print_annotate_data_stat(struct annotated_data_stat *s) #undef PRINT_STAT } +static void print_annotate_item_stat(struct list_head *head, const char *title) +{ + struct annotated_item_stat *istat, *pos, *iter; + int total_good, total_bad, total; + int sum1, sum2; + LIST_HEAD(tmp); + + /* sort the list by count */ + list_splice_init(head, &tmp); + total_good = total_bad = 0; + + list_for_each_entry_safe(istat, pos, &tmp, list) { + total_good += istat->good; + total_bad += istat->bad; + sum1 = istat->good + istat->bad; + + list_for_each_entry(iter, head, list) { + sum2 = iter->good + iter->bad; + if (sum1 > sum2) + break; + } + list_move_tail(&istat->list, &iter->list); + } + total = total_good + total_bad; + + printf("Annotate %s stats\n", title); + printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n\n", total, + total_good, 100.0 * total_good / (total ?: 1), + total_bad, 100.0 * total_bad / (total ?: 1)); + printf(" %-10s: %5s %5s\n", "Name", "Good", "Bad"); + printf("-----------------------------------------------------------\n"); + list_for_each_entry(istat, head, list) + printf(" %-10s: %5d %5d\n", istat->name, istat->good, istat->bad); + printf("\n"); +} + static void hists__find_annotations(struct hists *hists, struct evsel *evsel, struct perf_annotate *ann) @@ -443,6 +480,8 @@ static void hists__find_annotations(struct hists *hists, if (ann->type_stat) print_annotate_data_stat(&ann_data_stat); + if (ann->insn_stat) + print_annotate_item_stat(&ann_insn_stat, "Instruction"); while (nd) { struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node); @@ -792,6 +831,8 @@ int cmd_annotate(int argc, const char **argv) parse_data_type), OPT_BOOLEAN(0, "type-stat", &annotate.type_stat, "Show stats for the data type annotation"), + OPT_BOOLEAN(0, "insn-stat", &annotate.insn_stat, + "Show instruction stats for the data type annotation"), OPT_END() }; int ret; diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 9870257ce21e..9b70ab110ce7 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -105,6 +105,7 @@ static struct ins_ops ret_ops; /* Data type collection debug statistics */ struct 
annotated_data_stat ann_data_stat; +LIST_HEAD(ann_insn_stat); static int arch__grow_instructions(struct arch *arch) { @@ -3665,6 +3666,30 @@ static struct disasm_line *find_disasm_line(struct symbol *sym, u64 ip) return NULL; } +static struct annotated_item_stat *annotate_data_stat(struct list_head *head, + const char *name) +{ + struct annotated_item_stat *istat; + + list_for_each_entry(istat, head, list) { + if (!strcmp(istat->name, name)) + return istat; + } + + istat = zalloc(sizeof(*istat)); + if (istat == NULL) + return NULL; + + istat->name = strdup(name); + if (istat->name == NULL) { + free(istat); + return NULL; + } + + list_add_tail(&istat->list, head); + return istat; +} + /** * hist_entry__get_data_type - find data type for given hist entry * @he: hist entry @@ -3683,6 +3708,7 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) struct annotated_insn_loc loc; struct annotated_op_loc *op_loc; struct annotated_data_type *mem_type; + struct annotated_item_stat *istat; u64 ip = he->ip; int i; @@ -3716,8 +3742,15 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) return NULL; } + istat = annotate_data_stat(&ann_insn_stat, dl->ins.name); + if (istat == NULL) { + ann_data_stat.no_insn++; + return NULL; + } + if (annotate_get_insn_location(arch, dl, &loc) < 0) { ann_data_stat.no_insn_ops++; + istat->bad++; return NULL; } @@ -3726,6 +3759,10 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) continue; mem_type = find_data_type(ms, ip, op_loc->reg, op_loc->offset); + if (mem_type) + istat->good++; + else + istat->bad++; if (symbol_conf.annotate_data_sample) { annotated_data_type__update_samples(mem_type, evsel, @@ -3738,5 +3775,6 @@ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he) } ann_data_stat.no_mem_ops++; + istat->bad++; return NULL; } diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 6c75b2832286..dba50762c6e8 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -479,4 +479,12 @@ int annotate_get_insn_location(struct arch *arch, struct disasm_line *dl, /* Returns a data type from the sample instruction (if any) */ struct annotated_data_type *hist_entry__get_data_type(struct hist_entry *he); +struct annotated_item_stat { + struct list_head list; + char *name; + int good; + int bad; +}; +extern struct list_head ann_insn_stat; + #endif /* __PERF_ANNOTATE_H */ From 9c51f8788b5d4e9f46afbcf563255cfd355690b3 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 17:46:55 -0800 Subject: [PATCH 164/179] perf env: Avoid recursively taking env->bpf_progs.lock Add variants of perf_env__insert_bpf_prog_info(), perf_env__insert_btf() and perf_env__find_btf prefixed with __ to indicate the env->bpf_progs.lock is assumed held. Call these variants when the lock is held to avoid recursively taking it and potentially having a thread deadlock with itself. 
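For readers unfamiliar with this locking convention, here is a minimal, self-contained sketch of the pattern being introduced, using a hypothetical struct res and a pthread rwlock purely for illustration (the real code guards env->bpf_progs.lock with perf's rwsem helpers): the double-underscore variant assumes the lock is already held, while the public wrapper only takes the lock and delegates, so a caller that already holds the lock can use the __ form without deadlocking on itself.

#include <pthread.h>

struct res {
	pthread_rwlock_t lock;
	int value;
};

/* Lock-held variant: the caller must already hold r->lock. */
static int __res_find(struct res *r, int key)
{
	return r->value == key;		/* stand-in for the rb-tree lookup */
}

/* Public variant: take the lock once, then delegate. */
static int res_find(struct res *r, int key)
{
	int ret;

	pthread_rwlock_rdlock(&r->lock);
	ret = __res_find(r, key);	/* no nested lock acquisition */
	pthread_rwlock_unlock(&r->lock);
	return ret;
}

In the patch below, paths that already hold the lock for the duration of a loop, such as process_bpf_prog_info() and process_bpf_btf(), switch to the __ variants, while external callers keep using the locking wrappers.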
Fixes: f8dfeae009effc0b ("perf bpf: Show more BPF program info in print_bpf_prog_info()") Signed-off-by: Ian Rogers Acked-by: Jiri Olsa Acked-by: Song Liu Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Huacai Chen Cc: Ingo Molnar Cc: K Prateek Nayak Cc: Kan Liang Cc: Mark Rutland Cc: Ming Wang Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Link: https://lore.kernel.org/r/20231207014655.1252484-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/bpf-event.c | 8 +++--- tools/perf/util/bpf-event.h | 12 ++++----- tools/perf/util/env.c | 50 ++++++++++++++++++++++++------------- tools/perf/util/env.h | 4 +++ tools/perf/util/header.c | 8 +++--- 5 files changed, 50 insertions(+), 32 deletions(-) diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c index 830711cae30d..3573e0b7ef3e 100644 --- a/tools/perf/util/bpf-event.c +++ b/tools/perf/util/bpf-event.c @@ -545,9 +545,9 @@ int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env) return evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env); } -void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, - struct perf_env *env, - FILE *fp) +void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, + struct perf_env *env, + FILE *fp) { __u32 *prog_lens = (__u32 *)(uintptr_t)(info->jited_func_lens); __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms); @@ -563,7 +563,7 @@ void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, if (info->btf_id) { struct btf_node *node; - node = perf_env__find_btf(env, info->btf_id); + node = __perf_env__find_btf(env, info->btf_id); if (node) btf = btf__new((__u8 *)(node->data), node->data_size); diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h index 1bcbd4fb6c66..e2f0420905f5 100644 --- a/tools/perf/util/bpf-event.h +++ b/tools/perf/util/bpf-event.h @@ -33,9 +33,9 @@ struct btf_node { int machine__process_bpf(struct machine *machine, union perf_event *event, struct perf_sample *sample); int evlist__add_bpf_sb_event(struct evlist *evlist, struct perf_env *env); -void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, - struct perf_env *env, - FILE *fp); +void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info, + struct perf_env *env, + FILE *fp); #else static inline int machine__process_bpf(struct machine *machine __maybe_unused, union perf_event *event __maybe_unused, @@ -50,9 +50,9 @@ static inline int evlist__add_bpf_sb_event(struct evlist *evlist __maybe_unused, return 0; } -static inline void bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused, - struct perf_env *env __maybe_unused, - FILE *fp __maybe_unused) +static inline void __bpf_event__print_bpf_prog_info(struct bpf_prog_info *info __maybe_unused, + struct perf_env *env __maybe_unused, + FILE *fp __maybe_unused) { } diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index c68b7a004f29..a459374d0a1a 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -24,13 +24,19 @@ struct perf_env perf_env; void perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node) +{ + down_write(&env->bpf_progs.lock); + __perf_env__insert_bpf_prog_info(env, info_node); + up_write(&env->bpf_progs.lock); +} + +void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node) { __u32 prog_id = info_node->info_linear->info.id; struct bpf_prog_info_node *node; struct rb_node *parent = NULL; struct rb_node **p; - down_write(&env->bpf_progs.lock); p 
= &env->bpf_progs.infos.rb_node; while (*p != NULL) { @@ -42,15 +48,13 @@ void perf_env__insert_bpf_prog_info(struct perf_env *env, p = &(*p)->rb_right; } else { pr_debug("duplicated bpf prog info %u\n", prog_id); - goto out; + return; } } rb_link_node(&info_node->rb_node, parent, p); rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos); env->bpf_progs.infos_cnt++; -out: - up_write(&env->bpf_progs.lock); } struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, @@ -79,14 +83,22 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, } bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) +{ + bool ret; + + down_write(&env->bpf_progs.lock); + ret = __perf_env__insert_btf(env, btf_node); + up_write(&env->bpf_progs.lock); + return ret; +} + +bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) { struct rb_node *parent = NULL; __u32 btf_id = btf_node->id; struct btf_node *node; struct rb_node **p; - bool ret = true; - down_write(&env->bpf_progs.lock); p = &env->bpf_progs.btfs.rb_node; while (*p != NULL) { @@ -98,25 +110,31 @@ bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node) p = &(*p)->rb_right; } else { pr_debug("duplicated btf %u\n", btf_id); - ret = false; - goto out; + return false; } } rb_link_node(&btf_node->rb_node, parent, p); rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs); env->bpf_progs.btfs_cnt++; -out: - up_write(&env->bpf_progs.lock); - return ret; + return true; } struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) +{ + struct btf_node *res; + + down_read(&env->bpf_progs.lock); + res = __perf_env__find_btf(env, btf_id); + up_read(&env->bpf_progs.lock); + return res; +} + +struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id) { struct btf_node *node = NULL; struct rb_node *n; - down_read(&env->bpf_progs.lock); n = env->bpf_progs.btfs.rb_node; while (n) { @@ -126,13 +144,9 @@ struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id) else if (btf_id > node->id) n = n->rb_right; else - goto out; + return node; } - node = NULL; - -out: - up_read(&env->bpf_progs.lock); - return node; + return NULL; } /* purge data in bpf_progs.infos tree */ diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h index bf7e3c4c211f..7c527e65c186 100644 --- a/tools/perf/util/env.h +++ b/tools/perf/util/env.h @@ -175,12 +175,16 @@ const char *perf_env__raw_arch(struct perf_env *env); int perf_env__nr_cpus_avail(struct perf_env *env); void perf_env__init(struct perf_env *env); +void __perf_env__insert_bpf_prog_info(struct perf_env *env, + struct bpf_prog_info_node *info_node); void perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node); struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env, __u32 prog_id); bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node); +bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node); struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id); +struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id); int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu); char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name, diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index a9f71f8343f0..3fe28edc3d01 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1849,8 +1849,8 @@ static void 
print_bpf_prog_info(struct feat_fd *ff, FILE *fp) node = rb_entry(next, struct bpf_prog_info_node, rb_node); next = rb_next(&node->rb_node); - bpf_event__print_bpf_prog_info(&node->info_linear->info, - env, fp); + __bpf_event__print_bpf_prog_info(&node->info_linear->info, + env, fp); } up_read(&env->bpf_progs.lock); @@ -3188,7 +3188,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused) /* after reading from file, translate offset to address */ bpil_offs_to_addr(info_linear); info_node->info_linear = info_linear; - perf_env__insert_bpf_prog_info(env, info_node); + __perf_env__insert_bpf_prog_info(env, info_node); } up_write(&env->bpf_progs.lock); @@ -3235,7 +3235,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused) if (__do_read(ff, node->data, data_size)) goto out; - perf_env__insert_btf(env, node); + __perf_env__insert_btf(env, node); node = NULL; } From 7d1405c71df21f6c394b8a885aa8a133f749fa22 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 6 Dec 2023 18:16:27 -0800 Subject: [PATCH 165/179] perf record: Reduce memory for recording PERF_RECORD_LOST_SAMPLES event Reduce from PERF_SAMPLE_MAX_SIZE to "sizeof(*lost) + session->machines.host.id_hdr_size". Suggested-by: Namhyung Kim Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231207021627.1322884-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-record.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index a89013c44fd5..91e6828c38cc 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c @@ -1954,7 +1954,8 @@ static void record__read_lost_samples(struct record *rec) if (count.lost) { if (!lost) { - lost = zalloc(PERF_SAMPLE_MAX_SIZE); + lost = zalloc(sizeof(*lost) + + session->machines.host.id_hdr_size); if (!lost) { pr_debug("Memory allocation failed\n"); return; @@ -1970,7 +1971,8 @@ static void record__read_lost_samples(struct record *rec) lost_count = perf_bpf_filter__lost_count(evsel); if (lost_count) { if (!lost) { - lost = zalloc(PERF_SAMPLE_MAX_SIZE); + lost = zalloc(sizeof(*lost) + + session->machines.host.id_hdr_size); if (!lost) { pr_debug("Memory allocation failed\n"); return; From f2567e12a090f0eb22553a4468d4c4fe04aad906 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Mon, 11 Dec 2023 10:12:41 -0800 Subject: [PATCH 166/179] perf stat: Fix hard coded LL miss units Copy-paste error where LL cache misses are reported as l1i. 
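The mis-labelled annotation shows up in the shadow metrics printed by 'perf stat'. A quick way to see it is a run along the lines of the sketch below (legacy LLC event names; event availability and the exact ratio formatting depend on the machine), where the miss line should read "of all LL-cache accesses" rather than "of all L1-icache accesses":

  # count LL-cache accesses and misses so the shadow ratio annotation is printed
  perf stat -e LLC-loads,LLC-load-misses -- sleep 1
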
Fixes: 0a57b910807ad163 ("perf stat: Use counts rather than saved_value") Suggested-by: Guillaume Endignoux Reviewed-by: Kan Liang Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ian Rogers Cc: Ingo Molnar Cc: Jiri Olsa Cc: John Garry Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231211181242.1721059-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/stat-shadow.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 1c5c3eeba4cf..e31426167852 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -264,7 +264,7 @@ static void print_ll_miss(struct perf_stat_config *config, static const double color_ratios[3] = {20.0, 10.0, 5.0}; print_ratio(config, evsel, aggr_idx, misses, out, STAT_LL_CACHE, color_ratios, - "of all L1-icache accesses"); + "of all LL-cache accesses"); } static void print_dtlb_miss(struct perf_stat_config *config, From 346878dacc81f53667381c8f4bb5018195ca10be Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Wed, 29 Nov 2023 11:55:04 +0530 Subject: [PATCH 167/179] perf vendor events amd: Add Zen 4 memory controller events Make the jevents parser aware of the Unified Memory Controller (UMC) PMU and add events taken from Section 8.2.1 "UMC Performance Monitor Events" of the Processor Programming Reference (PPR) for AMD Family 19h Model 11h processors. The events capture UMC command activity such as CAS, ACTIVATE, PRECHARGE etc. while the metrics derive data bus utilization and memory bandwidth out of these events. Signed-off-by: Sandipan Das Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ananth Narayan Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Stephane Eranian Link: https://lore.kernel.org/r/e0d8a7e8ca8ee3e378d8029e80b456ac327d6419.1701238314.git.sandipan.das@amd.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/x86/amdzen4/memory-controller.json | 101 ++++++++++++++++++ .../arch/x86/amdzen4/recommended.json | 84 +++++++++++++++ tools/perf/pmu-events/jevents.py | 2 + 3 files changed, 187 insertions(+) create mode 100644 tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json b/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json new file mode 100644 index 000000000000..55263e5e4f69 --- /dev/null +++ b/tools/perf/pmu-events/arch/x86/amdzen4/memory-controller.json @@ -0,0 +1,101 @@ +[ + { + "EventName": "umc_mem_clk", + "PublicDescription": "Number of memory clock cycles.", + "EventCode": "0x00", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.all", + "PublicDescription": "Number of ACTIVATE commands sent.", + "EventCode": "0x05", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.rd", + "PublicDescription": "Number of ACTIVATE commands sent for reads.", + "EventCode": "0x05", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_act_cmd.wr", + "PublicDescription": "Number of ACTIVATE commands sent for writes.", + "EventCode": "0x05", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.all", + "PublicDescription": "Number of PRECHARGE commands sent.", + "EventCode": "0x06", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.rd", + "PublicDescription": "Number of 
PRECHARGE commands sent for reads.", + "EventCode": "0x06", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_pchg_cmd.wr", + "PublicDescription": "Number of PRECHARGE commands sent for writes.", + "EventCode": "0x06", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.all", + "PublicDescription": "Number of CAS commands sent.", + "EventCode": "0x0a", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.rd", + "PublicDescription": "Number of CAS commands sent for reads.", + "EventCode": "0x0a", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_cas_cmd.wr", + "PublicDescription": "Number of CAS commands sent for writes.", + "EventCode": "0x0a", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.all", + "PublicDescription": "Number of clocks used by the data bus.", + "EventCode": "0x14", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.rd", + "PublicDescription": "Number of clocks used by the data bus for reads.", + "EventCode": "0x14", + "RdWrMask": "0x1", + "PerPkg": "1", + "Unit": "UMCPMC" + }, + { + "EventName": "umc_data_slot_clks.wr", + "PublicDescription": "Number of clocks used by the data bus for writes.", + "EventCode": "0x14", + "RdWrMask": "0x2", + "PerPkg": "1", + "Unit": "UMCPMC" + } +] diff --git a/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json index 5e6a793acf7b..96e06401c6cb 100644 --- a/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json +++ b/tools/perf/pmu-events/arch/x86/amdzen4/recommended.json @@ -330,5 +330,89 @@ "MetricGroup": "data_fabric", "PerPkg": "1", "ScaleUnit": "6.103515625e-5MiB" + }, + { + "MetricName": "umc_data_bus_utilization", + "BriefDescription": "Memory controller data bus utilization.", + "MetricExpr": "d_ratio(umc_data_slot_clks.all / 2, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_rate", + "BriefDescription": "Memory controller CAS command rate.", + "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1" + }, + { + "MetricName": "umc_cas_cmd_read_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for reads.", + "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_write_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for writes.", + "MetricExpr": "d_ratio(umc_cas_cmd.wr, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_mem_read_bandwidth", + "BriefDescription": "Estimated memory read bandwidth.", + "MetricExpr": "(umc_cas_cmd.rd * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_write_bandwidth", + "BriefDescription": "Estimated memory write bandwidth.", + "MetricExpr": "(umc_cas_cmd.wr * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_mem_bandwidth", + "BriefDescription": "Estimated combined memory bandwidth.", + "MetricExpr": "(umc_cas_cmd.all * 64) / 1e6 / duration_time", + "MetricGroup": "memory_controller", + 
"PerPkg": "1", + "ScaleUnit": "1MB/s" + }, + { + "MetricName": "umc_cas_cmd_read_ratio", + "BriefDescription": "Ratio of memory controller CAS commands for reads.", + "MetricExpr": "d_ratio(umc_cas_cmd.rd, umc_cas_cmd.all)", + "MetricGroup": "memory_controller", + "PerPkg": "1", + "ScaleUnit": "100%" + }, + { + "MetricName": "umc_cas_cmd_rate", + "BriefDescription": "Memory controller CAS command rate.", + "MetricExpr": "d_ratio(umc_cas_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1" + }, + { + "MetricName": "umc_activate_cmd_rate", + "BriefDescription": "Memory controller ACTIVATE command rate.", + "MetricExpr": "d_ratio(umc_act_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1" + }, + { + "MetricName": "umc_precharge_cmd_rate", + "BriefDescription": "Memory controller PRECHARGE command rate.", + "MetricExpr": "d_ratio(umc_pchg_cmd.all * 1000, umc_mem_clk)", + "MetricGroup": "memory_controller", + "PerPkg": "1" } ] diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py index 0093c998cb6e..53ab050c8fa4 100755 --- a/tools/perf/pmu-events/jevents.py +++ b/tools/perf/pmu-events/jevents.py @@ -286,6 +286,7 @@ class JsonEvent: 'imx8_ddr': 'imx8_ddr', 'L3PMC': 'amd_l3', 'DFPMC': 'amd_df', + 'UMCPMC': 'amd_umc', 'cpu_core': 'cpu_core', 'cpu_atom': 'cpu_atom', 'ali_drw': 'ali_drw', @@ -354,6 +355,7 @@ class JsonEvent: ('SampleAfterValue', 'period='), ('UMask', 'umask='), ('NodeType', 'type='), + ('RdWrMask', 'rdwrmask='), ] for key, value in event_fields: if key in jd and jd[key] != '0': From eb00697b91646de22d6d9b4e6a5fadf4495fdf69 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 09:01:58 -0800 Subject: [PATCH 168/179] perf x86 test: Update hybrid expectations The legacy events cpu-cycles and instructions have sysfs event equivalents on x86 (see /sys/devices/cpu_core/events). As sysfs/JSON events are now higher in priority than legacy events this causes the hybrid test expectations not to be met. To fix this switch to legacy events that don't have sysfs versions, namely cpu-cycles becomes cycles and instructions becomes branches. 
Fixes: a24d9d9dc096fc0d ("perf parse-events: Make legacy events lower priority than sysfs/JSON") Reported-by: Arnaldo Carvalho de Melo Reviewed-by: Kan Liang Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Closes: https://lore.kernel.org/lkml/ZYbm5L7tw7bdpDpE@kernel.org/ Link: https://lore.kernel.org/r/20240103170159.1435753-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/tests/hybrid.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/perf/arch/x86/tests/hybrid.c b/tools/perf/arch/x86/tests/hybrid.c index eb152770f148..05a5f81e8167 100644 --- a/tools/perf/arch/x86/tests/hybrid.c +++ b/tools/perf/arch/x86/tests/hybrid.c @@ -47,7 +47,7 @@ static int test__hybrid_hw_group_event(struct evlist *evlist) evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW)); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS)); TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); return TEST_OK; } @@ -102,7 +102,7 @@ static int test__hybrid_group_modifier1(struct evlist *evlist) evsel = evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW)); - TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_INSTRUCTIONS)); + TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_BRANCH_INSTRUCTIONS)); TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); TEST_ASSERT_VAL("wrong exclude_user", !evsel->core.attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->core.attr.exclude_kernel); @@ -171,27 +171,27 @@ struct evlist_test { static const struct evlist_test test__hybrid_events[] = { { - .name = "cpu_core/cpu-cycles/", + .name = "cpu_core/cycles/", .check = test__hybrid_hw_event_with_pmu, /* 0 */ }, { - .name = "{cpu_core/cpu-cycles/,cpu_core/instructions/}", + .name = "{cpu_core/cycles/,cpu_core/branches/}", .check = test__hybrid_hw_group_event, /* 1 */ }, { - .name = "{cpu-clock,cpu_core/cpu-cycles/}", + .name = "{cpu-clock,cpu_core/cycles/}", .check = test__hybrid_sw_hw_group_event, /* 2 */ }, { - .name = "{cpu_core/cpu-cycles/,cpu-clock}", + .name = "{cpu_core/cycles/,cpu-clock}", .check = test__hybrid_hw_sw_group_event, /* 3 */ }, { - .name = "{cpu_core/cpu-cycles/k,cpu_core/instructions/u}", + .name = "{cpu_core/cycles/k,cpu_core/branches/u}", .check = test__hybrid_group_modifier1, /* 4 */ }, From ec5257d99e6894d65fae772ca43c53b3d6855115 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 09:01:59 -0800 Subject: [PATCH 169/179] perf x86 test: Add hybrid test for conflicting legacy/sysfs event The cpu-cycles event is both a legacy event and declared in /sys/devices/cpu_core/events/cpu-cycles. The cycles event is a legacy event but with no sysfs version. Add a test that the sysfs version is preferred to the legacy for cpu-cycles, while for cycles we use the legacy version. 
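The expected resolution can also be checked by hand with a verbose run of the same group, along the lines of this sketch (the perf_event_attr dumps are only printed with -vv; exact type/config values depend on the CPU):

  # cycles is expected to come out as the legacy PERF_TYPE_HARDWARE event,
  # cpu-cycles as the cpu_core PMU encoding taken from sysfs (config 0x3c)
  perf stat -vv -e '{cpu_core/cycles/,cpu_core/cpu-cycles/}' true
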
Suggested-by: Kan Liang Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20240103170159.1435753-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/tests/hybrid.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/perf/arch/x86/tests/hybrid.c b/tools/perf/arch/x86/tests/hybrid.c index 05a5f81e8167..40f5d17fedab 100644 --- a/tools/perf/arch/x86/tests/hybrid.c +++ b/tools/perf/arch/x86/tests/hybrid.c @@ -163,6 +163,24 @@ static int test__checkevent_pmu(struct evlist *evlist) return TEST_OK; } +static int test__hybrid_hw_group_event_2(struct evlist *evlist) +{ + struct evsel *evsel, *leader; + + evsel = leader = evlist__first(evlist); + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->core.nr_entries); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->core.attr.type); + TEST_ASSERT_VAL("wrong hybrid type", test_hybrid_type(evsel, PERF_TYPE_RAW)); + TEST_ASSERT_VAL("wrong config", test_config(evsel, PERF_COUNT_HW_CPU_CYCLES)); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); + + evsel = evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->core.attr.type); + TEST_ASSERT_VAL("wrong config", evsel->core.attr.config == 0x3c); + TEST_ASSERT_VAL("wrong leader", evsel__has_leader(evsel, leader)); + return TEST_OK; +} + struct evlist_test { const char *name; bool (*valid)(void); @@ -215,6 +233,11 @@ static const struct evlist_test test__hybrid_events[] = { .check = test__hybrid_cache_event, /* 8 */ }, + { + .name = "{cpu_core/cycles/,cpu_core/cpu-cycles/}", + .check = test__hybrid_hw_group_event_2, + /* 9 */ + }, }; static int test_event(const struct evlist_test *e) From 982b6acec662ff06e9e89c61838f297f6544a84d Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 23:42:56 -0800 Subject: [PATCH 170/179] perf vendor events intel: Alderlake/rocketlake metric fixes Fix that the core PMU is being specified for 2 uncore events. Specify a PMU for the alderlake UNCORE_FREQ metric. Conversion script updated in: https://github.com/intel/perfmon/pull/126 Committer testing: Before this patch the "perf all metricgroups test" was failing, now: root@number:~# perf test metric 10: PMU events : 10.3: Parsing of PMU event table metrics : Ok 10.4: Parsing of PMU event table metrics with fake PMUs : Ok 10.5: Parsing of metric thresholds with fake PMUs : Ok 61: Parse and process metrics : Ok 98: perf stat metrics (shadow stat) test : Skip 101: perf all metricgroups test : Ok 102: perf all metrics test : FAILED! 
107: perf metrics value validation : Ok root@number:~# Test 102 is failing for another reason, not being able to get as many counters as needed, Ian Rogers suggested disabling the NMI watchdog to have more counters available: root@number:/home/acme# cat /proc/sys/kernel/nmi_watchdog 1 root@number:/home/acme# echo 0 > /proc/sys/kernel/nmi_watchdog root@number:/home/acme# perf test 102 102: perf all metrics test : Ok root@number:/home/acme# Closes: https://lore.kernel.org/lkml/ZZWOdHXJJ_oecWwm@kernel.org/ Reported-by: Arnaldo Carvalho de Melo Reviewed-by: Kan Liang Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Edward Baker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20240104074259.653219-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- .../arch/x86/alderlake/adl-metrics.json | 15 ++++++++------- .../arch/x86/rocketlake/rkl-metrics.json | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json index 3388b58b8f1a..35124a4ddcb2 100644 --- a/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/alderlake/adl-metrics.json @@ -69,12 +69,6 @@ "MetricName": "C9_Pkg_Residency", "ScaleUnit": "100%" }, - { - "BriefDescription": "Uncore frequency per die [GHZ]", - "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9", - "MetricGroup": "SoC", - "MetricName": "UNCORE_FREQ" - }, { "BriefDescription": "Percentage of cycles spent in System Management Interrupts.", "MetricExpr": "((msr@aperf@ - cycles) / msr@aperf@ if msr@smi@ > 0 else 0)", @@ -809,6 +803,13 @@ "ScaleUnit": "100%", "Unit": "cpu_atom" }, + { + "BriefDescription": "Uncore frequency per die [GHZ]", + "MetricExpr": "tma_info_system_socket_clks / #num_dies / duration_time / 1e9", + "MetricGroup": "SoC", + "MetricName": "UNCORE_FREQ", + "Unit": "cpu_core" + }, { "BriefDescription": "This metric represents Core fraction of cycles CPU dispatched uops on execution ports for ALU operations.", "MetricExpr": "(cpu_core@UOPS_DISPATCHED.PORT_0@ + cpu_core@UOPS_DISPATCHED.PORT_1@ + cpu_core@UOPS_DISPATCHED.PORT_5_11@ + cpu_core@UOPS_DISPATCHED.PORT_6@) / (5 * tma_info_core_core_clks)", @@ -1838,7 +1839,7 @@ }, { "BriefDescription": "Average number of parallel data read requests to external memory", - "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu_core@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@", + "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@", "MetricGroup": "Mem;MemoryBW;SoC", "MetricName": "tma_info_system_mem_parallel_reads", "PublicDescription": "Average number of parallel data read requests to external memory. 
Accounts for demand loads and L1/L2 prefetches", diff --git a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json index 0c880e415669..27433fc15ede 100644 --- a/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json +++ b/tools/perf/pmu-events/arch/x86/rocketlake/rkl-metrics.json @@ -985,7 +985,7 @@ }, { "BriefDescription": "Average number of parallel data read requests to external memory", - "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / cpu@UNC_ARB_DAT_OCCUPANCY.RD\\,cmask\\=1@", + "MetricExpr": "UNC_ARB_DAT_OCCUPANCY.RD / UNC_ARB_DAT_OCCUPANCY.RD@cmask\\=1@", "MetricGroup": "Mem;MemoryBW;SoC", "MetricName": "tma_info_system_mem_parallel_reads", "PublicDescription": "Average number of parallel data read requests to external memory. Accounts for demand loads and L1/L2 prefetches" From 576d7fed09c7edbae7600f29a8a3ed6c1ead904f Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 23:42:57 -0800 Subject: [PATCH 171/179] perf vendor events intel: Update emeraldrapids events to v1.02 Update to v1.02 released in: https://github.com/intel/perfmon/pull/123 Removes events AMX_OPS_RETIRED.BF16 and AMX_OPS_RETIRED.INT8. Add events FP_ARITH_DISPATCHED.V0, FP_ARITH_DISPATCHED.V1, FP_ARITH_DISPATCHED.V2, UNC_IIO_IOMMU0.1G_HITS, UNC_IIO_IOMMU0.2M_HITS and UNC_IIO_IOMMU0.4K_HITS. Description updates. Signed-off-by: Ian Rogers Reviewed-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Edward Baker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20240104074259.653219-2-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- .../x86/emeraldrapids/floating-point.json | 27 +++++++++++++++-- .../arch/x86/emeraldrapids/pipeline.json | 18 +---------- .../emeraldrapids/uncore-interconnect.json | 8 ++--- .../arch/x86/emeraldrapids/uncore-io.json | 30 +++++++++++++++++++ tools/perf/pmu-events/arch/x86/mapfile.csv | 2 +- 5 files changed, 60 insertions(+), 25 deletions(-) diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json index 4a9d211e9d4f..1bdefaf96287 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/floating-point.json @@ -23,26 +23,47 @@ "UMask": "0x10" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_0", "SampleAfterValue": "2000003", "UMask": "0x1" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_1", "SampleAfterValue": "2000003", "UMask": "0x2" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_5", "SampleAfterValue": "2000003", "UMask": "0x4" }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V0", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]", + "EventCode": "0xb3", + 
"EventName": "FP_ARITH_DISPATCHED.V1", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V2", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, { "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", "EventCode": "0xc7", diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json index 6dcf3b763af4..1f8200fb8964 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/pipeline.json @@ -1,20 +1,4 @@ [ - { - "BriefDescription": "AMX retired arithmetic BF16 operations.", - "EventCode": "0xce", - "EventName": "AMX_OPS_RETIRED.BF16", - "PublicDescription": "Number of AMX-based retired arithmetic bfloat16 (BF16) floating-point operations. Counts TDPBF16PS FP instructions. SW to use operation multiplier of 4", - "SampleAfterValue": "1000003", - "UMask": "0x2" - }, - { - "BriefDescription": "AMX retired arithmetic integer 8-bit operations.", - "EventCode": "0xce", - "EventName": "AMX_OPS_RETIRED.INT8", - "PublicDescription": "Number of AMX-based retired arithmetic integer operations of 8-bit width source operands. Counts TDPB[SS,UU,US,SU]D instructions. SW should use operation multiplier of 8.", - "SampleAfterValue": "1000003", - "UMask": "0x1" - }, { "BriefDescription": "This event is deprecated. 
Refer to new event ARITH.DIV_ACTIVE", "CounterMask": "1", @@ -505,7 +489,7 @@ "UMask": "0x1" }, { - "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES", + "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).", "EventCode": "0xad", "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES", "MSRIndex": "0x3F7", diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json index 09d840c7da4c..65d088556bae 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-interconnect.json @@ -4825,11 +4825,11 @@ "Unit": "M3UPI" }, { - "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)", + "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)", "EventCode": "0x47", "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC", "PerPkg": "1", - "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress", + "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress", "UMask": "0x1", "Unit": "MDF" }, @@ -4861,11 +4861,11 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)", + "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)", "EventCode": "0x47", "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC", "PerPkg": "1", - "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress", + "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress", "UMask": "0x4", "Unit": "MDF" }, diff --git a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json index 557080b74ee5..0761980c34a0 100644 --- a/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json +++ b/tools/perf/pmu-events/arch/x86/emeraldrapids/uncore-io.json @@ -1185,6 +1185,36 @@ "UMask": "0x70ff010", "Unit": "IIO" }, + { + "BriefDescription": ": IOTLB Hits to a 1G Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.1G_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": ": IOTLB Hits to a 2M Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.2M_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": ": IOTLB Hits to a 4K Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.4K_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.", + "UMask": "0x4", + "Unit": "IIO" + }, { "BriefDescription": ": Context cache hits", "EventCode": "0x40", diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index e571683f59f3..fd38c516c048 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -7,7 +7,7 @@ GenuineIntel-6-56,v11,broadwellde,core 
GenuineIntel-6-4F,v22,broadwellx,core GenuineIntel-6-55-[56789ABCDEF],v1.20,cascadelakex,core GenuineIntel-6-9[6C],v1.04,elkhartlake,core -GenuineIntel-6-CF,v1.01,emeraldrapids,core +GenuineIntel-6-CF,v1.02,emeraldrapids,core GenuineIntel-6-5[CF],v13,goldmont,core GenuineIntel-6-7A,v1.01,goldmontplus,core GenuineIntel-6-B6,v1.00,grandridge,core From 8550506887a93cd6ff4f6b44a08e8c2cc7a4d481 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 23:42:58 -0800 Subject: [PATCH 172/179] perf vendor events intel: Update icelakex events to v1.23 Update to v1.23 released in: https://github.com/intel/perfmon/pull/123 Updates to event descriptions. Signed-off-by: Ian Rogers Reviewed-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Edward Baker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20240104074259.653219-3-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/x86/icelakex/other.json | 2 +- tools/perf/pmu-events/arch/x86/icelakex/pipeline.json | 2 +- .../pmu-events/arch/x86/icelakex/uncore-interconnect.json | 6 +++--- tools/perf/pmu-events/arch/x86/mapfile.csv | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/perf/pmu-events/arch/x86/icelakex/other.json b/tools/perf/pmu-events/arch/x86/icelakex/other.json index 63d5faf2fc43..11810daaf150 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/other.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/other.json @@ -19,7 +19,7 @@ "BriefDescription": "Core cycles where the core was running in a manner where Turbo may be clipped to the AVX512 turbo schedule.", "EventCode": "0x28", "EventName": "CORE_POWER.LVL2_TURBO_LICENSE", - "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchtecture). This includes high current AVX 512-bit instructions.", + "PublicDescription": "Core cycles where the core was running with power-delivery for license level 2 (introduced in Skylake Server microarchitecture). This includes high current AVX 512-bit instructions.", "SampleAfterValue": "200003", "UMask": "0x20" }, diff --git a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json index 176e5ef2a24a..45ee6bceba7f 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/pipeline.json @@ -519,7 +519,7 @@ "BriefDescription": "Cycles when Reservation Station (RS) is empty for the thread", "EventCode": "0x5e", "EventName": "RS_EVENTS.EMPTY_CYCLES", - "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into stravation periods (e.g. branch mispredictions or i-cache misses)", + "PublicDescription": "Counts cycles during which the reservation station (RS) is empty for this logical processor. This is usually caused when the front-end pipeline runs into starvation periods (e.g. 
branch mispredictions or i-cache misses)", "SampleAfterValue": "1000003", "UMask": "0x1" }, diff --git a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json index f87ea3f66d1b..a066a009c511 100644 --- a/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/icelakex/uncore-interconnect.json @@ -38,7 +38,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.CLFLUSH", "PerPkg": "1", - "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Coherent Ops : CLFlush : Counts the number of coherency related operations serviced by the IRP", "UMask": "0x80", "Unit": "IRP" }, @@ -65,7 +65,7 @@ "EventCode": "0x10", "EventName": "UNC_I_COHERENT_OPS.WBMTOI", "PerPkg": "1", - "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations servied by the IRP", + "PublicDescription": "Coherent Ops : WbMtoI : Counts the number of coherency related operations serviced by the IRP", "UMask": "0x40", "Unit": "IRP" }, @@ -454,7 +454,7 @@ "EventCode": "0x11", "EventName": "UNC_I_TRANSACTIONS.WRITES", "PerPkg": "1", - "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Trackes only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", + "PublicDescription": "Inbound Transaction Count : Writes : Counts the number of Inbound transactions from the IRP to the Uncore. This can be filtered based on request type in addition to the source queue. Note the special filtering equation. We do OR-reduction on the request type. If the SOURCE bit is set, then we also do AND qualification based on the source portID. : Tracks only write requests. Each write request should have a prefetch, so there is no need to explicitly track these requests. For writes that are tickled and have to retry, the counter will be incremented for each retry.", "UMask": "0x2", "Unit": "IRP" }, diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index fd38c516c048..c1820eb16a19 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -15,7 +15,7 @@ GenuineIntel-6-A[DE],v1.01,graniterapids,core GenuineIntel-6-(3C|45|46),v33,haswell,core GenuineIntel-6-3F,v28,haswellx,core GenuineIntel-6-7[DE],v1.19,icelake,core -GenuineIntel-6-6[AC],v1.21,icelakex,core +GenuineIntel-6-6[AC],v1.23,icelakex,core GenuineIntel-6-3A,v24,ivybridge,core GenuineIntel-6-3E,v24,ivytown,core GenuineIntel-6-2D,v24,jaketown,core From 360b045fceb282fb21b66131c396b0f85759ddb7 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Wed, 3 Jan 2024 23:42:59 -0800 Subject: [PATCH 173/179] perf vendor events intel: Update sapphirerapids events to v1.17 Update to v1.17 released in: https://github.com/intel/perfmon/pull/123 Add events FP_ARITH_DISPATCHED.V0, FP_ARITH_DISPATCHED.V1, FP_ARITH_DISPATCHED.V2, UNC_IIO_IOMMU0.1G_HITS, UNC_IIO_IOMMU0.2M_HITS and UNC_IIO_IOMMU0.4K_HITS. 
Description updates. Signed-off-by: Ian Rogers Reviewed-by: Kan Liang Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Edward Baker Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20240104074259.653219-4-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/pmu-events/arch/x86/mapfile.csv | 2 +- .../x86/sapphirerapids/floating-point.json | 27 +++++++++++++++-- .../arch/x86/sapphirerapids/pipeline.json | 2 +- .../sapphirerapids/uncore-interconnect.json | 8 ++--- .../arch/x86/sapphirerapids/uncore-io.json | 30 +++++++++++++++++++ 5 files changed, 60 insertions(+), 9 deletions(-) diff --git a/tools/perf/pmu-events/arch/x86/mapfile.csv b/tools/perf/pmu-events/arch/x86/mapfile.csv index c1820eb16a19..4d1deed4437a 100644 --- a/tools/perf/pmu-events/arch/x86/mapfile.csv +++ b/tools/perf/pmu-events/arch/x86/mapfile.csv @@ -26,7 +26,7 @@ GenuineIntel-6-1[AEF],v4,nehalemep,core GenuineIntel-6-2E,v4,nehalemex,core GenuineIntel-6-A7,v1.01,rocketlake,core GenuineIntel-6-2A,v19,sandybridge,core -GenuineIntel-6-8F,v1.16,sapphirerapids,core +GenuineIntel-6-8F,v1.17,sapphirerapids,core GenuineIntel-6-AF,v1.00,sierraforest,core GenuineIntel-6-(37|4A|4C|4D|5A),v15,silvermont,core GenuineIntel-6-(4E|5E|8E|9E|A5|A6),v57,skylake,core diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json index 4a9d211e9d4f..1bdefaf96287 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/floating-point.json @@ -23,26 +23,47 @@ "UMask": "0x10" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_0 [This event is alias to FP_ARITH_DISPATCHED.V0]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_0", "SampleAfterValue": "2000003", "UMask": "0x1" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_1 [This event is alias to FP_ARITH_DISPATCHED.V1]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_1", "SampleAfterValue": "2000003", "UMask": "0x2" }, { - "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5", + "BriefDescription": "FP_ARITH_DISPATCHED.PORT_5 [This event is alias to FP_ARITH_DISPATCHED.V2]", "EventCode": "0xb3", "EventName": "FP_ARITH_DISPATCHED.PORT_5", "SampleAfterValue": "2000003", "UMask": "0x4" }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V0 [This event is alias to FP_ARITH_DISPATCHED.PORT_0]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V0", + "SampleAfterValue": "2000003", + "UMask": "0x1" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V1 [This event is alias to FP_ARITH_DISPATCHED.PORT_1]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V1", + "SampleAfterValue": "2000003", + "UMask": "0x2" + }, + { + "BriefDescription": "FP_ARITH_DISPATCHED.V2 [This event is alias to FP_ARITH_DISPATCHED.PORT_5]", + "EventCode": "0xb3", + "EventName": "FP_ARITH_DISPATCHED.V2", + "SampleAfterValue": "2000003", + "UMask": "0x4" + }, { "BriefDescription": "Counts number of SSE/AVX computational 128-bit packed double precision floating-point instructions retired; some instructions will count twice as noted below. Each count represents 2 computation operations, one for each element. Applies to SSE* and AVX* packed double precision floating-point instructions: ADD SUB HADD HSUB SUBADD MUL DIV MIN MAX SQRT DPP FM(N)ADD/SUB. 
DPP and FM(N)ADD/SUB instructions count twice as they perform 2 calculations per element.", "EventCode": "0xc7", diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json index 6dcf3b763af4..2cfe814d2015 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/pipeline.json @@ -505,7 +505,7 @@ "UMask": "0x1" }, { - "BriefDescription": "INT_MISC.UNKNOWN_BRANCH_CYCLES", + "BriefDescription": "Bubble cycles of BAClear (Unknown Branch).", "EventCode": "0xad", "EventName": "INT_MISC.UNKNOWN_BRANCH_CYCLES", "MSRIndex": "0x3F7", diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json index 09d840c7da4c..65d088556bae 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-interconnect.json @@ -4825,11 +4825,11 @@ "Unit": "M3UPI" }, { - "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bouncable)", + "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (AD Bounceable)", "EventCode": "0x47", "EventName": "UNC_MDF_CRS_TxR_INSERTS.AD_BNC", "PerPkg": "1", - "PublicDescription": "AD Bouncable : Number of allocations into the CRS Egress", + "PublicDescription": "AD Bounceable : Number of allocations into the CRS Egress", "UMask": "0x1", "Unit": "MDF" }, @@ -4861,11 +4861,11 @@ "Unit": "MDF" }, { - "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bouncable)", + "BriefDescription": "Number of allocations into the CRS Egress used to queue up requests destined to the mesh (BL Bounceable)", "EventCode": "0x47", "EventName": "UNC_MDF_CRS_TxR_INSERTS.BL_BNC", "PerPkg": "1", - "PublicDescription": "BL Bouncable : Number of allocations into the CRS Egress", + "PublicDescription": "BL Bounceable : Number of allocations into the CRS Egress", "UMask": "0x4", "Unit": "MDF" }, diff --git a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json index 8b5f54fed103..03596db87710 100644 --- a/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json +++ b/tools/perf/pmu-events/arch/x86/sapphirerapids/uncore-io.json @@ -1249,6 +1249,36 @@ "UMask": "0x70ff010", "Unit": "IIO" }, + { + "BriefDescription": ": IOTLB Hits to a 1G Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.1G_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 1G Page : Counts if a transaction to a 1G page, on its first lookup, hits the IOTLB.", + "UMask": "0x10", + "Unit": "IIO" + }, + { + "BriefDescription": ": IOTLB Hits to a 2M Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.2M_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 2M Page : Counts if a transaction to a 2M page, on its first lookup, hits the IOTLB.", + "UMask": "0x8", + "Unit": "IIO" + }, + { + "BriefDescription": ": IOTLB Hits to a 4K Page", + "EventCode": "0x40", + "EventName": "UNC_IIO_IOMMU0.4K_HITS", + "PerPkg": "1", + "PortMask": "0x0000", + "PublicDescription": ": IOTLB Hits to a 4K Page : Counts if a transaction to a 4K page, on its first lookup, hits the IOTLB.", + "UMask": "0x4", + "Unit": "IIO" + }, 
{ "BriefDescription": ": Context cache hits", "EventCode": "0x40", From 6af6d22495efeb00cb4e220a4e6e30b5be3afdf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ahelenia=20Ziemia=C5=84ska?= Date: Tue, 27 Dec 2022 21:57:40 +0100 Subject: [PATCH 174/179] perf TUI: Don't ignore job control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In its infinite wisdom, by default, SLang sets susp undef, and this can only be un-done by calling SLtty_set_suspend_state(true). After every SLang_init_tty(). Additionally, no provisions are made for maintaining the teletype attributes across suspend/continue (outside of curses emulation mode(?!), which provides full support, naturally), so we need to save and restore the flags ourselves, as well as reset the text colours when going under. We need to also re-draw the screen, and raising SIGWINCH, shockingly, Just Works. The correct solution would be to Not Use SLang, but as a stop-gap, this makes TUI 'perf report' usable. Signed-off-by: Ahelenia ZiemiaƄska Tested-by: Arnaldo Carvalho de Melo Tested-by: Namhyung Kim Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Peter Zijlstra Cc: yaowenbin Link: https://lore.kernel.org/r/0354dcae23a8713f75f4fed609e0caec3c6e3cd5.1672174189.git.nabijaczleweli@nabijaczleweli.xyz Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 1 + tools/perf/ui/browsers/hists.c | 2 ++ tools/perf/ui/browsers/scripts.c | 1 + tools/perf/ui/tui/setup.c | 22 ++++++++++++++++++++++ 4 files changed, 26 insertions(+) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index cb2eb6dcb532..ec5e21932876 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -938,6 +938,7 @@ int hist_entry__tui_annotate(struct hist_entry *he, struct evsel *evsel, /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); SLang_init_tty(0, 0, 0); + SLtty_set_suspend_state(true); return map_symbol__tui_annotate(&he->ms, evsel, hbt); } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 3061dea29e6b..0c02b3a8e121 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -3000,6 +3000,7 @@ static int evsel__hists_browse(struct evsel *evsel, int nr_events, const char *h /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); SLang_init_tty(0, 0, 0); + SLtty_set_suspend_state(true); if (min_pcnt) browser->min_pcnt = min_pcnt; @@ -3667,6 +3668,7 @@ int block_hists_tui_browse(struct block_hist *bh, struct evsel *evsel, /* reset abort key so that it can get Ctrl-C as a key */ SLang_reset_tty(); SLang_init_tty(0, 0, 0); + SLtty_set_suspend_state(true); memset(&action, 0, sizeof(action)); diff --git a/tools/perf/ui/browsers/scripts.c b/tools/perf/ui/browsers/scripts.c index 47d2c7a8cbe1..50d45054ed6c 100644 --- a/tools/perf/ui/browsers/scripts.c +++ b/tools/perf/ui/browsers/scripts.c @@ -166,6 +166,7 @@ void run_script(char *cmd) printf("\033[c\033[H\033[J"); fflush(stdout); SLang_init_tty(0, 0, 0); + SLtty_set_suspend_state(true); SLsmg_refresh(); } diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 605d9e175ea7..16c6eff4d241 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -2,12 +2,14 @@ #include #include #include +#include #include #include #ifdef HAVE_BACKTRACE_SUPPORT #include #endif +#include "../../util/color.h" #include "../../util/debug.h" #include "../browser.h" #include 
"../helpline.h" @@ -121,6 +123,23 @@ static void ui__signal(int sig) exit(0); } +static void ui__sigcont(int sig) +{ + static struct termios tty; + + if (sig == SIGTSTP) { + while (tcgetattr(SLang_TT_Read_FD, &tty) == -1 && errno == EINTR) + ; + while (write(SLang_TT_Read_FD, PERF_COLOR_RESET, sizeof(PERF_COLOR_RESET) - 1) == -1 && errno == EINTR) + ; + raise(SIGSTOP); + } else { + while (tcsetattr(SLang_TT_Read_FD, TCSADRAIN, &tty) == -1 && errno == EINTR) + ; + raise(SIGWINCH); + } +} + int ui__init(void) { int err; @@ -135,6 +154,7 @@ int ui__init(void) err = SLang_init_tty(-1, 0, 0); if (err < 0) goto out; + SLtty_set_suspend_state(true); err = SLkp_init(); if (err < 0) { @@ -149,6 +169,8 @@ int ui__init(void) signal(SIGINT, ui__signal); signal(SIGQUIT, ui__signal); signal(SIGTERM, ui__signal); + signal(SIGTSTP, ui__sigcont); + signal(SIGCONT, ui__sigcont); perf_error__register(&perf_tui_eops); From ad30469a841b50dbb541df4d6971d891f703c297 Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 7 Dec 2023 16:05:13 -0800 Subject: [PATCH 175/179] libsubcmd: Fix memory leak in uniq() uniq() will write one command name over another causing the overwritten string to be leaked. Fix by doing a pass that removes duplicates and a second that removes the holes. Signed-off-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Chenyuan Mi Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231208000515.1693746-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/subcmd/help.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tools/lib/subcmd/help.c b/tools/lib/subcmd/help.c index adfbae27dc36..8561b0f01a24 100644 --- a/tools/lib/subcmd/help.c +++ b/tools/lib/subcmd/help.c @@ -52,11 +52,21 @@ void uniq(struct cmdnames *cmds) if (!cmds->cnt) return; - for (i = j = 1; i < cmds->cnt; i++) - if (strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) - cmds->names[j++] = cmds->names[i]; - + for (i = 1; i < cmds->cnt; i++) { + if (!strcmp(cmds->names[i]->name, cmds->names[i-1]->name)) + zfree(&cmds->names[i - 1]); + } + for (i = 0, j = 0; i < cmds->cnt; i++) { + if (cmds->names[i]) { + if (i == j) + j++; + else + cmds->names[j++] = cmds->names[i]; + } + } cmds->cnt = j; + while (j < i) + cmds->names[j++] = NULL; } void exclude_cmds(struct cmdnames *cmds, struct cmdnames *excludes) From bb177a85e82b37d3b76e65f3f773e8502be49d9b Mon Sep 17 00:00:00 2001 From: Ian Rogers Date: Thu, 7 Dec 2023 09:40:57 -0800 Subject: [PATCH 176/179] perf tests: Add perf script test Start a new set of shell tests for testing perf script. 
The initial contribution is checking that some perf db-export functionality works as reported in this regression by Ben Gainey : https://lore.kernel.org/lkml/20231207140911.3240408-1-ben.gainey@arm.com/ Signed-off-by: Ian Rogers Tested-by: Arnaldo Carvalho de Melo Tested-by: Ben Gainey Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231207174057.1482161-1-irogers@google.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/shell/script.sh | 66 ++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100755 tools/perf/tests/shell/script.sh diff --git a/tools/perf/tests/shell/script.sh b/tools/perf/tests/shell/script.sh new file mode 100755 index 000000000000..5ae7bd0031a8 --- /dev/null +++ b/tools/perf/tests/shell/script.sh @@ -0,0 +1,66 @@ +#!/bin/sh +# perf script tests +# SPDX-License-Identifier: GPL-2.0 + +set -e + +temp_dir=$(mktemp -d /tmp/perf-test-script.XXXXXXXXXX) + +perfdatafile="${temp_dir}/perf.data" +db_test="${temp_dir}/db_test.py" + +err=0 + +cleanup() +{ + trap - EXIT TERM INT + sane=$(echo "${temp_dir}" | cut -b 1-21) + if [ "${sane}" = "/tmp/perf-test-script" ] ; then + echo "--- Cleaning up ---" + rm -f "${temp_dir}/"* + rmdir "${temp_dir}" + fi +} + +trap_cleanup() +{ + cleanup + exit 1 +} + +trap trap_cleanup EXIT TERM INT + + +test_db() +{ + echo "DB test" + + # Check if python script is supported + libpython=$(perf version --build-options | grep python | grep -cv OFF) + if [ "${libpython}" != "1" ] ; then + echo "SKIP: python scripting is not supported" + err=2 + return + fi + + cat << "_end_of_file_" > "${db_test}" +perf_db_export_mode = True +perf_db_export_calls = False +perf_db_export_callchains = True + +def sample_table(*args): + print(f'sample_table({args})') + +def call_path_table(*args): + print(f'call_path_table({args}') +_end_of_file_ + perf record -g -o "${perfdatafile}" true + perf script -i "${perfdatafile}" -s "${db_test}" + echo "DB test [Success]" +} + +test_db + +cleanup + +exit $err From 1e24ce402c97dc3c0ab050593f1d5f6fde524564 Mon Sep 17 00:00:00 2001 From: Ben Gainey Date: Thu, 7 Dec 2023 14:09:11 +0000 Subject: [PATCH 177/179] perf db-export: Fix missing reference count get in call_path_from_sample() The addr_location map and maps fields in the inner loop were missing calls to map__get()/maps__get(). The subsequent addr_location__exit() call in each loop puts the map/maps fields causing use-after-free aborts. 
This issue reproduces on at least arm64 and x86_64 with something simple like `perf record -g ls` followed by `perf script -s script.py` with the following script: perf_db_export_mode = True perf_db_export_calls = False perf_db_export_callchains = True def sample_table(*args): print(f'sample_table({args})') def call_path_table(*args): print(f'call_path_table({args}') Committer testing: This test, just introduced by Ian Rogers, now passes, not segfaulting anymore: # perf test "perf script tests" 95: perf script tests : Ok # Fixes: 0dd5041c9a0eaf8c ("perf addr_location: Add init/exit/copy functions") Signed-off-by: Ben Gainey Tested-by: Arnaldo Carvalho de Melo Tested-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20231207140911.3240408-1-ben.gainey@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/db-export.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c index b9fb71ab7a73..106429155c2e 100644 --- a/tools/perf/util/db-export.c +++ b/tools/perf/util/db-export.c @@ -253,8 +253,8 @@ static struct call_path *call_path_from_sample(struct db_export *dbe, */ addr_location__init(&al); al.sym = node->ms.sym; - al.map = node->ms.map; - al.maps = thread__maps(thread); + al.map = map__get(node->ms.map); + al.maps = maps__get(thread__maps(thread)); al.addr = node->ip; if (al.map && !al.sym) From b6d8b858dbbbd832d255c3c8a3721173e6edf036 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Tue, 19 Dec 2023 15:32:35 +0100 Subject: [PATCH 178/179] perf test: test case 'Setup struct perf_event_attr' fails on s390 on z/vm perf test 17 'Setup struct perf_event_attr' fails on s390 z/VM guest, using linux-next kernel. Root cause is the fall-back from hardware counter cycles perf_event_attr: type 0 (PERF_TYPE_HARDWARE) size 136 config 0 (PERF_COUNT_HW_CPU_CYCLES) { sample_period, sample_freq } 4000 sample_type IP|TID|TIME|ADDR|PERIOD|DATA_SRC read_format ID|LOST which returns -ENOENT on s390 z/VM guest. This causes the code to fall back to software counter task-clock, as can be seen in the debug output: ------------------------------------------------------------ perf_event_attr: type 1 (PERF_TYPE_SOFTWARE) size 136 config 0x1 (PERF_COUNT_SW_TASK_CLOCK) <-here { sample_period, sample_freq } 4000 sample_type IP|TID|TIME|ADDR|PERIOD|DATA_SRC read_format ID|LOST This succeeds on s390 z/VM guest. This successful installation of the counter task-clock is not listed in the expected results and the test case fails. This is caused by commit eb2eac0c7b618033 ("perf evsel: Fallback to "task-clock" when not system wide") which introduced fall back from event 'cycles' to event 'task-clock'. To fix this on s390 allow event number 0 (cycles) and event number 1 (task-clock) as expected result. Output before: # ./perf test -Fv 17 17: Setup struct perf_event_attr : --- start --- running './tests/attr/test-stat-group1' unsupp './tests/attr/test-stat-group1' running './tests/attr/test-record-graph-default' test limitation '!aarch64' excluded architecture list ['aarch64'] expected config=0, got 1 FAILED './tests/attr/test-record-graph-default' - match failure ---- end ---- Setup struct perf_event_attr: FAILED! 
# Output after: # ./perf test -F 17 17: Setup struct perf_event_attr : Ok # Fixes: eb2eac0c7b618033 ("perf evsel: Fallback to "task-clock" when not system wide") Signed-off-by: Thomas Richter Acked-by: Ian Rogers Cc: Alexander Gordeev Cc: Heiko Carstens Cc: Sumanth Korikkar Cc: Sven Schnelle Cc: Vasily Gorbik Link: https://lore.kernel.org/r/20231219143235.1075522-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/attr/base-record | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/attr/base-record b/tools/perf/tests/attr/base-record index 27c21271a16c..b44e4e6e4443 100644 --- a/tools/perf/tests/attr/base-record +++ b/tools/perf/tests/attr/base-record @@ -6,7 +6,7 @@ flags=0|8 cpu=* type=0|1 size=136 -config=0 +config=0|1 sample_period=* sample_type=263 read_format=0|4|20 From d988c9f511af71a3445b6a4f3a2c67208ff8e480 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 8 Jan 2024 17:11:13 -0300 Subject: [PATCH 179/179] MAINTAINERS: Add Namhyung as tools/perf/ co-maintainer Acked-by: Ingo Molnar Acked-by: Linus Torvalds Acked-by: Namhyung Kim Cc: Thomas Gleixner Cc: Peter Zijlstra Cc: Ian Rogers Cc: Adrian Hunter Cc: Jiri Olsa Cc: Mark Rutland Cc: Alexander Shishkin Link: https://lore.kernel.org/lkml/ZZxbCeVPnOjShbMQ@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 9104430e148e..041ef848d736 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -16896,10 +16896,10 @@ PERFORMANCE EVENTS SUBSYSTEM M: Peter Zijlstra M: Ingo Molnar M: Arnaldo Carvalho de Melo +M: Namhyung Kim R: Mark Rutland R: Alexander Shishkin R: Jiri Olsa -R: Namhyung Kim R: Ian Rogers R: Adrian Hunter L: linux-perf-users@vger.kernel.org