linux/tools/perf/util/thread.h
Kan Liang ff165628d7 perf callchain: Stitch LBR call stack
In LBR call stack mode, the depth of the reconstructed LBR call stack is
limited to the number of LBR registers.

  For example, on Skylake, the depth of the reconstructed LBR call stack is
  always <= 32.

  # To display the perf.data header info, please use
  # --header/--header-only options.
  #
  #
  # Total Lost Samples: 0
  #
  # Samples: 6K of event 'cycles'
  # Event count (approx.): 6487119731
  #
  # Children      Self  Command          Shared Object       Symbol
  # ........  ........  ...............  ..................  ................................

    99.97%    99.97%  tchain_edit      tchain_edit        [.] f43
            |
             --99.64%--f11
                       f12
                       f13
                       f14
                       f15
                       f16
                       f17
                       f18
                       f19
                       f20
                       f21
                       f22
                       f23
                       f24
                       f25
                       f26
                       f27
                       f28
                       f29
                       f30
                       f31
                       f32
                       f33
                       f34
                       f35
                       f36
                       f37
                       f38
                       f39
                       f40
                       f41
                       f42
                       f43

For a call stack deeper than the LBR limit, HW will overwrite the LBR
registers with the oldest branches, so only a partial call stack can be
reconstructed.

However, the overwritten LBRs may still be retrieved from the previous
sample, which was taken before HW overwrote those LBR registers. Perf
tools can stitch those overwritten LBRs onto the current call stack to
get a more complete call stack.

To determine whether LBRs can be stitched, perf tools need to compare the
current sample with the previous sample (see the sketch after this list):

- They should have identical LBR records (same from, to and flags
  values, and the same physical index of the LBR registers).

- The search starts from the base-of-stack of the current sample.
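
As a rough sketch of that check (illustrative only, not the actual perf
code; it assumes the perf_sample__branch_entries() accessor and the hw_idx
field that came with PERF_SAMPLE_BRANCH_HW_INDEX, and a caller that passes
the number of overlapping records):

  /*
   * Illustrative sketch: decide whether the previous sample's LBR records
   * line up with the current sample's.  'nr' is the number of overlapping
   * records to compare; the caller must ensure nr does not exceed either
   * stack's nr.
   */
  static bool lbrs_match(struct perf_sample *prev, struct perf_sample *cur,
                         u64 nr)
  {
          struct branch_entry *pe = perf_sample__branch_entries(prev);
          struct branch_entry *ce = perf_sample__branch_entries(cur);
          u64 pbase = prev->branch_stack->nr - nr;
          u64 cbase = cur->branch_stack->nr - nr;
          u64 i;

          /* The physical index of the LBR registers must be the same. */
          if (prev->branch_stack->hw_idx != cur->branch_stack->hw_idx)
                  return false;

          /* Compare the oldest records, i.e. the base of each stack. */
          for (i = 0; i < nr; i++) {
                  if (memcmp(&pe[pbase + i], &ce[cbase + i], sizeof(*pe)))
                          return false;
          }

          return true;
  }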

Once perf decides to stitch the previous LBRs, the corresponding LBR
cursor nodes are copied to 'lists'.  'lists' tracks the LBR cursor nodes
that are going to be stitched.

When the stitching is over, the nodes are not freed immediately.  They
are moved to 'free_lists', so the next stitching may reuse the space.
Both 'lists' and 'free_lists' are freed when all samples have been
processed.
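
A hedged sketch of that reuse pattern (get_stitch_node() and
retire_stitch_nodes() are illustrative names; the node type is assumed to
be the stitch_list declared in callchain.h):

  /*
   * Illustrative only: pick up a node for a stitched LBR entry, reusing a
   * node parked on free_lists by an earlier stitching when possible.
   */
  static struct stitch_list *get_stitch_node(struct lbr_stitch *lbr_stitch)
  {
          struct stitch_list *node;

          if (!list_empty(&lbr_stitch->free_lists)) {
                  node = list_first_entry(&lbr_stitch->free_lists,
                                          struct stitch_list, node);
                  list_del(&node->node);
                  return node;
          }

          return zalloc(sizeof(*node));
  }

  /*
   * When the current stitching is done, park the nodes on free_lists
   * instead of freeing them, so the next stitching can reuse the space.
   */
  static void retire_stitch_nodes(struct lbr_stitch *lbr_stitch)
  {
          list_splice_init(&lbr_stitch->lists, &lbr_stitch->free_lists);
  }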

Committer notes:

Fix the intel-pt.c initialization of the union with 'struct
branch_flags', which breaks the build with its unnamed union on older gcc
versions.
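
For context, older gcc rejects designated initializers that reach into an
unnamed struct or union member.  A minimal, self-contained illustration of
that class of construct (not the actual intel-pt.c code) and the usual
workaround:

  #include <stdint.h>
  #include <string.h>

  /* Stand-in for a flags type carrying an unnamed union; not the real
   * struct branch_flags definition. */
  struct flags_like {
          union {
                  uint64_t value;
                  struct {
                          uint64_t mispred:1;
                          uint64_t predicted:1;
                          uint64_t reserved:62;
                  };
          };
  };

  static struct flags_like make_flags(void)
  {
          /*
           * Older gcc rejects reaching into the unnamed members here:
           *   struct flags_like f = { .mispred = 1 };
           * Zeroing and assigning avoids the construct entirely.
           */
          struct flags_like f;

          memset(&f, 0, sizeof(f));
          f.mispred = 1;
          return f;
  }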

Uninline thread__free_stitch_list(), as it grew big and started dragging
includes into thread.h, so move it to thread.c, where what it needs in
terms of headers is already there.

This fixes the build on several systems, such as debian:experimental when
cross building to the MIPS32 architecture; i.e. in the other cases what
was needed was being included by sheer luck.

  In file included from builtin-sched.c:11:
  util/thread.h: In function 'thread__free_stitch_list':
  util/thread.h:169:3: error: implicit declaration of function 'free' [-Werror=implicit-function-declaration]
    169 |   free(pos);
        |   ^~~~
  util/thread.h:169:3: error: incompatible implicit declaration of built-in function 'free' [-Werror]
  util/thread.h:19:1: note: include '<stdlib.h>' or provide a declaration of 'free'
     18 | #include "callchain.h"
    +++ |+#include <stdlib.h>
     19 |
  util/thread.h:174:3: error: incompatible implicit declaration of built-in function 'free' [-Werror]
    174 |   free(pos);
        |   ^~~~
  util/thread.h:174:3: note: include '<stdlib.h>' or provide a declaration of 'free'
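
After the move, a rough sketch of how the helper can look in thread.c,
where <stdlib.h> and the list helpers are already available (struct
stitch_list is assumed to be the node type declared in callchain.h;
illustrative, not a verbatim copy):

  void thread__free_stitch_list(struct thread *thread)
  {
          struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
          struct stitch_list *pos, *tmp;

          if (!lbr_stitch)
                  return;

          /* Release both the in-use and the parked nodes. */
          list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
                  list_del_init(&pos->node);
                  free(pos);
          }

          list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
                  list_del_init(&pos->node);
                  free(pos);
          }

          zfree(&lbr_stitch->prev_lbr_cursor);
          zfree(&thread->lbr_stitch);
  }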

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Pavel Gerasimov <pavel.gerasimov@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@linux.ibm.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vitaly Slobodskoy <vitaly.slobodskoy@intel.com>
Link: http://lore.kernel.org/lkml/20200319202517.23423-13-kan.liang@linux.intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2020-04-18 09:05:01 -03:00


/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_THREAD_H
#define __PERF_THREAD_H

#include <linux/refcount.h>
#include <linux/rbtree.h>
#include <linux/list.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include "srccode.h"
#include "symbol_conf.h"
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
#include "event.h"
#include "callchain.h"

struct addr_location;
struct map;
struct perf_record_namespaces;
struct thread_stack;
struct unwind_libunwind_ops;

struct lbr_stitch {
	struct list_head		lists;
	struct list_head		free_lists;
	struct perf_sample		prev_sample;
	struct callchain_cursor_node	*prev_lbr_cursor;
};
struct thread {
	union {
		struct rb_node	 rb_node;
		struct list_head node;
	};
	struct maps		*maps;
	pid_t			pid_; /* Not all tools update this */
	pid_t			tid;
	pid_t			ppid;
	int			cpu;
	refcount_t		refcnt;
	bool			comm_set;
	int			comm_len;
	bool			dead; /* if set thread has exited */
	struct list_head	namespaces_list;
	struct rw_semaphore	namespaces_lock;
	struct list_head	comm_list;
	struct rw_semaphore	comm_lock;
	u64			db_id;

	void			*priv;
	struct thread_stack	*ts;
	struct nsinfo		*nsinfo;
	struct srccode_state	srccode_state;
	bool			filter;
	int			filter_entry_depth;

	/* LBR call stack stitch */
	bool			lbr_stitch_enable;
	struct lbr_stitch	*lbr_stitch;
};
struct machine;
struct namespaces;
struct comm;

struct thread *thread__new(pid_t pid, pid_t tid);
int thread__init_maps(struct thread *thread, struct machine *machine);
void thread__delete(struct thread *thread);

struct thread *thread__get(struct thread *thread);
void thread__put(struct thread *thread);
static inline void __thread__zput(struct thread **thread)
{
	thread__put(*thread);
	*thread = NULL;
}

#define thread__zput(thread) __thread__zput(&thread)

static inline void thread__exited(struct thread *thread)
{
	thread->dead = true;
}
struct namespaces *thread__namespaces(struct thread *thread);
int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event);

int __thread__set_comm(struct thread *thread, const char *comm, u64 timestamp,
		       bool exec);
static inline int thread__set_comm(struct thread *thread, const char *comm,
				   u64 timestamp)
{
	return __thread__set_comm(thread, comm, timestamp, false);
}

int thread__set_comm_from_proc(struct thread *thread);

int thread__comm_len(struct thread *thread);
struct comm *thread__comm(const struct thread *thread);
struct comm *thread__exec_comm(const struct thread *thread);
const char *thread__comm_str(struct thread *thread);
int thread__insert_map(struct thread *thread, struct map *map);
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp,
		 bool do_maps_clone);
size_t thread__fprintf(struct thread *thread, FILE *fp);

struct thread *thread__main_thread(struct machine *machine, struct thread *thread);

struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al);
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
				struct addr_location *al);

struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
				   u64 addr, struct addr_location *al);
struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
				      u64 addr, struct addr_location *al);

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al);

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit);
static inline void *thread__priv(struct thread *thread)
{
	return thread->priv;
}

static inline void thread__set_priv(struct thread *thread, void *p)
{
	thread->priv = p;
}

static inline bool thread__is_filtered(struct thread *thread)
{
	if (symbol_conf.comm_list &&
	    !strlist__has_entry(symbol_conf.comm_list, thread__comm_str(thread))) {
		return true;
	}

	if (symbol_conf.pid_list &&
	    !intlist__has_entry(symbol_conf.pid_list, thread->pid_)) {
		return true;
	}

	if (symbol_conf.tid_list &&
	    !intlist__has_entry(symbol_conf.tid_list, thread->tid)) {
		return true;
	}

	return false;
}
void thread__free_stitch_list(struct thread *thread);
#endif /* __PERF_THREAD_H */