linux_media/tools/perf/util/bpf_lock_contention.c
Arnaldo Carvalho de Melo cc2367eebb machine: Adopt is_lock_function() from builtin-lock.c
It is used in bpf_lock_contention.c, and builtin-lock.c will be made
conditional on CONFIG_LIBTRACEEVENT=y, so move it to machine.c, which is
always available.

This also moves the four global variables for the sched and lock text
section start/end into 'struct machine', since we can conceivably have
that info for several machine instances, say in some 'perf diff'-like
tool.
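
For reference, a rough sketch of the shape this describes (the field and
helper names follow the text above; the exact machine.h/machine.c code may
differ):

/* struct machine gains the text section boundaries (illustrative layout) */
struct machine {
	/* ... existing fields ... */
	struct {
		u64 text_start;
		u64 text_end;
	} sched, lock;		/* __sched_text_* and __lock_text_* ranges */
};

/* true if 'addr' falls in the sched or lock text sections, i.e. lock internals */
bool machine__is_lock_function(struct machine *machine, u64 addr)
{
	/* the real code resolves the boundaries lazily from kernel symbols */
	if (machine->sched.text_start <= addr && addr < machine->sched.text_end)
		return true;	/* mutex/rwsem slow paths live in .sched.text */

	return machine->lock.text_start <= addr && addr < machine->lock.text_end;
}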

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nick Desaulniers <ndesaulniers@google.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: http://lore.kernel.org/lkml/
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2022-12-14 11:16:12 -03:00


// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"

static struct lock_contention_bpf *skel;
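
/*
 * Aggregated per-lock statistics read back from the lock_stat BPF map.
 * The layout has to match the value type used by the BPF program in
 * bpf_skel/lock_contention.bpf.c.
 */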
struct lock_contention_data {
	u64 total_time;
	u64 min_time;
	u64 max_time;
	u32 count;
	u32 flags;
};
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

	/* each stack trace entry holds up to max_stack u64 addresses */
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);

	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

	/* fill the CPU filter map and tell the BPF program to use it */
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	/* likewise for the task (pid) filter map */
	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	/* no explicit target: restrict tracking to the forked workload */
	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	skel->bss->stack_skip = con->stack_skip;

	lock_contention_bpf__attach(skel);
	return 0;
}
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	s32 prev_key, key;
	struct lock_contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->lost = skel->bss->lost;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	/* walk the lock_stat map: the key is the stack trace id */
	prev_key = 0;
	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
		struct map *kmap;
		struct symbol *sym;
		int idx = 0;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		st = zalloc(sizeof(*st));
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		st->flags = data.flags;

		bpf_map_lookup_elem(stack, &key, stack_trace);

		/* skip lock internal functions */
		while (machine__is_lock_function(machine, stack_trace[idx]) &&
		       idx < con->max_stack - 1)
			idx++;

		/* the first caller outside the locking code identifies the contention point */
		st->addr = stack_trace[idx];
		sym = machine__find_kernel_symbol(machine, st->addr, &kmap);

		if (sym) {
			unsigned long offset;
			int ret = 0;

			offset = kmap->map_ip(kmap, st->addr) - sym->start;

			if (offset)
				ret = asprintf(&st->name, "%s+%#lx", sym->name, offset);
			else
				st->name = strdup(sym->name);

			if (ret < 0 || st->name == NULL)
				break;
		} else if (asprintf(&st->name, "%#lx", (unsigned long)st->addr) < 0) {
			break;
		}

		/* keep the full callstack only in verbose mode */
		if (verbose) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

		hlist_add_head(&st->hash_entry, con->result);
		prev_key = key;

		/* we're fine now, reset the values */
		st = NULL;
		err = 0;
	}

	free(stack_trace);
	if (st) {
		free(st->name);
		free(st);
	}

	return err;
}
int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}
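
For context, 'perf lock contention' (builtin-lock.c) drives these entry points
roughly as follows. This is a simplified sketch of the caller side; the field
and variable names are approximations, not copied from builtin-lock.c.

	struct lock_contention con = {
		.target		= &target,
		.evlist		= evlist,
		.machine	= machine,		/* host machine from the session */
		.result		= &lockhash_table[0],	/* hlist buckets for lock_stat entries */
		.map_nr_entries	= bpf_map_entries,
		.max_stack	= max_stack_depth,
		.stack_skip	= stack_skip,
	};

	if (lock_contention_prepare(&con) < 0)	/* open, size, load and attach the skeleton */
		return -1;

	lock_contention_start();		/* enabled = 1: BPF starts aggregating */
	/* ... run the workload or wait for the requested time ... */
	lock_contention_stop();			/* enabled = 0 */

	lock_contention_read(&con);		/* copy BPF map contents into con.result */
	lock_contention_finish();		/* disable and destroy the skeleton */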