mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 04:33:26 +02:00
Merge tag 'trace-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing updates from Steven Rostedt: - Add function names as a way to filter function addresses - Add sample module to test ftrace ops and dynamic trampolines - Allow stack traces to be passed from beginning event to end event for synthetic events. This will allow seeing the stack trace of when a task is scheduled out and recorded when it gets scheduled back in. - Add trace event helper __get_buf() to use as a temporary buffer when printing out trace event output. - Add kernel command line to create trace instances on boot up. - Add enabling of events to instances created at boot up. - Add trace_array_puts() to write into instances. - Allow boot instances to take a snapshot at the end of boot up. - Allow live patch modules to include trace events - Minor fixes and clean ups * tag 'trace-v6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace: (31 commits) tracing: Remove unnecessary NULL assignment tracepoint: Allow livepatch module add trace event tracing: Always use canonical ftrace path tracing/histogram: Fix stacktrace histogram Documententation tracing/histogram: Fix stacktrace key tracing/histogram: Fix a few problems with stacktrace variable printing tracing: Add BUILD_BUG() to make sure stacktrace fits in strings tracing/histogram: Don't use strlen to find length of stacktrace variables tracing: Allow boot instances to have snapshot buffers tracing: Add trace_array_puts() to write into instance tracing: Add enabling of events to boot instances tracing: Add creation of instances at boot command line tracing: Fix trace_event_raw_event_synth() if else statement samples: ftrace: Make some global variables static ftrace: sample: avoid open-coded 64-bit division samples: ftrace: Include the nospec-branch.h only for x86 tracing: Acquire buffer from temparary trace sequence tracing/histogram: Wrap remaining shell snippets in code blocks tracing/osnoise: No need for schedule_hrtimeout range bpf/tracing: Use stage6 of tracing to not duplicate 
macros ...
This commit is contained in:
@@ -46,6 +46,13 @@ config SAMPLE_FTRACE_DIRECT_MULTI
|
||||
that hooks to wake_up_process and schedule, and prints
|
||||
the function addresses.
|
||||
|
||||
config SAMPLE_FTRACE_OPS
|
||||
tristate "Build custom ftrace ops example"
|
||||
depends on FUNCTION_TRACER
|
||||
help
|
||||
This builds an ftrace ops example that hooks two functions and
|
||||
measures the time taken to invoke one function a number of times.
|
||||
|
||||
config SAMPLE_TRACE_ARRAY
|
||||
tristate "Build sample module for kernel access to Ftrace instances"
|
||||
depends on EVENT_TRACING && m
|
||||
|
@@ -24,6 +24,7 @@ obj-$(CONFIG_SAMPLE_TRACE_CUSTOM_EVENTS) += trace_events/
|
||||
obj-$(CONFIG_SAMPLE_TRACE_PRINTK) += trace_printk/
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace/
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace/
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_OPS) += ftrace/
|
||||
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += ftrace/
|
||||
subdir-$(CONFIG_SAMPLE_UHID) += uhid
|
||||
obj-$(CONFIG_VIDEO_PCI_SKELETON) += v4l/
|
||||
|
@@ -5,6 +5,7 @@ obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-too.o
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT) += ftrace-direct-modify.o
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi.o
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_DIRECT_MULTI) += ftrace-direct-multi-modify.o
|
||||
obj-$(CONFIG_SAMPLE_FTRACE_OPS) += ftrace-ops.o
|
||||
|
||||
CFLAGS_sample-trace-array.o := -I$(src)
|
||||
obj-$(CONFIG_SAMPLE_TRACE_ARRAY) += sample-trace-array.o
|
||||
|
@@ -3,7 +3,6 @@
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
extern void my_direct_func1(void);
|
||||
extern void my_direct_func2(void);
|
||||
@@ -26,6 +25,7 @@ static unsigned long my_ip = (unsigned long)schedule;
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#include <asm/ibt.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
asm (
|
||||
" .pushsection .text, \"ax\", @progbits\n"
|
||||
|
@@ -3,7 +3,6 @@
|
||||
#include <linux/kthread.h>
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
extern void my_direct_func1(unsigned long ip);
|
||||
extern void my_direct_func2(unsigned long ip);
|
||||
@@ -24,6 +23,7 @@ extern void my_tramp2(void *);
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#include <asm/ibt.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
asm (
|
||||
" .pushsection .text, \"ax\", @progbits\n"
|
||||
|
@@ -5,7 +5,6 @@
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/sched/stat.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
extern void my_direct_func(unsigned long ip);
|
||||
|
||||
@@ -19,6 +18,7 @@ extern void my_tramp(void *);
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#include <asm/ibt.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
asm (
|
||||
" .pushsection .text, \"ax\", @progbits\n"
|
||||
|
@@ -4,7 +4,6 @@
|
||||
#include <linux/mm.h> /* for handle_mm_fault() */
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
extern void my_direct_func(struct vm_area_struct *vma,
|
||||
unsigned long address, unsigned int flags);
|
||||
@@ -21,6 +20,7 @@ extern void my_tramp(void *);
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#include <asm/ibt.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
asm (
|
||||
" .pushsection .text, \"ax\", @progbits\n"
|
||||
|
@@ -4,7 +4,6 @@
|
||||
#include <linux/sched.h> /* for wake_up_process() */
|
||||
#include <linux/ftrace.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
extern void my_direct_func(struct task_struct *p);
|
||||
|
||||
@@ -18,6 +17,7 @@ extern void my_tramp(void *);
|
||||
#ifdef CONFIG_X86_64
|
||||
|
||||
#include <asm/ibt.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
|
||||
asm (
|
||||
" .pushsection .text, \"ax\", @progbits\n"
|
||||
|
252
samples/ftrace/ftrace-ops.c
Normal file
252
samples/ftrace/ftrace-ops.c
Normal file
@@ -0,0 +1,252 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/ftrace.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
/*
|
||||
* Arbitrary large value chosen to be sufficiently large to minimize noise but
|
||||
* sufficiently small to complete quickly.
|
||||
*/
|
||||
static unsigned int nr_function_calls = 100000;
|
||||
module_param(nr_function_calls, uint, 0);
|
||||
MODULE_PARM_DESC(nr_function_calls, "How many times to call the relevant tracee");
|
||||
|
||||
/*
|
||||
* The number of ops associated with a call site affects whether a tracer can
|
||||
* be called directly or whether it's necessary to go via the list func, which
|
||||
* can be significantly more expensive.
|
||||
*/
|
||||
static unsigned int nr_ops_relevant = 1;
|
||||
module_param(nr_ops_relevant, uint, 0);
|
||||
MODULE_PARM_DESC(nr_ops_relevant, "How many ftrace_ops to associate with the relevant tracee");
|
||||
|
||||
/*
|
||||
* On architectures where all call sites share the same trampoline, having
|
||||
* tracers enabled for distinct functions can force the use of the list func
|
||||
* and incur overhead for all call sites.
|
||||
*/
|
||||
static unsigned int nr_ops_irrelevant;
|
||||
module_param(nr_ops_irrelevant, uint, 0);
|
||||
MODULE_PARM_DESC(nr_ops_irrelevant, "How many ftrace_ops to associate with the irrelevant tracee");
|
||||
|
||||
/*
|
||||
* On architectures with DYNAMIC_FTRACE_WITH_REGS, saving the full pt_regs can
|
||||
* be more expensive than only saving the minimal necessary regs.
|
||||
*/
|
||||
static bool save_regs;
|
||||
module_param(save_regs, bool, 0);
|
||||
MODULE_PARM_DESC(save_regs, "Register ops with FTRACE_OPS_FL_SAVE_REGS (save all registers in the trampoline)");
|
||||
|
||||
static bool assist_recursion;
|
||||
module_param(assist_recursion, bool, 0);
|
||||
MODULE_PARM_DESC(assist_reursion, "Register ops with FTRACE_OPS_FL_RECURSION");
|
||||
|
||||
static bool assist_rcu;
|
||||
module_param(assist_rcu, bool, 0);
|
||||
MODULE_PARM_DESC(assist_reursion, "Register ops with FTRACE_OPS_FL_RCU");
|
||||
|
||||
/*
|
||||
* By default, a trivial tracer is used which immediately returns to mimimize
|
||||
* overhead. Sometimes a consistency check using a more expensive tracer is
|
||||
* desireable.
|
||||
*/
|
||||
static bool check_count;
|
||||
module_param(check_count, bool, 0);
|
||||
MODULE_PARM_DESC(check_count, "Check that tracers are called the expected number of times\n");
|
||||
|
||||
/*
|
||||
* Usually it's not interesting to leave the ops registered after the test
|
||||
* runs, but sometimes it can be useful to leave them registered so that they
|
||||
* can be inspected through the tracefs 'enabled_functions' file.
|
||||
*/
|
||||
static bool persist;
|
||||
module_param(persist, bool, 0);
|
||||
MODULE_PARM_DESC(persist, "Successfully load module and leave ftrace ops registered after test completes\n");
|
||||
|
||||
/*
|
||||
* Marked as noinline to ensure that an out-of-line traceable copy is
|
||||
* generated by the compiler.
|
||||
*
|
||||
* The barrier() ensures the compiler won't elide calls by determining there
|
||||
* are no side-effects.
|
||||
*/
|
||||
static noinline void tracee_relevant(void)
|
||||
{
|
||||
barrier();
|
||||
}
|
||||
|
||||
/*
|
||||
* Marked as noinline to ensure that an out-of-line traceable copy is
|
||||
* generated by the compiler.
|
||||
*
|
||||
* The barrier() ensures the compiler won't elide calls by determining there
|
||||
* are no side-effects.
|
||||
*/
|
||||
static noinline void tracee_irrelevant(void)
|
||||
{
|
||||
barrier();
|
||||
}
|
||||
|
||||
struct sample_ops {
|
||||
struct ftrace_ops ops;
|
||||
unsigned int count;
|
||||
};
|
||||
|
||||
/* Cheapest possible tracer callback: measures bare attachment overhead. */
static void ops_func_nop(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op,
			 struct ftrace_regs *fregs)
{
	/* intentionally empty */
}
|
||||
|
||||
static void ops_func_count(unsigned long ip, unsigned long parent_ip,
|
||||
struct ftrace_ops *op,
|
||||
struct ftrace_regs *fregs)
|
||||
{
|
||||
struct sample_ops *self;
|
||||
|
||||
self = container_of(op, struct sample_ops, ops);
|
||||
self->count++;
|
||||
}
|
||||
|
||||
static struct sample_ops *ops_relevant;
|
||||
static struct sample_ops *ops_irrelevant;
|
||||
|
||||
/*
 * Allocate 'nr' sample_ops, filter each one to 'tracee', and register them
 * all with 'func' as the callback and the given ops flags.
 *
 * Returns the zeroed array (caller frees via ops_destroy()), or NULL on
 * allocation failure. Filter/registration failures are reported through
 * WARN_ON_ONCE but do not abort — this is a best-effort benchmark module.
 */
static struct sample_ops *ops_alloc_init(void *tracee, ftrace_func_t func,
					 unsigned long flags, int nr)
{
	struct sample_ops *ops;

	ops = kcalloc(nr, sizeof(*ops), GFP_KERNEL);
	if (WARN_ON_ONCE(!ops))
		return NULL;

	/* int index matches 'nr', avoiding a signed/unsigned comparison. */
	for (int i = 0; i < nr; i++) {
		ops[i].ops.func = func;
		ops[i].ops.flags = flags;
		WARN_ON_ONCE(ftrace_set_filter_ip(&ops[i].ops, (unsigned long)tracee, 0, 0));
		WARN_ON_ONCE(register_ftrace_function(&ops[i].ops));
	}

	return ops;
}
|
||||
|
||||
static void ops_destroy(struct sample_ops *ops, int nr)
|
||||
{
|
||||
if (!ops)
|
||||
return;
|
||||
|
||||
for (unsigned int i = 0; i < nr; i++) {
|
||||
WARN_ON_ONCE(unregister_ftrace_function(&ops[i].ops));
|
||||
ftrace_free_filter(&ops[i].ops);
|
||||
}
|
||||
|
||||
kfree(ops);
|
||||
}
|
||||
|
||||
static void ops_check(struct sample_ops *ops, int nr,
|
||||
unsigned int expected_count)
|
||||
{
|
||||
if (!ops || !check_count)
|
||||
return;
|
||||
|
||||
for (unsigned int i = 0; i < nr; i++) {
|
||||
if (ops->count == expected_count)
|
||||
continue;
|
||||
pr_warn("Counter called %u times (expected %u)\n",
|
||||
ops->count, expected_count);
|
||||
}
|
||||
}
|
||||
|
||||
static ftrace_func_t tracer_relevant = ops_func_nop;
|
||||
static ftrace_func_t tracer_irrelevant = ops_func_nop;
|
||||
|
||||
/*
 * Module init: register the requested relevant/irrelevant ftrace_ops, time
 * nr_function_calls invocations of the relevant tracee, sanity-check the
 * counters, then (unless 'persist' is set) tear everything down and return
 * an error so the module does not stay loaded after the benchmark.
 */
static int __init ftrace_ops_sample_init(void)
{
	unsigned long flags = 0;
	ktime_t start, end;
	u64 period;

	/* Saving full pt_regs needs DYNAMIC_FTRACE_WITH_REGS support. */
	if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) && save_regs) {
		pr_info("this kernel does not support saving registers\n");
		save_regs = false;
	} else if (save_regs) {
		flags |= FTRACE_OPS_FL_SAVE_REGS;
	}

	if (assist_recursion)
		flags |= FTRACE_OPS_FL_RECURSION;

	if (assist_rcu)
		flags |= FTRACE_OPS_FL_RCU;

	if (check_count) {
		tracer_relevant = ops_func_count;
		tracer_irrelevant = ops_func_count;
	}

	pr_info("registering:\n"
		" relevant ops: %u\n"
		" tracee: %ps\n"
		" tracer: %ps\n"
		" irrelevant ops: %u\n"
		" tracee: %ps\n"
		" tracer: %ps\n"
		" saving registers: %s\n"
		" assist recursion: %s\n"
		" assist RCU: %s\n",
		nr_ops_relevant, tracee_relevant, tracer_relevant,
		nr_ops_irrelevant, tracee_irrelevant, tracer_irrelevant,
		save_regs ? "YES" : "NO",
		assist_recursion ? "YES" : "NO",
		assist_rcu ? "YES" : "NO");

	ops_relevant = ops_alloc_init(tracee_relevant, tracer_relevant,
				      flags, nr_ops_relevant);
	ops_irrelevant = ops_alloc_init(tracee_irrelevant, tracer_irrelevant,
					flags, nr_ops_irrelevant);

	start = ktime_get();
	for (unsigned int i = 0; i < nr_function_calls; i++)
		tracee_relevant();
	end = ktime_get();

	/* The irrelevant tracee is never called, so its count must be 0. */
	ops_check(ops_relevant, nr_ops_relevant, nr_function_calls);
	ops_check(ops_irrelevant, nr_ops_irrelevant, 0);

	period = ktime_to_ns(ktime_sub(end, start));

	/* Guard div_u64 against nr_function_calls=0 supplied on the cmdline. */
	pr_info("Attempted %u calls to %ps in %lluns (%lluns / call)\n",
		nr_function_calls, tracee_relevant, period,
		nr_function_calls ? div_u64(period, nr_function_calls) : 0);

	if (persist)
		return 0;

	ops_destroy(ops_relevant, nr_ops_relevant);
	ops_destroy(ops_irrelevant, nr_ops_irrelevant);

	/*
	 * The benchmark completed successfully, but there's no reason to keep
	 * the module around. Return an error so the user doesn't have to
	 * manually unload the module.
	 */
	return -EINVAL;
}
|
||||
module_init(ftrace_ops_sample_init);
|
||||
|
||||
/*
 * Module exit: only reached when 'persist' kept the ops registered at init
 * time; unregister and free both ops arrays (ops_destroy handles NULL).
 */
static void __exit ftrace_ops_sample_exit(void)
{
	ops_destroy(ops_relevant, nr_ops_relevant);
	ops_destroy(ops_irrelevant, nr_ops_irrelevant);
}
|
||||
module_exit(ftrace_ops_sample_exit);
|
||||
|
||||
MODULE_AUTHOR("Mark Rutland");
|
||||
MODULE_DESCRIPTION("Example of using custom ftrace_ops");
|
||||
MODULE_LICENSE("GPL");
|
@@ -23,8 +23,8 @@
|
||||
#endif
|
||||
|
||||
/* Assumes debugfs is mounted */
|
||||
const char *data_file = "/sys/kernel/debug/tracing/user_events_data";
|
||||
const char *status_file = "/sys/kernel/debug/tracing/user_events_status";
|
||||
const char *data_file = "/sys/kernel/tracing/user_events_data";
|
||||
const char *status_file = "/sys/kernel/tracing/user_events_status";
|
||||
|
||||
static int event_status(long **status)
|
||||
{
|
||||
|
Reference in New Issue
Block a user