trace,smp: Add tracepoints for scheduling remotely called functions
Add a tracepoint for when a CSD is queued to a remote CPU's
call_single_queue. This allows finding exactly which CPU queued a given
CSD when looking at a csd_function_{entry,exit} event, and also enables
us to accurately measure IPI delivery time with e.g. a synthetic event:

  $ echo 'hist:keys=cpu,csd.hex:ts=common_timestamp.usecs' >\
      /sys/kernel/tracing/events/smp/csd_queue_cpu/trigger
  $ echo 'csd_latency unsigned int dst_cpu; unsigned long csd; u64 time' >\
      /sys/kernel/tracing/synthetic_events
  $ echo \
  'hist:keys=common_cpu,csd.hex:'\
  'time=common_timestamp.usecs-$ts:'\
  'onmatch(smp.csd_queue_cpu).trace(csd_latency,common_cpu,csd,$time)' >\
      /sys/kernel/tracing/events/smp/csd_function_entry/trigger

  $ trace-cmd record -e 'synthetic:csd_latency' hackbench
  $ trace-cmd report
  <...>-467 [001] 21.824263: csd_queue_cpu:      cpu=0 callsite=try_to_wake_up+0x2ea func=sched_ttwu_pending csd=0xffff8880076148b8
  <...>-467 [001] 21.824280: ipi_send_cpu:       cpu=0 callsite=try_to_wake_up+0x2ea callback=generic_smp_call_function_single_interrupt+0x0
  <...>-489 [000] 21.824299: csd_function_entry: func=sched_ttwu_pending csd=0xffff8880076148b8
  <...>-489 [000] 21.824320: csd_latency:        dst_cpu=0, csd=18446612682193848504, time=36

Suggested-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Leonardo Bras <leobras@redhat.com>
Tested-and-reviewed-by: Valentin Schneider <vschneid@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20230615065944.188876-7-leobras@redhat.com
Committed-by: Peter Zijlstra
Parent: 949fa3f11c
Commit: bf5a8c26ad
include/trace/events/smp.h
@@ -7,6 +7,33 @@
 
 #include <linux/tracepoint.h>
 
+TRACE_EVENT(csd_queue_cpu,
+
+	TP_PROTO(const unsigned int cpu,
+		 unsigned long callsite,
+		 smp_call_func_t func,
+		 struct __call_single_data *csd),
+
+	TP_ARGS(cpu, callsite, func, csd),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+		__field(void *, callsite)
+		__field(void *, func)
+		__field(void *, csd)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->callsite = (void *)callsite;
+		__entry->func = func;
+		__entry->csd = csd;
+	),
+
+	TP_printk("cpu=%u callsite=%pS func=%ps csd=%p",
+		  __entry->cpu, __entry->callsite, __entry->func, __entry->csd)
+);
+
 /*
  * Tracepoints for a function which is called as an effect of smp_call_function.*
  */
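A minimal sketch, not part of this patch: TRACE_EVENT() above also
generates register/unregister helpers, so the event can be consumed from
kernel code as well as from tracefs. The hypothetical module below
assumes the tracepoint is visible to modules (which may require an
EXPORT_TRACEPOINT_SYMBOL_GPL() that this series does not add):

	#include <linux/module.h>
	#include <linux/smp.h>
	#include <trace/events/smp.h>

	/* Tracepoint probes receive the registered data pointer first,
	 * followed by the TP_PROTO() arguments. */
	static void csd_queue_probe(void *data, const unsigned int cpu,
				    unsigned long callsite, smp_call_func_t func,
				    struct __call_single_data *csd)
	{
		pr_debug("csd %p queued to CPU %u from %pS (func=%ps)\n",
			 csd, cpu, (void *)callsite, func);
	}

	static int __init csd_probe_init(void)
	{
		return register_trace_csd_queue_cpu(csd_queue_probe, NULL);
	}

	static void __exit csd_probe_exit(void)
	{
		unregister_trace_csd_queue_cpu(csd_queue_probe, NULL);
		tracepoint_synchronize_unregister();
	}

	module_init(csd_probe_init);
	module_exit(csd_probe_exit);
	MODULE_LICENSE("GPL");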
kernel/smp.c (16 changed lines)
@@ -340,7 +340,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 	 * even if we haven't sent the smp_call IPI yet (e.g. the stopper
 	 * executes migration_cpu_stop() on the remote CPU).
 	 */
-	if (trace_ipi_send_cpu_enabled()) {
+	if (trace_csd_queue_cpu_enabled()) {
 		call_single_data_t *csd;
 		smp_call_func_t func;
 
@@ -348,7 +348,7 @@ void __smp_call_single_queue(int cpu, struct llist_node *node)
 		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
 			sched_ttwu_pending : csd->func;
 
-		trace_ipi_send_cpu(cpu, _RET_IP_, func);
+		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
 	}
 
 	/*
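Note that both __smp_call_single_queue() hunks land in one block: the
queue-time event is renamed from the generic ipi_send_cpu to the new
csd_queue_cpu, and the body stays behind a static-key-backed
trace_csd_queue_cpu_enabled() check so resolving func costs nothing while
tracing is off. Stitched together, the block reads roughly as follows (a
sketch; the csd derivation between the two hunks is pre-existing code
this patch does not touch):

	if (trace_csd_queue_cpu_enabled()) {
		call_single_data_t *csd;
		smp_call_func_t func;

		/* Recover the CSD from the queued llist node. */
		csd = container_of(node, call_single_data_t, node.llist);

		/* TTWU-type CSDs carry no ->func; report the actual handler. */
		func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
			sched_ttwu_pending : csd->func;

		trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
	}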
@@ -741,7 +741,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 	int cpu, last_cpu, this_cpu = smp_processor_id();
 	struct call_function_data *cfd;
 	bool wait = scf_flags & SCF_WAIT;
-	int nr_cpus = 0, nr_queued = 0;
+	int nr_cpus = 0;
 	bool run_remote = false;
 	bool run_local = false;
 
@@ -799,21 +799,15 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 			csd->node.src = smp_processor_id();
 			csd->node.dst = cpu;
 #endif
+			trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);
+
 			if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
 				__cpumask_set_cpu(cpu, cfd->cpumask_ipi);
 				nr_cpus++;
 				last_cpu = cpu;
 			}
-			nr_queued++;
 		}
 
-		/*
-		 * Trace each smp_function_call_*() as an IPI, actual IPIs
-		 * will be traced with func==generic_smp_call_function_single_ipi().
-		 */
-		if (nr_queued)
-			trace_ipi_send_cpumask(cfd->cpumask, _RET_IP_, func);
-
 		/*
 		 * Choose the most efficient way to send an IPI. Note that the
 		 * number of CPUs might be zero due to concurrent changes to the
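Any cross-CPU call funnels through these queueing paths, so existing
callers pick up the new event without changes. A hypothetical caller
(remote_fn and kick_cpu1 are illustrative names, not from the patch):

	#include <linux/printk.h>
	#include <linux/smp.h>

	static void remote_fn(void *info)
	{
		pr_info("running on CPU %d\n", smp_processor_id());
	}

	static void kick_cpu1(void)
	{
		/*
		 * Queues a CSD for CPU 1: csd_queue_cpu fires here at the
		 * callsite and csd_function_{entry,exit} fire on CPU 1;
		 * wait=1 blocks until remote_fn has finished remotely.
		 */
		smp_call_function_single(1, remote_fn, NULL, 1);
	}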