mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 20:51:03 +02:00
sched/core: Print out straggler tasks in sched_cpu_dying()
Since commit
1cf12e08bc
("sched/hotplug: Consolidate task migration on CPU unplug")
tasks are expected to move themselves out of an out-going CPU. For most
tasks this will be done automagically via BALANCE_PUSH, but percpu kthreads
will have to cooperate and move themselves away one way or another.
Currently, some percpu kthreads (workqueues being a notable example) do not
cooperate nicely and can end up on an out-going CPU at the time
sched_cpu_dying() is invoked.
Print the dying rq's tasks to shed some light on the stragglers.
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210113183141.11974-1-valentin.schneider@arm.com
This commit is contained in:
committed by
Peter Zijlstra
parent
9c7d9017a4
commit
36c6e17bf1
@@ -7574,6 +7574,25 @@ static void calc_load_migrate(struct rq *rq)
|
|||||||
atomic_long_add(delta, &calc_load_tasks);
|
atomic_long_add(delta, &calc_load_tasks);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void dump_rq_tasks(struct rq *rq, const char *loglvl)
|
||||||
|
{
|
||||||
|
struct task_struct *g, *p;
|
||||||
|
int cpu = cpu_of(rq);
|
||||||
|
|
||||||
|
lockdep_assert_held(&rq->lock);
|
||||||
|
|
||||||
|
printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
|
||||||
|
for_each_process_thread(g, p) {
|
||||||
|
if (task_cpu(p) != cpu)
|
||||||
|
continue;
|
||||||
|
|
||||||
|
if (!task_on_rq_queued(p))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
int sched_cpu_dying(unsigned int cpu)
|
int sched_cpu_dying(unsigned int cpu)
|
||||||
{
|
{
|
||||||
struct rq *rq = cpu_rq(cpu);
|
struct rq *rq = cpu_rq(cpu);
|
||||||
@@ -7583,7 +7602,10 @@ int sched_cpu_dying(unsigned int cpu)
|
|||||||
sched_tick_stop(cpu);
|
sched_tick_stop(cpu);
|
||||||
|
|
||||||
rq_lock_irqsave(rq, &rf);
|
rq_lock_irqsave(rq, &rf);
|
||||||
BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
|
if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
|
||||||
|
WARN(true, "Dying CPU not properly vacated!");
|
||||||
|
dump_rq_tasks(rq, KERN_WARNING);
|
||||||
|
}
|
||||||
rq_unlock_irqrestore(rq, &rf);
|
rq_unlock_irqrestore(rq, &rf);
|
||||||
|
|
||||||
calc_load_migrate(rq);
|
calc_load_migrate(rq);
|
||||||
|
Reference in New Issue
Block a user