mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 12:43:29 +02:00
stop_machine: Ensure that a queued callback will be called before cpu_stop_park()
cpu_stop_queue_work() checks stopper->enabled before it queues the work, but ->enabled == T can only guarantee cpu_stop_signal_done() if we race with cpu_down(). This is not enough for stop_two_cpus() or stop_machine(), they will deadlock if multi_cpu_stop() won't be called by one of the target CPU's. stop_machine/stop_cpus are fine, they rely on stop_cpus_mutex. But stop_two_cpus() has to check cpu_active() to avoid the same race with hotplug, and this check is very unobvious and probably not even correct if we race with cpu_up(). Change cpu_down() paths to clear ->enabled before cpu_stopper_thread() flushes the pending ->works and returns with KTHREAD_SHOULD_PARK set. Note also that smpboot_thread_call() calls cpu_stop_unpark() which sets enabled == T at CPU_ONLINE stage, so this CPU can't go away until cpu_stopper_thread() is called at least once. This all means that if cpu_stop_queue_work() succeeds, we know that work->fn() will be called. Signed-off-by: Oleg Nesterov <oleg@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Tejun Heo <tj@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: heiko.carstens@de.ibm.com Link: http://lkml.kernel.org/r/20151008145131.GA18139@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
committed by
Ingo Molnar
parent
6af597de62
commit
233e7f267e
@@ -452,6 +452,18 @@ repeat:
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Disable the per-cpu stopper for @cpu and park its kthread, as part of
 * taking the CPU down.  After this returns, cpu_stop_queue_work() on
 * @cpu will fail because ->enabled is false, so a queued work that was
 * accepted earlier is guaranteed to be executed (or flushed) before the
 * stopper thread parks.
 */
void stop_machine_park(int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	/*
	 * Lockless. cpu_stopper_thread() will take stopper->lock and flush
	 * the pending works before it parks, until then it is fine to queue
	 * the new works.
	 */
	stopper->enabled = false;
	kthread_park(stopper->thread);
}
|
||||
|
||||
extern void sched_set_stop_task(int cpu, struct task_struct *stop);
|
||||
|
||||
static void cpu_stop_create(unsigned int cpu)
|
||||
@@ -462,17 +474,8 @@ static void cpu_stop_create(unsigned int cpu)
|
||||
/*
 * smpboot park callback, invoked on the stopper thread of a CPU that is
 * going down.  Empties stopper->works under stopper->lock, completing
 * each pending work with "false" (i.e. telling waiters the work was NOT
 * executed), then verifies the queue is empty.
 *
 * NOTE(review): this text comes from a diff rendering whose +/- markers
 * were stripped; the drain loop and the trailing stopper->enabled /
 * WARN_ON lines likely belong to the old vs. new versions of this
 * function respectively — confirm against upstream commit 233e7f267e
 * before treating the combined body as authoritative.
 */
static void cpu_stop_park(unsigned int cpu)
{
	struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
	struct cpu_stop_work *work, *tmp;
	unsigned long flags;

	/* drain remaining works */
	spin_lock_irqsave(&stopper->lock, flags);
	list_for_each_entry_safe(work, tmp, &stopper->works, list) {
		list_del_init(&work->list);
		/* signal completion with executed == false */
		cpu_stop_signal_done(work->done, false);
	}
	stopper->enabled = false;
	spin_unlock_irqrestore(&stopper->lock, flags);
	/* the queue was drained above; anything left here is a bug */
	WARN_ON(!list_empty(&stopper->works));
}
|
||||
|
||||
static void cpu_stop_unpark(unsigned int cpu)
|
||||
|
Reference in New Issue
Block a user