Merge tag 'v5.11' into sched/core, to pick up fixes & refresh the branch
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -1796,13 +1796,28 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
  */
 static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
 {
 	/* When not in the task's cpumask, no point in looking further. */
 	if (!cpumask_test_cpu(cpu, p->cpus_ptr))
 		return false;
 
-	if (is_per_cpu_kthread(p) || is_migration_disabled(p))
+	/* migrate_disabled() must be allowed to finish. */
+	if (is_migration_disabled(p))
 		return cpu_online(cpu);
 
-	return cpu_active(cpu);
+	/* Non kernel threads are not allowed during either online or offline. */
+	if (!(p->flags & PF_KTHREAD))
+		return cpu_active(cpu);
+
+	/* KTHREAD_IS_PER_CPU is always allowed. */
+	if (kthread_is_per_cpu(p))
+		return cpu_online(cpu);
+
+	/* Regular kernel threads don't get to stay during offline. */
+	if (cpu_rq(cpu)->balance_push)
+		return false;
+
+	/* But are allowed during online. */
+	return cpu_online(cpu);
 }
 
 /*
@@ -2327,7 +2342,9 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
 	if (p->flags & PF_KTHREAD || is_migration_disabled(p)) {
 		/*
-		 * Kernel threads are allowed on online && !active CPUs.
+		 * Kernel threads are allowed on online && !active CPUs,
+		 * however, during cpu-hot-unplug, even these might get pushed
+		 * away if not KTHREAD_IS_PER_CPU.
 		 *
 		 * Specifically, migration_disabled() tasks must not fail the
 		 * cpumask_any_and_distribute() pick below, esp. so on
@@ -2371,16 +2388,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 
 	__do_set_cpus_allowed(p, new_mask, flags);
 
-	if (p->flags & PF_KTHREAD) {
-		/*
-		 * For kernel threads that do indeed end up on online &&
-		 * !active we want to ensure they are strict per-CPU threads.
-		 */
-		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
-			!cpumask_intersects(new_mask, cpu_active_mask) &&
-			p->nr_cpus_allowed != 1);
-	}
-
 	return affine_move_task(rq, p, &rf, dest_cpu, flags);
 
 out:
@@ -3121,6 +3128,13 @@ bool cpus_share_cache(int this_cpu, int that_cpu)
 
 static inline bool ttwu_queue_cond(int cpu, int wake_flags)
 {
+	/*
+	 * Do not complicate things with the async wake_list while the CPU is
+	 * in hotplug state.
+	 */
+	if (!cpu_active(cpu))
+		return false;
+
 	/*
 	 * If the CPU does not share cache, then queue the task on the
 	 * remote rqs wakelist to avoid accessing remote data.
@@ -7390,8 +7404,14 @@ static void balance_push(struct rq *rq)
 	/*
 	 * Both the cpu-hotplug and stop task are in this case and are
 	 * required to complete the hotplug process.
+	 *
+	 * XXX: the idle task does not match kthread_is_per_cpu() due to
+	 * histerical raisins.
 	 */
-	if (is_per_cpu_kthread(push_task) || is_migration_disabled(push_task)) {
+	if (rq->idle == push_task ||
+	    ((push_task->flags & PF_KTHREAD) && kthread_is_per_cpu(push_task)) ||
+	    is_migration_disabled(push_task)) {
+
 		/*
 		 * If this is the idle task on the outgoing CPU try to wake
 		 * up the hotplug control thread which might wait for the
@@ -7423,7 +7443,7 @@ static void balance_push(struct rq *rq)
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task
-	 * which is_per_cpu_kthread() and will push this task away.
+	 * which kthread_is_per_cpu() and will push this task away.
 	 */
 	raw_spin_lock(&rq->lock);
 }
@@ -7434,10 +7454,13 @@ static void balance_push_set(int cpu, bool on)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
-	if (on)
+	rq->balance_push = on;
+	if (on) {
+		WARN_ON_ONCE(rq->balance_callback);
 		rq->balance_callback = &balance_push_callback;
-	else
+	} else if (rq->balance_callback == &balance_push_callback) {
 		rq->balance_callback = NULL;
+	}
 	rq_unlock_irqrestore(rq, &rf);
 }
 
@@ -7555,6 +7578,10 @@ int sched_cpu_activate(unsigned int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct rq_flags rf;
 
+	/*
+	 * Make sure that when the hotplug state machine does a roll-back
+	 * we clear balance_push. Ideally that would happen earlier...
+	 */
 	balance_push_set(cpu, false);
 
 #ifdef CONFIG_SCHED_SMT
@@ -7603,17 +7630,27 @@ int sched_cpu_deactivate(unsigned int cpu)
 	nohz_balance_exit_idle(rq);
 
 	set_cpu_active(cpu, false);
 
 	/*
-	 * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
-	 * users of this state to go away such that all new such users will
-	 * observe it.
+	 * From this point forward, this CPU will refuse to run any task that
+	 * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+	 * push those tasks away until this gets cleared, see
+	 * sched_cpu_dying().
+	 */
+	balance_push_set(cpu, true);
+
+	/*
+	 * We've cleared cpu_active_mask / set balance_push, wait for all
+	 * preempt-disabled and RCU users of this state to go away such that
+	 * all new such users will observe it.
 	 *
+	 * Specifically, we rely on ttwu to no longer target this CPU, see
+	 * ttwu_queue_cond() and is_cpu_allowed().
+	 *
 	 * Do sync before park smpboot threads to take care the rcu boost case.
 	 */
 	synchronize_rcu();
 
-	balance_push_set(cpu, true);
-
 	rq_lock_irqsave(rq, &rf);
 	if (rq->rd) {
 		update_rq_clock(rq);
@@ -7694,6 +7731,25 @@ static void calc_load_migrate(struct rq *rq)
 		atomic_long_add(delta, &calc_load_tasks);
 }
 
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+	struct task_struct *g, *p;
+	int cpu = cpu_of(rq);
+
+	lockdep_assert_held(&rq->lock);
+
+	printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+	for_each_process_thread(g, p) {
+		if (task_cpu(p) != cpu)
+			continue;
+
+		if (!task_on_rq_queued(p))
+			continue;
+
+		printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+	}
+}
+
 int sched_cpu_dying(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7703,9 +7759,18 @@ int sched_cpu_dying(unsigned int cpu)
 	sched_tick_stop(cpu);
 
 	rq_lock_irqsave(rq, &rf);
-	BUG_ON(rq->nr_running != 1 || rq_has_pinned_tasks(rq));
+	if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+		WARN(true, "Dying CPU not properly vacated!");
+		dump_rq_tasks(rq, KERN_WARNING);
+	}
 	rq_unlock_irqrestore(rq, &rf);
 
+	/*
+	 * Now that the CPU is offline, make sure we're welcome
+	 * to new tasks once we come back up.
+	 */
+	balance_push_set(cpu, false);
+
 	calc_load_migrate(rq);
 	update_max_interval();
 	hrtick_clear(rq);
@@ -975,6 +975,7 @@ struct rq {
 	unsigned long		cpu_capacity_orig;
 
 	struct callback_head	*balance_callback;
+	unsigned char		balance_push;
 
 	unsigned char		nohz_idle_balance;
 	unsigned char		idle_balance;