Merge tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar: "The changes in this cycle are: - Optimize the task wakeup CPU selection logic, to improve scalability and reduce wakeup latency spikes - PELT enhancements - CFS bandwidth handling fixes - Optimize the wakeup path by remove rq->wake_list and replacing it with ->ttwu_pending - Optimize IPI cross-calls by making flush_smp_call_function_queue() process sync callbacks first. - Misc fixes and enhancements" * tag 'sched-core-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits) irq_work: Define irq_work_single() on !CONFIG_IRQ_WORK too sched/headers: Split out open-coded prototypes into kernel/sched/smp.h sched: Replace rq::wake_list sched: Add rq::ttwu_pending irq_work, smp: Allow irq_work on call_single_queue smp: Optimize send_call_function_single_ipi() smp: Move irq_work_run() out of flush_smp_call_function_queue() smp: Optimize flush_smp_call_function_queue() sched: Fix smp_call_function_single_async() usage for ILB sched/core: Offload wakee task activation if it the wakee is descheduling sched/core: Optimize ttwu() spinning on p->on_cpu sched: Defend cfs and rt bandwidth quota against overflow sched/cpuacct: Fix charge cpuacct.usage_sys sched/fair: Replace zero-length array with flexible-array sched/pelt: Sync util/runnable_sum with PELT window when propagating sched/cpuacct: Use __this_cpu_add() instead of this_cpu_ptr() sched/fair: Optimize enqueue_task_fair() sched: Make scheduler_ipi inline sched: Clean up scheduler_ipi() sched/core: Simplify sched_init() ...
kernel/cpu.c | 18 +++++++++++++++++-
1 changed file with 17 additions and 1 deletion
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3,6 +3,7 @@
  *
  * This code is licenced under the GPL.
  */
+#include <linux/sched/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -564,6 +565,21 @@ static int bringup_cpu(unsigned int cpu)
 	return bringup_wait_for_ap(cpu);
 }
 
+static int finish_cpu(unsigned int cpu)
+{
+	struct task_struct *idle = idle_thread_get(cpu);
+	struct mm_struct *mm = idle->active_mm;
+
+	/*
+	 * idle_task_exit() will have switched to &init_mm, now
+	 * clean up any remaining active_mm state.
+	 */
+	if (mm != &init_mm)
+		idle->active_mm = &init_mm;
+	mmdrop(mm);
+	return 0;
+}
+
 /*
  * Hotplug state machine related functions
  */
@@ -1549,7 +1565,7 @@ static struct cpuhp_step cpuhp_hp_states[] = {
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
 		.startup.single		= bringup_cpu,
-		.teardown.single	= NULL,
+		.teardown.single	= finish_cpu,
 		.cant_stop		= true,
 	},
 	/* Final state before CPU kills itself */
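The notable hunk above is finish_cpu(), installed as the teardown pairing
for bringup_cpu in the CPUHP_BRINGUP_CPU state: once the outgoing CPU is
dead, the idle task's leftover lazy ->active_mm reference on the last user
mm is dropped from a live CPU via mmdrop(). Below is a minimal standalone
model of that refcount pattern, under the assumption of a simplified
userspace analogue; the atomic counter and the main() scenario are
illustrative, not kernel code (the real mmgrab()/mmdrop() operate on
mm->mm_count).

/*
 * Standalone model of the lazy active_mm refcounting that finish_cpu()
 * completes. Illustrative only: simplified userspace stand-ins for the
 * kernel's mmgrab()/mmdrop().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct mm { atomic_int mm_count; };

static struct mm init_mm = { .mm_count = 1 };

static void mmgrab(struct mm *mm)
{
	atomic_fetch_add(&mm->mm_count, 1);
}

static void mmdrop(struct mm *mm)
{
	/* Free on the last reference; init_mm is never freed. */
	if (atomic_fetch_sub(&mm->mm_count, 1) == 1 && mm != &init_mm) {
		printf("freeing mm %p\n", (void *)mm);
		free(mm);
	}
}

int main(void)
{
	struct mm *user_mm = malloc(sizeof(*user_mm));

	atomic_init(&user_mm->mm_count, 1);	/* the task's own reference */

	mmgrab(user_mm);	/* CPU goes lazy-TLB: the idle task borrows
				 * user_mm as its ->active_mm */
	mmdrop(user_mm);	/* the user task exits and drops its own
				 * reference; the lazy one keeps mm alive */

	/* CPU offlines: idle_task_exit() switches the idle task over to
	 * &init_mm, leaving the lazy reference as the last one... */

	mmdrop(user_mm);	/* ...and finish_cpu(), on a live CPU, drops
				 * it; the mm is finally freed here */
	return 0;
}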