Merge tag 'sched-core-2022-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - Updates to scheduler metrics:
     - PELT fixes & enhancements
     - PSI fixes & enhancements
     - Refactor cpu_util_without()

 - Updates to instrumentation/debugging:
     - Remove sched_trace_*() helper functions - can be done via debug info
     - Fix double update_rq_clock() warnings

 - Introduce & use "preemption model accessors" to simplify some of the
   Kconfig complexity (a usage sketch follows the commit list below).

 - Make softirq handling RT-safe.

 - Misc smaller fixes & cleanups.

* tag 'sched-core-2022-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  topology: Remove unused cpu_cluster_mask()
  sched: Reverse sched_class layout
  sched/deadline: Remove superfluous rq clock update in push_dl_task()
  sched/core: Avoid obvious double update_rq_clock warning
  smp: Make softirq handling RT safe in flush_smp_call_function_queue()
  smp: Rename flush_smp_call_function_from_idle()
  sched: Fix missing prototype warnings
  sched/fair: Remove cfs_rq_tg_path()
  sched/fair: Remove sched_trace_*() helper functions
  sched/fair: Refactor cpu_util_without()
  sched/fair: Revise comment about lb decision matrix
  sched/psi: report zeroes for CPU full at the system level
  sched/fair: Delete useless condition in tg_unthrottle_up()
  sched/fair: Fix cfs_rq_clock_pelt() for throttled cfs_rq
  sched/fair: Move calculate of avg_load to a better location
  mailmap: Update my email address to @redhat.com
  MAINTAINERS: Add myself as scheduler topology reviewer
  psi: Fix trigger being fired unexpectedly at initial
  ftrace: Use preemption model accessors for trace header printout
  kcsan: Use preemption model accessors
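The "preemption model accessors" item above refers to boolean helpers (preempt_model_none(), preempt_model_voluntary(), preempt_model_full()) that the ftrace and kcsan commits in the shortlog switch over to. Below is a minimal usage sketch; the accessor names come from this series, while the reporting helper and the header shown are illustrative assumptions, not code from this merge:

/*
 * Sketch: replace compile-time CONFIG_PREEMPT* checks with the runtime
 * accessors, which also give the right answer when CONFIG_PREEMPT_DYNAMIC
 * selects the preemption model at boot time.
 */
#include <linux/sched.h>	/* header placement assumed */

static const char *example_preempt_model_name(void)
{
	if (preempt_model_none())
		return "none";
	if (preempt_model_voluntary())
		return "voluntary";
	if (preempt_model_full())
		return "full";

	return "unknown";
}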
kernel/smp.c (32 lines changed)

--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
-static void flush_smp_call_function_queue(bool warn_cpu_offline);
+static void __flush_smp_call_function_queue(bool warn_cpu_offline);
 
 int smpcfd_prepare_cpu(unsigned int cpu)
 {
@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu)
 	 * ensure that the outgoing CPU doesn't go offline with work
 	 * still pending.
 	 */
-	flush_smp_call_function_queue(false);
+	__flush_smp_call_function_queue(false);
 	irq_work_run();
 	return 0;
 }
@@ -544,11 +544,11 @@ void generic_smp_call_function_single_interrupt(void)
 {
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_GOTIPI);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 }
 
 /**
- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
 *
 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
 *		      offline CPU. Skip this check if set to 'false'.
@@ -561,7 +561,7 @@ void generic_smp_call_function_single_interrupt(void)
 * Loop through the call_single_queue and run all the queued callbacks.
 * Must be called with interrupts disabled.
 */
-static void flush_smp_call_function_queue(bool warn_cpu_offline)
+static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 {
 	call_single_data_t *csd, *csd_next;
 	struct llist_node *entry, *prev;
@@ -684,8 +684,22 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
-void flush_smp_call_function_from_idle(void)
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *				   from task context (idle, migration thread)
+ *
+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
+ * handle queued SMP function calls before scheduling.
+ *
+ * The migration thread has to ensure that an eventually pending wakeup has
+ * been handled before it migrates a task.
+ */
+void flush_smp_call_function_queue(void)
 {
+	unsigned int was_pending;
 	unsigned long flags;
 
 	if (llist_empty(this_cpu_ptr(&call_single_queue)))
@@ -694,9 +708,11 @@ void flush_smp_call_function_from_idle(void)
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
-	flush_smp_call_function_queue(true);
+	/* Get the already pending soft interrupts for RT enabled kernels */
+	was_pending = local_softirq_pending();
+	__flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
-		do_softirq();
+		do_softirq_post_smp_call_flush(was_pending);
 
 	local_irq_restore(flags);
 }
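The last two hunks carry the RT-safe handling: the task-context flush now snapshots the softirqs that were already pending before running the queued callbacks, and hands that snapshot to do_softirq_post_smp_call_flush(). A commented sketch of that sequence follows, written as if it sat in kernel/smp.c next to the code above; the calling order mirrors the new flush_smp_call_function_queue(), while the function name is hypothetical and the comments about RT behaviour paraphrase the series' intent rather than quote the helper's implementation:

/*
 * Sketch of the task-context flush sequence introduced by this merge
 * (illustrative only; not the kernel's function).
 */
static void sketch_task_context_flush(void)
{
	unsigned int was_pending;
	unsigned long flags;

	local_irq_save(flags);

	/* Snapshot softirqs that were pending before the flush. */
	was_pending = local_softirq_pending();

	/* Run the queued smp-call-function callbacks; they may raise softirqs. */
	__flush_smp_call_function_queue(true);

	if (local_softirq_pending())
		/*
		 * Only softirqs raised by the callbacks above need handling
		 * here; was_pending lets the helper tell them apart. On
		 * PREEMPT_RT, softirqs that were already pending stay with
		 * the dedicated softirq handling context instead of being
		 * run from this task context.
		 */
		do_softirq_post_smp_call_flush(was_pending);

	local_irq_restore(flags);
}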