mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 12:43:29 +02:00
In parallel startup mode the APs are kicked alive by the control CPU quickly after each other and run through the early startup code in parallel. The real-mode startup code is already serialized with a bit-spinlock to protect the real-mode stack. In parallel startup mode the smpboot_control variable obviously cannot contain the Linux CPU number so the APs have to determine their Linux CPU number on their own. This is required to find the CPU's per-CPU offset in order to find the idle task stack and other per CPU data. To achieve this, export the cpuid_to_apicid[] array so that each AP can find its own CPU number by searching therein based on its APIC ID. Introduce a flag in the top bits of smpboot_control which indicates that the AP should find its CPU number by reading the APIC ID from the APIC. This is required because CPUID based APIC ID retrieval can only provide the initial APIC ID, which might have been overruled by the firmware. Some AMD APUs come up with APIC ID = initial APIC ID + 0x10, so the APIC ID to CPU number lookup would fail miserably if based on CPUID. Also virtualization can make its own APIC ID assignments. The only requirement is that the APIC IDs are consistent with the ACPI/MADT table. For the boot CPU or in case parallel bringup is disabled the control bits are empty and the CPU number is directly available in bit 0-23 of smpboot_control.
[ tglx: Initial proof of concept patch with bitlock and APIC ID lookup ] [ dwmw2: Rework and testing, commit message, CPUID 0x1 and CPU0 support ] [ seanc: Fix stray override of initial_gs in common_cpu_up() ] [ Oleksandr Natalenko: reported suspend/resume issue fixed in x86_acpi_suspend_lowlevel ] [ tglx: Make it read the APIC ID from the APIC instead of using CPUID, split the bitlock part out ] Co-developed-by: Thomas Gleixner <tglx@linutronix.de> Co-developed-by: Brian Gerst <brgerst@gmail.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Brian Gerst <brgerst@gmail.com> Signed-off-by: David Woodhouse <dwmw@amazon.co.uk> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Michael Kelley <mikelley@microsoft.com> Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name> Tested-by: Helge Deller <deller@gmx.de> # parisc Tested-by: Guilherme G. Piccoli <gpiccoli@igalia.com> # Steam Deck Link: https://lore.kernel.org/r/20230512205257.411554373@linutronix.de
210 lines
5.3 KiB
C
210 lines
5.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#include <asm/cpumask.h>
#include <asm/current.h>
#include <asm/thread_info.h>

extern int smp_num_siblings;
extern unsigned int num_processors;

/* Per-CPU topology masks: SMT siblings, cores and dies sharing a package. */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_l2c_id);

/* CPU number -> APIC/ACPI ID mappings, usable before per-CPU areas are up. */
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_bios_cpu_apicid);
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
DECLARE_EARLY_PER_CPU_READ_MOSTLY(int, x86_cpu_to_logical_apicid);
#endif

struct task_struct;
/*
 * Hooks through which the SMP implementation is driven; the inline
 * wrappers below dispatch into these. Default (native) backends are
 * declared further down (native_smp_prepare_cpus() etc.).
 */
struct smp_ops {
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned max_cpus);
	void (*smp_cpus_done)(unsigned max_cpus);

	/* @wait selects whether the caller waits for the stop (see wrappers) */
	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

	void (*cleanup_dead_cpu)(unsigned cpu);
	void (*poll_sync_state)(void);
	int (*kick_ap_alive)(unsigned cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);	/* optional; may be NULL (see __cpu_die()) */
	void (*play_dead)(void);

	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};
/* Globals due to paravirt */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;
static inline void smp_send_stop(void)
|
|
{
|
|
smp_ops.stop_other_cpus(0);
|
|
}
|
|
|
|
static inline void stop_other_cpus(void)
|
|
{
|
|
smp_ops.stop_other_cpus(1);
|
|
}
|
|
|
|
static inline void smp_prepare_boot_cpu(void)
|
|
{
|
|
smp_ops.smp_prepare_boot_cpu();
|
|
}
|
|
|
|
static inline void smp_prepare_cpus(unsigned int max_cpus)
|
|
{
|
|
smp_ops.smp_prepare_cpus(max_cpus);
|
|
}
|
|
|
|
static inline void smp_cpus_done(unsigned int max_cpus)
|
|
{
|
|
smp_ops.smp_cpus_done(max_cpus);
|
|
}
|
|
|
|
static inline int __cpu_disable(void)
|
|
{
|
|
return smp_ops.cpu_disable();
|
|
}
|
|
|
|
static inline void __cpu_die(unsigned int cpu)
|
|
{
|
|
if (smp_ops.cpu_die)
|
|
smp_ops.cpu_die(cpu);
|
|
}
|
|
|
|
/*
 * Put the current CPU to rest permanently. The hook must not return;
 * BUG() catches a backend that does.
 */
static inline void __noreturn play_dead(void)
{
	smp_ops.play_dead();
	BUG();
}
static inline void arch_smp_send_reschedule(int cpu)
|
|
{
|
|
smp_ops.smp_send_reschedule(cpu);
|
|
}
|
|
|
|
static inline void arch_send_call_function_single_ipi(int cpu)
|
|
{
|
|
smp_ops.send_call_func_single_ipi(cpu);
|
|
}
|
|
|
|
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
|
|
{
|
|
smp_ops.send_call_func_ipi(mask);
|
|
}
|
|
|
|
void cpu_disable_common(void);
|
|
void native_smp_prepare_boot_cpu(void);
|
|
void smp_prepare_cpus_common(void);
|
|
void native_smp_prepare_cpus(unsigned int max_cpus);
|
|
void calculate_max_logical_packages(void);
|
|
void native_smp_cpus_done(unsigned int max_cpus);
|
|
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
|
|
int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
|
|
int native_cpu_disable(void);
|
|
void __noreturn hlt_play_dead(void);
|
|
void native_play_dead(void);
|
|
void play_dead_common(void);
|
|
void wbinvd_on_cpu(int cpu);
|
|
int wbinvd_on_all_cpus(void);
|
|
|
|
void native_smp_send_reschedule(int cpu);
|
|
void native_send_call_func_ipi(const struct cpumask *mask);
|
|
void native_send_call_func_single_ipi(int cpu);
|
|
void x86_idle_thread_init(unsigned int cpu, struct task_struct *idle);
|
|
|
|
void smp_store_boot_cpu_info(void);
|
|
void smp_store_cpu_info(int id);
|
|
|
|
asmlinkage __visible void smp_reboot_interrupt(void);
|
|
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
|
|
__visible void smp_call_function_interrupt(struct pt_regs *regs);
|
|
__visible void smp_call_function_single_interrupt(struct pt_regs *r);
|
|
|
|
/* Linux CPU number -> APIC ID / ACPI processor ID lookups. */
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * This function is needed by all SMP systems. It must _always_ be valid
 * from the initial startup.
 */
#define raw_smp_processor_id()	this_cpu_read(pcpu_hot.cpu_number)
#define __smp_processor_id()	__this_cpu_read(pcpu_hot.cpu_number)

#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
|
|
{
|
|
return per_cpu(cpu_llc_shared_map, cpu);
|
|
}
|
|
|
|
static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
|
|
{
|
|
return per_cpu(cpu_l2c_shared_map, cpu);
|
|
}
|
|
|
|
#else /* !CONFIG_SMP */
/* UP stubs: only one CPU exists, so operate locally. */
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return (struct cpumask *)cpumask_of(0);
}
#endif /* CONFIG_SMP */
extern unsigned disabled_cpus;

#ifdef CONFIG_X86_LOCAL_APIC
extern int hard_smp_processor_id(void);

#else /* CONFIG_X86_LOCAL_APIC */
#define hard_smp_processor_id()	0
#endif /* CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

extern unsigned int smpboot_control;
extern unsigned long apic_mmio_base;

#endif /* !__ASSEMBLY__ */

/* Control bits for startup_64 */
#define STARTUP_READ_APICID	0x80000000

/* Top 8 bits are reserved for control */
#define STARTUP_PARALLEL_MASK	0xFF000000

#endif /* _ASM_X86_SMP_H */