/*
 * Copyright (c) 2018-2021, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2022, Linus Groh <linusg@serenityos.org>
 * Copyright (c) 2022, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/BuiltinWrappers.h>
#include <AK/Format.h>
#include <AK/StackUnwinder.h>
#include <AK/StdLibExtras.h>
#include <AK/StringBuilder.h>
#include <AK/Types.h>

#include <Kernel/Arch/x86_64/Interrupts/APIC.h>
#include <Kernel/Interrupts/InterruptDisabler.h>
#include <Kernel/Library/StdLib.h>
#include <Kernel/Sections.h>
#include <Kernel/Security/Random.h>
#include <Kernel/Tasks/Process.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/Tasks/Thread.h>

#include <Kernel/Arch/Interrupts.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Arch/SafeMem.h>
#include <Kernel/Arch/TrapFrame.h>
#include <Kernel/Arch/x86_64/CPUID.h>
#include <Kernel/Arch/x86_64/MSR.h>
#include <Kernel/Arch/x86_64/ProcessorInfo.h>
#include <Kernel/Library/ScopedCritical.h>

#include <Kernel/Arch/PageDirectory.h>
#include <Kernel/Memory/ScopedAddressSpaceSwitcher.h>

namespace Kernel {

READONLY_AFTER_INIT static ProcessorContainer s_processors {};
READONLY_AFTER_INIT static bool volatile s_smp_enabled;

static Atomic<ProcessorMessage*> s_message_pool;
Atomic<u32> Processor::s_idle_cpu_mask { 0 };

extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread) __attribute__((used));
extern "C" FlatPtr do_init_context(Thread* thread, u32 flags) __attribute__((used));
extern "C" void syscall_entry();

template<typename T>
bool ProcessorBase<T>::is_smp_enabled()
{
    return s_smp_enabled;
}

UNMAP_AFTER_INIT static void sse_init()
{
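    // Clear CR0.EM (bit 2) so FPU/SSE instructions execute natively instead of
    // trapping, and set CR0.MP (bit 1) so WAIT/FWAIT honors CR0.TS. Setting
    // CR4.OSFXSR and CR4.OSXMMEXCPT (bits 9 and 10, together 0x600) declares OS
    // support for FXSAVE/FXRSTOR and unmasked SIMD floating-point exceptions.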
    write_cr0((read_cr0() & 0xfffffffbu) | 0x2);
    write_cr4(read_cr4() | 0x600);
}

UNMAP_AFTER_INIT void Processor::cpu_detect()
{
    // NOTE: This is called during Processor::early_initialize, we cannot
    //       safely log at this point because we don't have kmalloc
    //       initialized yet!
    m_features = CPUFeature::Type(0u);

    CPUID processor_info(0x1);

    auto handle_edx_bit_11_feature = [&] {
        u32 stepping = processor_info.eax() & 0xf;
        u32 model = (processor_info.eax() >> 4) & 0xf;
        u32 family = (processor_info.eax() >> 8) & 0xf;
        // FIXME: I have no clue what these mean or where it's from (the Intel manual I've seen just says EDX[11] is SEP).
        //        If you do, please convert them to constants or add comments!
        if (!(family == 6 && model < 3 && stepping < 3))
            m_features |= CPUFeature::SEP;
        if ((family == 6 && model >= 3) || (family == 0xf && model >= 0xe))
            m_features |= CPUFeature::CONSTANT_TSC;
    };
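
    // The bit positions tested below follow the CPUID leaf 0x1 ECX/EDX feature
    // flag layout from the Intel SDM; each set bit maps 1:1 to a CPUFeature flag.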
    if (processor_info.ecx() & (1 << 0))
        m_features |= CPUFeature::SSE3;
    if (processor_info.ecx() & (1 << 1))
        m_features |= CPUFeature::PCLMULQDQ;
    if (processor_info.ecx() & (1 << 2))
        m_features |= CPUFeature::DTES64;
    if (processor_info.ecx() & (1 << 3))
        m_features |= CPUFeature::MONITOR;
    if (processor_info.ecx() & (1 << 4))
        m_features |= CPUFeature::DS_CPL;
    if (processor_info.ecx() & (1 << 5))
        m_features |= CPUFeature::VMX;
    if (processor_info.ecx() & (1 << 6))
        m_features |= CPUFeature::SMX;
    if (processor_info.ecx() & (1 << 7))
        m_features |= CPUFeature::EST;
    if (processor_info.ecx() & (1 << 8))
        m_features |= CPUFeature::TM2;
    if (processor_info.ecx() & (1 << 9))
        m_features |= CPUFeature::SSSE3;
    if (processor_info.ecx() & (1 << 10))
        m_features |= CPUFeature::CNXT_ID;
    if (processor_info.ecx() & (1 << 11))
        m_features |= CPUFeature::SDBG;
    if (processor_info.ecx() & (1 << 12))
        m_features |= CPUFeature::FMA;
    if (processor_info.ecx() & (1 << 13))
        m_features |= CPUFeature::CX16;
    if (processor_info.ecx() & (1 << 14))
        m_features |= CPUFeature::XTPR;
    if (processor_info.ecx() & (1 << 15))
        m_features |= CPUFeature::PDCM;
    if (processor_info.ecx() & (1 << 17))
        m_features |= CPUFeature::PCID;
    if (processor_info.ecx() & (1 << 18))
        m_features |= CPUFeature::DCA;
    if (processor_info.ecx() & (1 << 19))
        m_features |= CPUFeature::SSE4_1;
    if (processor_info.ecx() & (1 << 20))
        m_features |= CPUFeature::SSE4_2;
    if (processor_info.ecx() & (1 << 21))
        m_features |= CPUFeature::X2APIC;
    if (processor_info.ecx() & (1 << 22))
        m_features |= CPUFeature::MOVBE;
    if (processor_info.ecx() & (1 << 23))
        m_features |= CPUFeature::POPCNT;
    if (processor_info.ecx() & (1 << 24))
        m_features |= CPUFeature::TSC_DEADLINE;
    if (processor_info.ecx() & (1 << 25))
        m_features |= CPUFeature::AES;
    if (processor_info.ecx() & (1 << 26))
        m_features |= CPUFeature::XSAVE;
    if (processor_info.ecx() & (1 << 27))
        m_features |= CPUFeature::OSXSAVE;
    if (processor_info.ecx() & (1 << 28))
        m_features |= CPUFeature::AVX;
    if (processor_info.ecx() & (1 << 29))
        m_features |= CPUFeature::F16C;
    if (processor_info.ecx() & (1 << 30))
        m_features |= CPUFeature::RDRAND;
    if (processor_info.ecx() & (1 << 31))
        m_features |= CPUFeature::HYPERVISOR;

    if (processor_info.edx() & (1 << 0))
        m_features |= CPUFeature::FPU;
    if (processor_info.edx() & (1 << 1))
        m_features |= CPUFeature::VME;
    if (processor_info.edx() & (1 << 2))
        m_features |= CPUFeature::DE;
    if (processor_info.edx() & (1 << 3))
        m_features |= CPUFeature::PSE;
    if (processor_info.edx() & (1 << 4))
        m_features |= CPUFeature::TSC;
    if (processor_info.edx() & (1 << 5))
        m_features |= CPUFeature::MSR;
    if (processor_info.edx() & (1 << 6))
        m_features |= CPUFeature::PAE;
    if (processor_info.edx() & (1 << 7))
        m_features |= CPUFeature::MCE;
    if (processor_info.edx() & (1 << 8))
        m_features |= CPUFeature::CX8;
    if (processor_info.edx() & (1 << 9))
        m_features |= CPUFeature::APIC;
    if (processor_info.edx() & (1 << 11))
        handle_edx_bit_11_feature();
    if (processor_info.edx() & (1 << 12))
        m_features |= CPUFeature::MTRR;
    if (processor_info.edx() & (1 << 13))
        m_features |= CPUFeature::PGE;
    if (processor_info.edx() & (1 << 14))
        m_features |= CPUFeature::MCA;
    if (processor_info.edx() & (1 << 15))
        m_features |= CPUFeature::CMOV;
    if (processor_info.edx() & (1 << 16))
        m_features |= CPUFeature::PAT;
    if (processor_info.edx() & (1 << 17))
        m_features |= CPUFeature::PSE36;
    if (processor_info.edx() & (1 << 18))
        m_features |= CPUFeature::PSN;
    if (processor_info.edx() & (1 << 19))
        m_features |= CPUFeature::CLFLUSH;
    if (processor_info.edx() & (1 << 21))
        m_features |= CPUFeature::DS;
    if (processor_info.edx() & (1 << 22))
        m_features |= CPUFeature::ACPI;
    if (processor_info.edx() & (1 << 23))
        m_features |= CPUFeature::MMX;
    if (processor_info.edx() & (1 << 24))
        m_features |= CPUFeature::FXSR;
    if (processor_info.edx() & (1 << 25))
        m_features |= CPUFeature::SSE;
    if (processor_info.edx() & (1 << 26))
        m_features |= CPUFeature::SSE2;
    if (processor_info.edx() & (1 << 27))
        m_features |= CPUFeature::SS;
    if (processor_info.edx() & (1 << 28))
        m_features |= CPUFeature::HTT;
    if (processor_info.edx() & (1 << 29))
        m_features |= CPUFeature::TM;
    if (processor_info.edx() & (1 << 30))
        m_features |= CPUFeature::IA64;
    if (processor_info.edx() & (1 << 31))
        m_features |= CPUFeature::PBE;
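
    // CPUID leaf 0x7 (ECX=0) reports the "structured extended feature flags":
    // newer ISA extensions (BMI, AVX-512, CET, AMX, ...) and the speculation
    // control bits (SPEC_CTRL, STIBP, SSBD, ...) used for CPU mitigations.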
    CPUID extended_features(0x7);

    if (extended_features.ebx() & (1 << 0))
        m_features |= CPUFeature::FSGSBASE;
    if (extended_features.ebx() & (1 << 1))
        m_features |= CPUFeature::TSC_ADJUST;
    if (extended_features.ebx() & (1 << 2))
        m_features |= CPUFeature::SGX;
    if (extended_features.ebx() & (1 << 3))
        m_features |= CPUFeature::BMI1;
    if (extended_features.ebx() & (1 << 4))
        m_features |= CPUFeature::HLE;
    if (extended_features.ebx() & (1 << 5))
        m_features |= CPUFeature::AVX2;
    if (extended_features.ebx() & (1 << 6))
        m_features |= CPUFeature::FDP_EXCPTN_ONLY;
    if (extended_features.ebx() & (1 << 7))
        m_features |= CPUFeature::SMEP;
    if (extended_features.ebx() & (1 << 8))
        m_features |= CPUFeature::BMI2;
    if (extended_features.ebx() & (1 << 9))
        m_features |= CPUFeature::ERMS;
    if (extended_features.ebx() & (1 << 10))
        m_features |= CPUFeature::INVPCID;
    if (extended_features.ebx() & (1 << 11))
        m_features |= CPUFeature::RTM;
    if (extended_features.ebx() & (1 << 12))
        m_features |= CPUFeature::PQM;
    if (extended_features.ebx() & (1 << 13))
        m_features |= CPUFeature::ZERO_FCS_FDS;
    if (extended_features.ebx() & (1 << 14))
        m_features |= CPUFeature::MPX;
    if (extended_features.ebx() & (1 << 15))
        m_features |= CPUFeature::PQE;
    if (extended_features.ebx() & (1 << 16))
        m_features |= CPUFeature::AVX512_F;
    if (extended_features.ebx() & (1 << 17))
        m_features |= CPUFeature::AVX512_DQ;
    if (extended_features.ebx() & (1 << 18))
        m_features |= CPUFeature::RDSEED;
    if (extended_features.ebx() & (1 << 19))
        m_features |= CPUFeature::ADX;
    if (extended_features.ebx() & (1 << 20))
        m_features |= CPUFeature::SMAP;
    if (extended_features.ebx() & (1 << 21))
        m_features |= CPUFeature::AVX512_IFMA;
    if (extended_features.ebx() & (1 << 22))
        m_features |= CPUFeature::PCOMMIT;
    if (extended_features.ebx() & (1 << 23))
        m_features |= CPUFeature::CLFLUSHOPT;
    if (extended_features.ebx() & (1 << 24))
        m_features |= CPUFeature::CLWB;
    if (extended_features.ebx() & (1 << 25))
        m_features |= CPUFeature::INTEL_PT;
    if (extended_features.ebx() & (1 << 26))
        m_features |= CPUFeature::AVX512_PF;
    if (extended_features.ebx() & (1 << 27))
        m_features |= CPUFeature::AVX512_ER;
    if (extended_features.ebx() & (1 << 28))
        m_features |= CPUFeature::AVX512_CD;
    if (extended_features.ebx() & (1 << 29))
        m_features |= CPUFeature::SHA;
    if (extended_features.ebx() & (1 << 30))
        m_features |= CPUFeature::AVX512_BW;
    if (extended_features.ebx() & (1 << 31))
        m_features |= CPUFeature::AVX512_VL;

    if (extended_features.ecx() & (1 << 0))
        m_features |= CPUFeature::PREFETCHWT1;
    if (extended_features.ecx() & (1 << 1))
        m_features |= CPUFeature::AVX512_VBMI;
    if (extended_features.ecx() & (1 << 2))
        m_features |= CPUFeature::UMIP;
    if (extended_features.ecx() & (1 << 3))
        m_features |= CPUFeature::PKU;
    if (extended_features.ecx() & (1 << 4))
        m_features |= CPUFeature::OSPKE;
    if (extended_features.ecx() & (1 << 5))
        m_features |= CPUFeature::WAITPKG;
    if (extended_features.ecx() & (1 << 6))
        m_features |= CPUFeature::AVX512_VBMI2;
    if (extended_features.ecx() & (1 << 7))
        m_features |= CPUFeature::CET_SS;
    if (extended_features.ecx() & (1 << 8))
        m_features |= CPUFeature::GFNI;
    if (extended_features.ecx() & (1 << 9))
        m_features |= CPUFeature::VAES;
    if (extended_features.ecx() & (1 << 10))
        m_features |= CPUFeature::VPCLMULQDQ;
    if (extended_features.ecx() & (1 << 11))
        m_features |= CPUFeature::AVX512_VNNI;
    if (extended_features.ecx() & (1 << 12))
        m_features |= CPUFeature::AVX512_BITALG;
    if (extended_features.ecx() & (1 << 13))
        m_features |= CPUFeature::TME_EN;
    if (extended_features.ecx() & (1 << 14))
        m_features |= CPUFeature::AVX512_VPOPCNTDQ;
    if (extended_features.ecx() & (1 << 16))
        m_features |= CPUFeature::INTEL_5_LEVEL_PAGING;
    if (extended_features.ecx() & (1 << 22))
        m_features |= CPUFeature::RDPID;
    if (extended_features.ecx() & (1 << 23))
        m_features |= CPUFeature::KL;
    if (extended_features.ecx() & (1 << 25))
        m_features |= CPUFeature::CLDEMOTE;
    if (extended_features.ecx() & (1 << 27))
        m_features |= CPUFeature::MOVDIRI;
    if (extended_features.ecx() & (1 << 28))
        m_features |= CPUFeature::MOVDIR64B;
    if (extended_features.ecx() & (1 << 29))
        m_features |= CPUFeature::ENQCMD;
    if (extended_features.ecx() & (1 << 30))
        m_features |= CPUFeature::SGX_LC;
    if (extended_features.ecx() & (1 << 31))
        m_features |= CPUFeature::PKS;

    if (extended_features.edx() & (1 << 2))
        m_features |= CPUFeature::AVX512_4VNNIW;
    if (extended_features.edx() & (1 << 3))
        m_features |= CPUFeature::AVX512_4FMAPS;
    if (extended_features.edx() & (1 << 4))
        m_features |= CPUFeature::FSRM;
    if (extended_features.edx() & (1 << 8))
        m_features |= CPUFeature::AVX512_VP2INTERSECT;
    if (extended_features.edx() & (1 << 9))
        m_features |= CPUFeature::SRBDS_CTRL;
    if (extended_features.edx() & (1 << 10))
        m_features |= CPUFeature::MD_CLEAR;
    if (extended_features.edx() & (1 << 11))
        m_features |= CPUFeature::RTM_ALWAYS_ABORT;
    if (extended_features.edx() & (1 << 13))
        m_features |= CPUFeature::TSX_FORCE_ABORT;
    if (extended_features.edx() & (1 << 14))
        m_features |= CPUFeature::SERIALIZE;
    if (extended_features.edx() & (1 << 15))
        m_features |= CPUFeature::HYBRID;
    if (extended_features.edx() & (1 << 16))
        m_features |= CPUFeature::TSXLDTRK;
    if (extended_features.edx() & (1 << 18))
        m_features |= CPUFeature::PCONFIG;
    if (extended_features.edx() & (1 << 19))
        m_features |= CPUFeature::LBR;
    if (extended_features.edx() & (1 << 20))
        m_features |= CPUFeature::CET_IBT;
    if (extended_features.edx() & (1 << 22))
        m_features |= CPUFeature::AMX_BF16;
    if (extended_features.edx() & (1 << 23))
        m_features |= CPUFeature::AVX512_FP16;
    if (extended_features.edx() & (1 << 24))
        m_features |= CPUFeature::AMX_TILE;
    if (extended_features.edx() & (1 << 25))
        m_features |= CPUFeature::AMX_INT8;
    if (extended_features.edx() & (1 << 26))
        m_features |= CPUFeature::SPEC_CTRL;
    if (extended_features.edx() & (1 << 27))
        m_features |= CPUFeature::STIBP;
    if (extended_features.edx() & (1 << 28))
        m_features |= CPUFeature::L1D_FLUSH;
    if (extended_features.edx() & (1 << 29))
        m_features |= CPUFeature::IA32_ARCH_CAPABILITIES;
    if (extended_features.edx() & (1 << 30))
        m_features |= CPUFeature::IA32_CORE_CAPABILITIES;
    if (extended_features.edx() & (1 << 31))
        m_features |= CPUFeature::SSBD;
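
    // Extended function leaves start at 0x80000000; its EAX reports the highest
    // extended leaf implemented, so every query below is gated on that value.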
    u32 max_extended_leaf = CPUID(0x80000000).eax();

    if (max_extended_leaf >= 0x80000001) {
        CPUID extended_processor_info(0x80000001);
        if (extended_processor_info.ecx() & (1 << 0))
            m_features |= CPUFeature::LAHF_LM;
        if (extended_processor_info.ecx() & (1 << 1))
            m_features |= CPUFeature::CMP_LEGACY;
        if (extended_processor_info.ecx() & (1 << 2))
            m_features |= CPUFeature::SVM;
        if (extended_processor_info.ecx() & (1 << 3))
            m_features |= CPUFeature::EXTAPIC;
        if (extended_processor_info.ecx() & (1 << 4))
            m_features |= CPUFeature::CR8_LEGACY;
        if (extended_processor_info.ecx() & (1 << 5))
            m_features |= CPUFeature::ABM;
        if (extended_processor_info.ecx() & (1 << 6))
            m_features |= CPUFeature::SSE4A;
        if (extended_processor_info.ecx() & (1 << 7))
            m_features |= CPUFeature::MISALIGNSSE;
        if (extended_processor_info.ecx() & (1 << 8))
            m_features |= CPUFeature::_3DNOWPREFETCH;
        if (extended_processor_info.ecx() & (1 << 9))
            m_features |= CPUFeature::OSVW;
        if (extended_processor_info.ecx() & (1 << 10))
            m_features |= CPUFeature::IBS;
        if (extended_processor_info.ecx() & (1 << 11))
            m_features |= CPUFeature::XOP;
        if (extended_processor_info.ecx() & (1 << 12))
            m_features |= CPUFeature::SKINIT;
        if (extended_processor_info.ecx() & (1 << 13))
            m_features |= CPUFeature::WDT;
        if (extended_processor_info.ecx() & (1 << 15))
            m_features |= CPUFeature::LWP;
        if (extended_processor_info.ecx() & (1 << 16))
            m_features |= CPUFeature::FMA4;
        if (extended_processor_info.ecx() & (1 << 17))
            m_features |= CPUFeature::TCE;
        if (extended_processor_info.ecx() & (1 << 19))
            m_features |= CPUFeature::NODEID_MSR;
        if (extended_processor_info.ecx() & (1 << 21))
            m_features |= CPUFeature::TBM;
        if (extended_processor_info.ecx() & (1 << 22))
            m_features |= CPUFeature::TOPOEXT;
        if (extended_processor_info.ecx() & (1 << 23))
            m_features |= CPUFeature::PERFCTR_CORE;
        if (extended_processor_info.ecx() & (1 << 24))
            m_features |= CPUFeature::PERFCTR_NB;
        if (extended_processor_info.ecx() & (1 << 26))
            m_features |= CPUFeature::DBX;
        if (extended_processor_info.ecx() & (1 << 27))
            m_features |= CPUFeature::PERFTSC;
        if (extended_processor_info.ecx() & (1 << 28))
            m_features |= CPUFeature::PCX_L2I;

        if (extended_processor_info.edx() & (1 << 11))
            m_features |= CPUFeature::SYSCALL; // Only available in 64 bit mode
        if (extended_processor_info.edx() & (1 << 19))
            m_features |= CPUFeature::MP;
        if (extended_processor_info.edx() & (1 << 20))
            m_features |= CPUFeature::NX;
        if (extended_processor_info.edx() & (1 << 22))
            m_features |= CPUFeature::MMXEXT;
        if (extended_processor_info.edx() & (1 << 25))
            m_features |= CPUFeature::FXSR_OPT;
        if (extended_processor_info.edx() & (1 << 26))
            m_features |= CPUFeature::PDPE1GB;
        if (extended_processor_info.edx() & (1 << 27))
            m_features |= CPUFeature::RDTSCP;
        if (extended_processor_info.edx() & (1 << 29))
            m_features |= CPUFeature::LM;
        if (extended_processor_info.edx() & (1 << 30))
            m_features |= CPUFeature::_3DNOWEXT;
        if (extended_processor_info.edx() & (1 << 31))
            m_features |= CPUFeature::_3DNOW;
    }

    if (max_extended_leaf >= 0x80000007) {
        CPUID cpuid(0x80000007);
        if (cpuid.edx() & (1 << 8)) {
            m_features |= CPUFeature::CONSTANT_TSC;
            m_features |= CPUFeature::NONSTOP_TSC;
        }
    }

    if (max_extended_leaf >= 0x80000008) {
        // CPUID.80000008H:EAX[7:0] reports the physical-address width supported by the processor.
        CPUID cpuid(0x80000008);
        m_physical_address_bit_width = cpuid.eax() & 0xff;
        // CPUID.80000008H:EAX[15:8] reports the linear-address width supported by the processor.
        m_virtual_address_bit_width = (cpuid.eax() >> 8) & 0xff;
    } else {
        // For processors that do not support CPUID function 80000008H, the width is generally 36 if CPUID.01H:EDX.PAE [bit 6] = 1 and 32 otherwise.
        m_physical_address_bit_width = has_feature(CPUFeature::PAE) ? 36 : 32;
        // Processors that do not support CPUID function 80000008H support a linear-address width of 32.
        m_virtual_address_bit_width = 32;
        // Workaround QEMU hypervisor.framework bug
        // https://gitlab.com/qemu-project/qemu/-/issues/664
        //
        // We detect this as follows:
        //    * We're in a hypervisor
        //    * hypervisor_leaf_range is null under Hypervisor.framework
        //    * m_physical_address_bit_width is 36 bits
        if (has_feature(CPUFeature::HYPERVISOR)) {
            CPUID hypervisor_leaf_range(0x40000000);
            if (!hypervisor_leaf_range.ebx() && m_physical_address_bit_width == 36) {
                m_has_qemu_hvf_quirk.set();
                m_virtual_address_bit_width = 48;
            }
        }
    }
}

UNMAP_AFTER_INIT void Processor::cpu_setup()
{
    // NOTE: This is called during Processor::early_initialize, we cannot
    //       safely log at this point because we don't have kmalloc
    //       initialized yet!
    cpu_detect();

    if (has_feature(CPUFeature::SSE)) {
        // enter_thread_context() assumes that if a x86 CPU supports SSE then it also supports FXSR.
        // SSE support without FXSR is an extremely unlikely scenario, so let's be pragmatic about it.
        VERIFY(has_feature(CPUFeature::FXSR));
        sse_init();
    }
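
    // Set CR0.WP (bit 16) so ring 0 also faults on writes to read-only pages.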
    write_cr0(read_cr0() | 0x00010000);

    if (has_feature(CPUFeature::PGE)) {
        // Turn on CR4.PGE so the CPU will respect the G bit in page tables.
        write_cr4(read_cr4() | 0x80);
    }

    if (has_feature(CPUFeature::NX)) {
        // Turn on IA32_EFER.NXE
        MSR ia32_efer(MSR_IA32_EFER);
        ia32_efer.set(ia32_efer.get() | 0x800);
    }

    if (has_feature(CPUFeature::PAT)) {
        MSR ia32_pat(MSR_IA32_PAT);
        // Set PA4 to Write Combine. This allows us to
        // use this mode by only setting the bit in the PTE
        // and leaving all other bits in the upper levels unset,
        // which maps to setting bit 3 of the index, resulting
        // in the index value 0 or 4.
        u64 pat = ia32_pat.get() & ~(0x7ull << 32);
        pat |= 0x1ull << 32; // set WC mode for PA4
        ia32_pat.set(pat);
    }

    if (has_feature(CPUFeature::SMEP)) {
        // Turn on CR4.SMEP
        write_cr4(read_cr4() | 0x100000);
    }

    if (has_feature(CPUFeature::SMAP)) {
        // Turn on CR4.SMAP
        write_cr4(read_cr4() | 0x200000);
    }

    if (has_feature(CPUFeature::UMIP)) {
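        // Turn on CR4.UMIP (bit 11) to block SGDT/SIDT/SLDT/SMSW/STR from ring 3.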
        write_cr4(read_cr4() | 0x800);
    }

    if (has_feature(CPUFeature::XSAVE)) {
        // Turn on CR4.OSXSAVE
        write_cr4(read_cr4() | 0x40000);

        // According to the Intel manual: "After reset, all bits (except bit 0) in XCR0 are cleared to zero; XCR0[0] is set to 1."
        // Sadly we can't trust this, for example VirtualBox starts with bits 0-4 set, so let's do it ourselves.
        write_xcr0(0x1);

        if (has_feature(CPUFeature::AVX)) {
            // Turn on SSE, AVX and x87 flags
            write_xcr0(read_xcr0() | SIMD::StateComponent::AVX | SIMD::StateComponent::SSE | SIMD::StateComponent::X87);
        }
    }

    // x86_64 processors must support the syscall feature.
    VERIFY(has_feature(CPUFeature::SYSCALL));
    MSR efer_msr(MSR_EFER);
    efer_msr.set(efer_msr.get() | 1u);

    // Write code and stack selectors to the STAR MSR. The first value stored in bits 63:48 controls the sysret CS (value + 0x10) and SS (value + 0x8),
    // and the value stored in bits 47:32 controls the syscall CS (value) and SS (value + 0x8).
    u64 star = 0;
    star |= 0x13ul << 48u;
    star |= 0x08ul << 32u;
    MSR star_msr(MSR_STAR);
    star_msr.set(star);
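
    // Assuming the selector layout established in gdt_init() below (CODE0 = 0x08,
    // DATA0 = 0x10, DATA3 = 0x18, CODE3 = 0x20): syscall loads CS = 0x08 and
    // SS = 0x10, while sysret loads CS = 0x13 + 0x10 = 0x23 (CODE3 | RPL 3) and
    // SS = 0x13 + 0x08 = 0x1b (DATA3 | RPL 3).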

    // Write the syscall entry point to the LSTAR MSR.
    MSR lstar_msr(MSR_LSTAR);
    lstar_msr.set(reinterpret_cast<u64>(&syscall_entry));

    // Write the SFMASK MSR. This MSR controls which bits of rflags are masked when a syscall instruction is executed -
    // if a bit is set in sfmask, the corresponding bit in rflags is cleared. The value set here clears most of rflags,
    // but keeps the reserved and virtualization bits intact. The userspace rflags value is saved in r11 by syscall.
    constexpr u64 rflags_mask = 0x257fd5u;
    MSR sfmask_msr(MSR_SFMASK);
    sfmask_msr.set(rflags_mask);

    if (has_feature(CPUFeature::FSGSBASE)) {
        // Turn off CR4.FSGSBASE to ensure the current Processor base kernel address is not leaked via
        // the RDGSBASE instruction until we implement proper GS swapping at the userspace/kernel boundaries
        write_cr4(read_cr4() & ~0x10000);
    }
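
    // A second CPUID pass is needed because bits like OSXSAVE mirror OS-controlled
    // CR4 state that was only enabled just above.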
    // Query OS-enabled CPUID features again, and set the flags if needed.
    CPUID processor_info(0x1);
    if (processor_info.ecx() & (1 << 27))
        m_features |= CPUFeature::OSXSAVE;
    CPUID extended_features(0x7);
    if (extended_features.ecx() & (1 << 4))
        m_features |= CPUFeature::OSPKE;
}

template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::early_initialize(u32 cpu)
{
    m_self = static_cast<Processor*>(this);
    auto self = static_cast<Processor*>(this);

    m_cpu = cpu;
    m_in_irq = 0;
    m_in_critical = 0;
    m_invoke_scheduler_async = false;
    m_in_scheduler = true;

    self->m_message_queue = nullptr;
    m_idle_thread = nullptr;
    m_current_thread = nullptr;
    self->m_info = nullptr;

    m_halt_requested = false;
    if (cpu == 0) {
        s_smp_enabled = false;
        g_total_processors.store(1u, AK::MemoryOrder::memory_order_release);
    } else {
        g_total_processors.fetch_add(1u, AK::MemoryOrder::memory_order_acq_rel);
    }

    m_deferred_call_pool.init();

    self->cpu_setup();
    self->gdt_init();

    VERIFY(is_initialized());
    VERIFY(&current() == this); // sanity check
}

template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize(u32 cpu)
{
    VERIFY(m_self == this);
    VERIFY(&current() == this); // sanity check

    auto self = static_cast<Processor*>(this);

    self->m_info = new ProcessorInfo(*self);

    dmesgln("CPU[{}]: Supported features: {}", current_id(), self->m_info->features_string());
    if (!has_feature(CPUFeature::RDRAND))
        dmesgln("CPU[{}]: No RDRAND support detected, randomness will be poor", current_id());
    dmesgln("CPU[{}]: Physical address bit width: {}", current_id(), m_physical_address_bit_width);
    dmesgln("CPU[{}]: Virtual address bit width: {}", current_id(), m_virtual_address_bit_width);
    if (self->m_has_qemu_hvf_quirk.was_set())
        dmesgln("CPU[{}]: Applied correction for QEMU Hypervisor.framework quirk", current_id());

    if (cpu == 0)
        initialize_interrupts();
    else
        flush_idt();

    if (cpu == 0) {
        VERIFY((FlatPtr(&s_clean_fpu_state) & 0xF) == 0);
        asm volatile("fninit");
        // Initialize AVX state
        if (has_feature(CPUFeature::XSAVE | CPUFeature::AVX)) {
            asm volatile("xsave %0\n"
                         : "=m"(s_clean_fpu_state)
                         : "a"(static_cast<u32>(SIMD::StateComponent::AVX | SIMD::StateComponent::SSE | SIMD::StateComponent::X87)), "d"(0u));
        } else if (has_feature(CPUFeature::FXSR)) {
            asm volatile("fxsave %0"
                         : "=m"(s_clean_fpu_state));
        } else {
            asm volatile("fnsave %0"
                         : "=m"(s_clean_fpu_state));
        }

        if (has_feature(CPUFeature::HYPERVISOR))
            self->detect_hypervisor();
    }

    {
        // We need to prevent races between APs starting up at the same time
        VERIFY(cpu < s_processors.size());
        s_processors[cpu] = static_cast<Processor*>(this);
    }
}

UNMAP_AFTER_INIT void Processor::detect_hypervisor()
{
    CPUID hypervisor_leaf_range(0x40000000);
    auto hypervisor_vendor_id_string = m_info->hypervisor_vendor_id_string();
    dmesgln("CPU[{}]: CPUID hypervisor signature '{}', max leaf {:#x}", current_id(), hypervisor_vendor_id_string, hypervisor_leaf_range.eax());

    if (hypervisor_vendor_id_string == "Microsoft Hv"sv)
        detect_hypervisor_hyperv(hypervisor_leaf_range);
}

UNMAP_AFTER_INIT void Processor::detect_hypervisor_hyperv(CPUID const& hypervisor_leaf_range)
{
    if (hypervisor_leaf_range.eax() < 0x40000001)
        return;

    CPUID hypervisor_interface(0x40000001);

    // Get signature of hypervisor interface.
    alignas(sizeof(u32)) char interface_signature_buffer[5];
    *reinterpret_cast<u32*>(interface_signature_buffer) = hypervisor_interface.eax();
    interface_signature_buffer[4] = '\0';
    StringView hyperv_interface_signature { interface_signature_buffer, strlen(interface_signature_buffer) };

    dmesgln("CPU[{}]: Hyper-V interface signature '{}' ({:#x})", current_id(), hyperv_interface_signature, hypervisor_interface.eax());

    if (hypervisor_leaf_range.eax() < 0x40000002)
        return;

    CPUID hypervisor_sysid(0x40000002);
    dmesgln("CPU[{}]: Hyper-V system identity {}.{}, build number {}", current_id(), hypervisor_sysid.ebx() >> 16, hypervisor_sysid.ebx() & 0xFFFF, hypervisor_sysid.eax());

    if (hypervisor_leaf_range.eax() < 0x40000005 || hyperv_interface_signature != "Hv#1"sv)
        return;

    dmesgln("CPU[{}]: Hyper-V hypervisor detected", current_id());

    // TODO: Actually do something with Hyper-V.
}

void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
{
    u16 i = (selector & 0xfffc) >> 3;
    u32 prev_gdt_length = m_gdt_length;

    if (i >= m_gdt_length) {
        m_gdt_length = i + 1;
        VERIFY(m_gdt_length <= sizeof(m_gdt) / sizeof(m_gdt[0]));
        m_gdtr.limit = (m_gdt_length + 1) * 8 - 1;
    }
    m_gdt[i].low = low;
    m_gdt[i].high = high;

    // clear selectors we may have skipped
    for (auto j = prev_gdt_length; j < i; ++j) {
        m_gdt[j].low = 0;
        m_gdt[j].high = 0;
    }
}

void Processor::write_gdt_entry(u16 selector, Descriptor& descriptor)
{
    write_raw_gdt_entry(selector, descriptor.low, descriptor.high);
}

Descriptor& Processor::get_gdt_entry(u16 selector)
{
    u16 i = (selector & 0xfffc) >> 3;
    return *(Descriptor*)(&m_gdt[i]);
}

void Processor::flush_gdt()
{
    m_gdtr.address = m_gdt;
    m_gdtr.limit = (m_gdt_length * 8) - 1;
    asm volatile("lgdt %0" ::"m"(m_gdtr)
                 : "memory");
}

DescriptorTablePointer const& Processor::get_gdtr()
{
    return m_gdtr;
}

template<typename T>
ErrorOr<Vector<FlatPtr, 32>> ProcessorBase<T>::capture_stack_trace(Thread& thread, size_t max_frames)
{
    FlatPtr frame_ptr = 0, ip = 0;
    Vector<FlatPtr, 32> stack_trace;
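
    // walk_stack assumes frame-pointer-based frames (the kernel is built without
    // omitting rbp): each frame stores the caller's rbp and return address, which
    // AK::unwind_stack_from_frame_pointer follows via the two callbacks below.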
    auto walk_stack = [&](FlatPtr frame_ptr) -> ErrorOr<void> {
        constexpr size_t max_stack_frames = 4096;
        bool is_walking_userspace_stack = false;
        TRY(stack_trace.try_append(ip));

        TRY(AK::unwind_stack_from_frame_pointer(
            frame_ptr,
            [&is_walking_userspace_stack](FlatPtr address) -> ErrorOr<FlatPtr> {
                if (!Memory::is_user_address(VirtualAddress { address })) {
                    if (is_walking_userspace_stack) {
                        dbgln("SHENANIGANS! Userspace stack points back into kernel memory");
                        return EFAULT;
                    }
                } else {
                    is_walking_userspace_stack = true;
                }

                FlatPtr value;

                if (Memory::is_user_range(VirtualAddress { address }, sizeof(FlatPtr))) {
                    TRY(copy_from_user(&value, bit_cast<FlatPtr*>(address)));
                } else {
                    void* fault_at;
                    if (!safe_memcpy(&value, bit_cast<FlatPtr*>(address), sizeof(FlatPtr), fault_at))
                        return EFAULT;
                }

                return value;
            },
            [&stack_trace, max_frames](AK::StackFrame stack_frame) -> ErrorOr<IterationDecision> {
                if (stack_trace.size() >= max_stack_frames || (max_frames != 0 && stack_trace.size() >= max_frames))
                    return IterationDecision::Break;
                TRY(stack_trace.try_append(stack_frame.return_address));
                return IterationDecision::Continue;
            }));

        return {};
    };

    auto capture_current_thread = [&]() {
        frame_ptr = (FlatPtr)__builtin_frame_address(0);
        ip = (FlatPtr)__builtin_return_address(0);

        return walk_stack(frame_ptr);
    };

    // Since the thread may be running on another processor, there
    // is a chance a context switch may happen while we're trying
    // to get it. It also won't be entirely accurate and merely
    // reflect the status at the last context switch.
    SpinlockLocker lock(g_scheduler_lock);
    if (&thread == Processor::current_thread()) {
        VERIFY(thread.state() == Thread::State::Running);
        // Leave the scheduler lock. If we trigger page faults we may
        // need to be preempted. Since this is our own thread it won't
        // cause any problems as the stack won't change below this frame.
        lock.unlock();
        TRY(capture_current_thread());
    } else if (thread.is_active()) {
        VERIFY(thread.cpu() != Processor::current_id());
        // If this is the case, the thread is currently running
        // on another processor. We can't trust the kernel stack as
        // it may be changing at any time. We need to probably send
        // an IPI to that processor, have it walk the stack and wait
        // until it returns the data back to us
        auto& proc = Processor::current();
        ErrorOr<void> result;
        Processor::smp_unicast(
            thread.cpu(),
            [&]() {
                dbgln("CPU[{}] getting stack for cpu #{}", Processor::current_id(), proc.id());
                ScopedAddressSpaceSwitcher switcher(thread.process());
                VERIFY(&Processor::current() != &proc);
                VERIFY(&thread == Processor::current_thread());
                // NOTE: Because the other processor is still holding the
                //       scheduler lock while waiting for this callback to finish,
                //       the current thread on the target processor cannot change

                // TODO: What to do about page faults here? We might deadlock
                //       because the other processor is still holding the
                //       scheduler lock...
                result = capture_current_thread();
            },
            false);
        TRY(result);
    } else {
        switch (thread.state()) {
        case Thread::State::Running:
            VERIFY_NOT_REACHED(); // should have been handled above
        case Thread::State::Runnable:
        case Thread::State::Stopped:
        case Thread::State::Blocked:
        case Thread::State::Dying:
        case Thread::State::Dead: {
            ScopedAddressSpaceSwitcher switcher(thread.process());
            auto& regs = thread.regs();

            ip = regs.ip();
            frame_ptr = regs.rbp;

            // TODO: We need to leave the scheduler lock here, but we also
            //       need to prevent the target thread from being run while
            //       we walk the stack
            lock.unlock();
            TRY(walk_stack(frame_ptr));
            break;
        }
        default:
            dbgln("Cannot capture stack trace for thread {} in state {}", thread, thread.state_string());
            break;
        }
    }
    return stack_trace;
}

ProcessorContainer& Processor::processors()
{
    return s_processors;
}

template<typename T>
Processor& ProcessorBase<T>::by_id(u32 id)
{
    return *s_processors[id];
}

template<typename T>
void ProcessorBase<T>::exit_trap(TrapFrame& trap)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(&Processor::current() == this);

    auto* self = static_cast<Processor*>(this);

    // Temporarily enter a critical section. This is to prevent critical
    // sections entered and left within e.g. smp_process_pending_messages
    // to trigger a context switch while we're executing this function
    // See the comment at the end of the function why we don't use
    // ScopedCritical here.
    m_in_critical = m_in_critical + 1;

    VERIFY(m_in_irq >= trap.prev_irq_level);
    m_in_irq = trap.prev_irq_level;

    if (s_smp_enabled)
        self->smp_process_pending_messages();

    // Process the deferred call queue. Among other things, this ensures
    // that any pending thread unblocks happen before we enter the scheduler.
    m_deferred_call_pool.execute_pending();

    auto* current_thread = Processor::current_thread();
    if (current_thread) {
        auto& current_trap = current_thread->current_trap();
        current_trap = trap.next_trap;

        ExecutionMode new_previous_mode;
        if (current_trap) {
            VERIFY(current_trap->regs);
            // If we have another higher level trap then we probably returned
            // from an interrupt or irq handler.
            new_previous_mode = current_trap->regs->previous_mode();
        } else {
            // If we don't have a higher level trap then we're back in user mode.
            // Which means that the previous mode prior to being back in user mode was kernel mode
            new_previous_mode = ExecutionMode::Kernel;
        }

        if (current_thread->set_previous_mode(new_previous_mode))
            current_thread->update_time_scheduled(TimeManagement::scheduler_current_time(), true, false);
    }

    VERIFY_INTERRUPTS_DISABLED();

    // Leave the critical section without actually enabling interrupts.
    // We don't want context switches to happen until we're explicitly
    // triggering a switch in check_invoke_scheduler.
    m_in_critical = m_in_critical - 1;
    if (!m_in_irq && !m_in_critical)
        check_invoke_scheduler();
}

template<typename T>
void ProcessorBase<T>::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    auto ptr = vaddr.as_ptr();
    while (page_count > 0) {
        asm volatile("invlpg %0"
                     :
                     : "m"(*ptr)
                     : "memory");
        ptr += PAGE_SIZE;
        page_count--;
    }
}
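
// Note: flushing page-by-page with invlpg (above) avoids the full flush of all
// non-global TLB entries that the CR3 reload in flush_entire_tlb_local() incurs.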
template<typename T>
void ProcessorBase<T>::flush_entire_tlb_local()
{
    write_cr3(read_cr3());
}

template<typename T>
void ProcessorBase<T>::flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    if (s_smp_enabled && (!Memory::is_user_address(vaddr) || Process::current().thread_count() > 1))
        Processor::smp_broadcast_flush_tlb(page_directory, vaddr, page_count);
    else
        flush_tlb_local(vaddr, page_count);
}

void Processor::smp_return_to_pool(ProcessorMessage& msg)
{
    ProcessorMessage* next = nullptr;
    for (;;) {
        msg.next = next;
        if (s_message_pool.compare_exchange_strong(next, &msg, AK::MemoryOrder::memory_order_acq_rel))
            break;
        Processor::pause();
    }
}

ProcessorMessage& Processor::smp_get_from_pool()
{
    ProcessorMessage* msg;

    // The assumption is that messages are never removed from the pool!
    for (;;) {
        msg = s_message_pool.load(AK::MemoryOrder::memory_order_consume);
        if (!msg) {
            if (!Processor::current().smp_process_pending_messages()) {
                Processor::pause();
            }
            continue;
        }
        // If another processor were to use this message in the meanwhile,
        // "msg" is still valid (because it never gets freed). We'd detect
        // this because the expected value "msg" and pool would
        // no longer match, and the compare_exchange will fail. But accessing
        // "msg->next" is always safe here.
        if (s_message_pool.compare_exchange_strong(msg, msg->next, AK::MemoryOrder::memory_order_acq_rel)) {
            // We successfully "popped" this available message
            break;
        }
    }

    VERIFY(msg != nullptr);
    return *msg;
}
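
// s_idle_cpu_mask tracks which CPUs are parked in the idle loop. Waking works by
// atomically claiming idle bits (fetch_and with the complement of the chosen set),
// so two callers never both send a wakeup IPI for the same idle CPU.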
template<typename T>
u32 ProcessorBase<T>::smp_wake_n_idle_processors(u32 wake_count)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(wake_count > 0);
    if (!s_smp_enabled)
        return 0;

    // Wake at most N - 1 processors
    if (wake_count >= Processor::count()) {
        wake_count = Processor::count() - 1;
        VERIFY(wake_count > 0);
    }

    u32 current_id = Processor::current_id();

    u32 did_wake_count = 0;
    auto& apic = APIC::the();
    while (did_wake_count < wake_count) {
        // Try to get a set of idle CPUs and flip them to busy
        u32 idle_mask = Processor::s_idle_cpu_mask.load(AK::MemoryOrder::memory_order_relaxed) & ~(1u << current_id);
        u32 idle_count = popcount(idle_mask);
        if (idle_count == 0)
            break; // No (more) idle processor available

        u32 found_mask = 0;
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = bit_scan_forward(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);
            found_mask |= 1u << cpu;
        }

        idle_mask = Processor::s_idle_cpu_mask.fetch_and(~found_mask, AK::MemoryOrder::memory_order_acq_rel) & found_mask;
        if (idle_mask == 0)
            continue; // All of them were flipped to busy, try again
        idle_count = popcount(idle_mask);
        for (u32 i = 0; i < idle_count; i++) {
            u32 cpu = bit_scan_forward(idle_mask) - 1;
            idle_mask &= ~(1u << cpu);

            // Send an IPI to that CPU to wake it up. There is a possibility
            // someone else woke it up as well, or that it woke up due to
            // a timer interrupt. But we tried hard to avoid this...
            apic.send_ipi(cpu);
            did_wake_count++;
        }
    }
    return did_wake_count;
}

template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::smp_enable()
{
    size_t msg_pool_size = Processor::count() * 100u;
    size_t msg_entries_cnt = Processor::count();

    auto msgs = new ProcessorMessage[msg_pool_size];
    auto msg_entries = new ProcessorMessageEntry[msg_pool_size * msg_entries_cnt];
    size_t msg_entry_i = 0;
    for (size_t i = 0; i < msg_pool_size; i++, msg_entry_i += msg_entries_cnt) {
        auto& msg = msgs[i];
        msg.next = i < msg_pool_size - 1 ? &msgs[i + 1] : nullptr;
        msg.per_proc_entries = &msg_entries[msg_entry_i];
        for (size_t k = 0; k < msg_entries_cnt; k++)
            msg_entries[msg_entry_i + k].msg = &msg;
    }

    s_message_pool.store(&msgs[0], AK::MemoryOrder::memory_order_release);

    // Start sending IPI messages
    s_smp_enabled = true;
}

void Processor::smp_cleanup_message(ProcessorMessage& msg)
{
    switch (msg.type) {
    case ProcessorMessage::Callback:
        msg.callback_value().~Function();
        break;
    default:
        break;
    }
}

bool Processor::smp_process_pending_messages()
{
    VERIFY(s_smp_enabled);

    bool did_process = false;
    enter_critical();

    if (auto pending_msgs = m_message_queue.exchange(nullptr, AK::MemoryOrder::memory_order_acq_rel)) {
        // We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
        auto reverse_list =
            [](ProcessorMessageEntry* list) -> ProcessorMessageEntry* {
            ProcessorMessageEntry* rev_list = nullptr;
            while (list) {
                auto next = list->next;
                list->next = rev_list;
                rev_list = list;
                list = next;
            }
            return rev_list;
        };

        pending_msgs = reverse_list(pending_msgs);

        // now process in the right order
        ProcessorMessageEntry* next_msg;
        for (auto cur_msg = pending_msgs; cur_msg; cur_msg = next_msg) {
            next_msg = cur_msg->next;
            auto msg = cur_msg->msg;

            dbgln_if(SMP_DEBUG, "SMP[{}]: Processing message {}", current_id(), VirtualAddress(msg));

            switch (msg->type) {
            case ProcessorMessage::Callback:
                msg->invoke_callback();
                break;
            case ProcessorMessage::FlushTlb:
                if (Memory::is_user_address(VirtualAddress(msg->flush_tlb.ptr))) {
                    // We assume that we don't cross into kernel land!
                    VERIFY(Memory::is_user_range(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count * PAGE_SIZE));
                    if (read_cr3() != msg->flush_tlb.page_directory->cr3()) {
                        // This processor isn't using this page directory right now, we can ignore this request
                        dbgln_if(SMP_DEBUG, "SMP[{}]: No need to flush {} pages at {}", current_id(), msg->flush_tlb.page_count, VirtualAddress(msg->flush_tlb.ptr));
                        break;
                    }
                }
                flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
                break;
            }

            bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
            auto prev_refs = msg->refs.fetch_sub(1u, AK::MemoryOrder::memory_order_acq_rel);
            VERIFY(prev_refs != 0);
            if (prev_refs == 1) {
                // All processors handled this. If this is an async message,
                // we need to clean it up and return it to the pool
                if (is_async) {
                    smp_cleanup_message(*msg);
                    smp_return_to_pool(*msg);
                }
            }

            if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed))
                halt_this();
        }
        did_process = true;
    } else if (m_halt_requested.load(AK::MemoryOrder::memory_order_relaxed)) {
        halt_this();
    }

    leave_critical();
    return did_process;
}

bool Processor::smp_enqueue_message(ProcessorMessage& msg)
{
    // Note that it's quite possible that the other processor may pop
    // the queue at any given time. We rely on the fact that the messages
    // are pooled and never get freed!
    auto& msg_entry = msg.per_proc_entries[id()];
    VERIFY(msg_entry.msg == &msg);
    ProcessorMessageEntry* next = nullptr;
    for (;;) {
        msg_entry.next = next;
        if (m_message_queue.compare_exchange_strong(next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel))
            break;
        Processor::pause();
    }

    // If the enqueued message was the only message in the queue when posted,
    // we return true. This is used by callers when deciding whether to generate an IPI.
    return next == nullptr;
}

void Processor::smp_broadcast_message(ProcessorMessage& msg)
{
    auto& current_processor = Processor::current();

    dbgln_if(SMP_DEBUG, "SMP[{}]: Broadcast message {} to cpus: {} processor: {}", current_processor.id(), VirtualAddress(&msg), count(), VirtualAddress(&current_processor));

    msg.refs.store(count() - 1, AK::MemoryOrder::memory_order_release);
    VERIFY(msg.refs > 0);
    bool need_broadcast = false;
    for_each(
        [&](Processor& proc) {
            if (&proc != &current_processor) {
                if (proc.smp_enqueue_message(msg))
                    need_broadcast = true;
            }
        });

    // Now trigger an IPI on all other APs (unless all targets already had messages queued)
    if (need_broadcast)
        APIC::the().broadcast_ipi();
}

void Processor::smp_broadcast_wait_sync(ProcessorMessage& msg)
{
    auto& cur_proc = Processor::current();
    VERIFY(!msg.async);
    // If synchronous then we must cleanup and return the message back
    // to the pool. Otherwise, the last processor to complete it will return it
    while (msg.refs.load(AK::MemoryOrder::memory_order_consume) != 0) {
        Processor::pause();

        // We need to process any messages that may have been sent to
        // us while we're waiting. This also checks if another processor
        // may have requested us to halt.
        cur_proc.smp_process_pending_messages();
    }

    smp_cleanup_message(msg);
    smp_return_to_pool(msg);
}

void Processor::smp_unicast_message(u32 cpu, ProcessorMessage& msg, bool async)
{
    auto& current_processor = Processor::current();
    VERIFY(cpu != current_processor.id());
    auto& target_processor = processors()[cpu];
    msg.async = async;

    dbgln_if(SMP_DEBUG, "SMP[{}]: Send message {} to cpu #{} processor: {}", current_processor.id(), VirtualAddress(&msg), cpu, VirtualAddress(&target_processor));

    msg.refs.store(1u, AK::MemoryOrder::memory_order_release);
    if (target_processor->smp_enqueue_message(msg)) {
        APIC::the().send_ipi(cpu);
    }

    if (!async) {
        // If synchronous then we must cleanup and return the message back
        // to the pool. Otherwise, the last processor to complete it will return it
        while (msg.refs.load(AK::MemoryOrder::memory_order_consume) != 0) {
            Processor::pause();

            // We need to process any messages that may have been sent to
            // us while we're waiting. This also checks if another processor
            // may have requested us to halt.
            current_processor.smp_process_pending_messages();
        }
        smp_cleanup_message(msg);
        smp_return_to_pool(msg);
    }
}

void Processor::smp_unicast(u32 cpu, Function<void()> callback, bool async)
{
    auto& msg = smp_get_from_pool();
    msg.type = ProcessorMessage::Callback;
    new (msg.callback_storage) ProcessorMessage::CallbackFunction(move(callback));
    smp_unicast_message(cpu, msg, async);
}

void Processor::smp_broadcast_flush_tlb(Memory::PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    auto& msg = smp_get_from_pool();
    msg.async = false;
    msg.type = ProcessorMessage::FlushTlb;
    msg.flush_tlb.page_directory = page_directory;
    msg.flush_tlb.ptr = vaddr.as_ptr();
    msg.flush_tlb.page_count = page_count;

    smp_broadcast_message(msg);
    // While the other processors handle this request, we'll flush ours
    flush_tlb_local(vaddr, page_count);
    // Now wait until everybody is done as well
    smp_broadcast_wait_sync(msg);
}

void Processor::smp_broadcast_halt()
{
    // We don't want to use a message, because this could have been triggered
    // by being out of memory and we might not be able to get a message
    for_each(
        [&](Processor& proc) {
            proc.m_halt_requested.store(true, AK::MemoryOrder::memory_order_release);
        });

    // Now trigger an IPI on all other APs
    APIC::the().broadcast_ipi();
}

template<typename T>
void ProcessorBase<T>::halt()
{
    if (s_smp_enabled)
        Processor::smp_broadcast_halt();

    halt_this();
}

UNMAP_AFTER_INIT void Processor::gdt_init()
{
    m_gdt_length = 0;
    m_gdtr.address = nullptr;
    m_gdtr.limit = 0;

    write_raw_gdt_entry(0x0000, 0x00000000, 0x00000000);
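    // Each raw entry below encodes a flat segment (base 0, limit 0xffff with 4K
    // granularity); the high dword packs base[31:24], flags, limit[19:16], the
    // access byte, and base[23:16]. 0x00af9a00 is thus a present ring 0
    // long-mode (L=1) code segment, and 0x00affa00 its ring 3 counterpart.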
    write_raw_gdt_entry(GDT_SELECTOR_CODE0, 0x0000ffff, 0x00af9a00); // code0
    write_raw_gdt_entry(GDT_SELECTOR_DATA0, 0x0000ffff, 0x00af9200); // data0
    write_raw_gdt_entry(GDT_SELECTOR_DATA3, 0x0000ffff, 0x008ff200); // data3
    write_raw_gdt_entry(GDT_SELECTOR_CODE3, 0x0000ffff, 0x00affa00); // code3

    Descriptor tss_descriptor {};
    tss_descriptor.set_base(VirtualAddress { (size_t)&m_tss & 0xffffffff });
    tss_descriptor.set_limit(sizeof(TSS) - 1);
    tss_descriptor.dpl = 0;
    tss_descriptor.segment_present = 1;
    tss_descriptor.granularity = 0;
    tss_descriptor.operation_size64 = 0;
    tss_descriptor.operation_size32 = 1;
    tss_descriptor.descriptor_type = 0;
    tss_descriptor.type = Descriptor::SystemType::AvailableTSS;
    write_gdt_entry(GDT_SELECTOR_TSS, tss_descriptor); // tss

    Descriptor tss_descriptor_part2 {};
    tss_descriptor_part2.low = (size_t)&m_tss >> 32;
    write_gdt_entry(GDT_SELECTOR_TSS_PART2, tss_descriptor_part2);

    flush_gdt();
    load_task_register(GDT_SELECTOR_TSS);
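
    // Point MSR_GS_BASE at this Processor so that gs-relative per-CPU accesses
    // (such as the write_gs_ptr() call in clear_critical() below) resolve to
    // the right instance.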
    MSR gs_base(MSR_GS_BASE);
    gs_base.set((u64)this);
}

extern "C" void context_first_init(Thread* from_thread, Thread* to_thread, [[maybe_unused]] TrapFrame* trap)
{
    do_context_first_init(from_thread, to_thread);
}

extern "C" void enter_thread_context(Thread* from_thread, Thread* to_thread)
{
    VERIFY(from_thread == to_thread || from_thread->state() != Thread::State::Running);
    VERIFY(to_thread->state() == Thread::State::Running);

    bool has_fxsr = Processor::current().has_feature(CPUFeature::FXSR);
    bool has_xsave_avx_support = Processor::current().has_feature(CPUFeature::XSAVE) && Processor::current().has_feature(CPUFeature::AVX);
    Processor::set_current_thread(*to_thread);

    auto& from_regs = from_thread->regs();
    auto& to_regs = to_thread->regs();

    // NOTE: IOPL should never be non-zero in any situation, so let's panic
    //       immediately instead of carrying on with elevated I/O privileges.
    VERIFY(get_iopl_from_eflags(to_regs.flags()) == 0);

    if (has_xsave_avx_support) {
        // The specific state components saved correspond to the bits set in the requested-feature bitmap (RFBM), which is the logical-AND of EDX:EAX and XCR0.
        // https://www.moritz.systems/blog/how-debuggers-work-getting-and-setting-x86-registers-part-2/
        asm volatile("xsave %0\n"
                     : "=m"(from_thread->fpu_state())
                     : "a"(static_cast<u32>(SIMD::StateComponent::AVX | SIMD::StateComponent::SSE | SIMD::StateComponent::X87)), "d"(0u));
    } else if (has_fxsr) {
        asm volatile("fxsave %0"
                     : "=m"(from_thread->fpu_state()));
    } else {
        asm volatile("fnsave %0"
                     : "=m"(from_thread->fpu_state()));
    }

    if (from_thread->process().is_traced())
        read_debug_registers_into(from_thread->debug_register_state());

    if (to_thread->process().is_traced()) {
        write_debug_registers_from(to_thread->debug_register_state());
    } else {
        clear_debug_registers();
    }

    auto& processor = Processor::current();
    Processor::set_fs_base(to_thread->arch_specific_data().fs_base);

    if (from_regs.cr3 != to_regs.cr3)
        write_cr3(to_regs.cr3);

    to_thread->set_cpu(processor.id());

    auto in_critical = to_thread->saved_critical();
    VERIFY(in_critical > 0);
    Processor::restore_critical(in_critical);

    if (has_xsave_avx_support)
        asm volatile("xrstor %0" ::"m"(to_thread->fpu_state()), "a"(static_cast<u32>(SIMD::StateComponent::AVX | SIMD::StateComponent::SSE | SIMD::StateComponent::X87)), "d"(0u));
    else if (has_fxsr)
        asm volatile("fxrstor %0" ::"m"(to_thread->fpu_state()));
    else
        asm volatile("frstor %0" ::"m"(to_thread->fpu_state()));
}

extern "C" NO_SANITIZE_COVERAGE FlatPtr do_init_context(Thread* thread, u32 flags)
{
    VERIFY_INTERRUPTS_DISABLED();
    thread->regs().set_flags(flags);
    return Processor::current().init_context(*thread, true);
}

// FIXME: Share this code with other architectures.
template<typename T>
void ProcessorBase<T>::assume_context(Thread& thread, InterruptsState new_interrupts_state)
{
    dbgln_if(CONTEXT_SWITCH_DEBUG, "Assume context for thread {} {}", VirtualAddress(&thread), thread);

    VERIFY_INTERRUPTS_DISABLED();
    Scheduler::prepare_after_exec();
    // in_critical() should be 2 here. The critical section in Process::exec
    // and then the scheduler lock
    VERIFY(Processor::in_critical() == 2);
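
    // RFLAGS bit 1 is the reserved, always-set bit; 0x200 is IF, so interrupts
    // come back on in the new context only when the caller asked for that.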
    u32 flags = 2 | (new_interrupts_state == InterruptsState::Enabled ? 0x200 : 0);
    do_assume_context(&thread, flags);

    VERIFY_NOT_REACHED();
}

template<typename T>
u32 ProcessorBase<T>::clear_critical()
{
    InterruptDisabler disabler;
    auto prev_critical = in_critical();
    write_gs_ptr(__builtin_offsetof(Processor, m_in_critical), 0);
    auto& proc = current();
    if (proc.m_in_irq == 0)
        proc.check_invoke_scheduler();
    return prev_critical;
}

NAKED void thread_context_first_enter(void)
{
    // enter_thread_context returns to here the first time a thread is executing
    asm(
        // switch_context will have pushed from_thread and to_thread to our new
        // stack prior to thread_context_first_enter() being called, and the
        // pointer to TrapFrame was the top of the stack before that
        "popq %rdi \n" // from_thread (argument 0)
        "popq %rsi \n" // to_thread (argument 1)
        "popq %rdx \n" // pointer to TrapFrame (argument 2)
        "cld \n"
        "call context_first_init \n"
        "jmp common_trap_exit \n");
}

NAKED NO_SANITIZE_COVERAGE void do_assume_context(Thread*, u32)
{
    // clang-format off
    // FIXME: I hope (Thread* thread, u32 flags) aren't compiled away
    asm(
        "movq %rdi, %r12 \n" // save thread ptr
        "movq %rsi, %r13 \n" // save flags
        // We're going to call Processor::init_context, so just make sure
        // we have enough stack space so we don't stomp over it
        "subq $(" __STRINGIFY(16 + REGISTER_STATE_SIZE + TRAP_FRAME_SIZE + 8) "), %rsp \n"
        "cld \n"
        "call do_init_context \n"
        "movq %rax, %rsp \n" // move stack pointer to what Processor::init_context set up for us
        "movq %r12, %rdi \n" // to_thread
        "movq %r12, %rsi \n" // from_thread
        "pushq %r12 \n" // to_thread (for thread_context_first_enter)
        "pushq %r12 \n" // from_thread (for thread_context_first_enter)
        "leaq thread_context_first_enter(%rip), %r12 \n" // should be same as regs.rip
        "pushq %r12 \n"
        "jmp enter_thread_context \n");
    // clang-format on
}

template<typename T>
StringView ProcessorBase<T>::platform_string()
{
    return "x86_64"sv;
}

template<typename T>
FlatPtr ProcessorBase<T>::init_context(Thread& thread, bool leave_crit)
{
    VERIFY(is_kernel_mode());
    VERIFY(g_scheduler_lock.is_locked());
    if (leave_crit) {
        // Leave the critical section we set up in Process::exec,
        // but because we still have the scheduler lock we should end up with 1
        VERIFY(in_critical() == 2);
        m_in_critical = 1; // leave it without triggering anything or restoring flags
    }

    u64 kernel_stack_top = thread.kernel_stack_top();

    // Add a random offset between 0-256 (16-byte aligned)
    kernel_stack_top -= round_up_to_power_of_two(get_fast_random<u8>(), 16);

    u64 stack_top = kernel_stack_top;

    // TODO: handle NT?
    VERIFY((cpu_flags() & 0x24000) == 0); // Assume !(NT | VM)
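
    // What follows builds the initial kernel stack: a return slot for
    // exit_kernel_thread, the RegisterState ("iretframe"), a TrapFrame whose
    // regs member points at that iretframe, and finally a pointer to the
    // TrapFrame, which is the stack_top value we return to switch_context().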

    auto& regs = thread.regs();
    bool return_to_user = (regs.cs & 3) != 0;

    stack_top -= 1 * sizeof(u64);
    *reinterpret_cast<u64*>(kernel_stack_top - 2 * sizeof(u64)) = FlatPtr(&exit_kernel_thread);

    stack_top -= sizeof(RegisterState);

    // we want to end up 16-byte aligned, %rsp + 8 should be aligned
    stack_top -= sizeof(u64);
    *reinterpret_cast<u64*>(kernel_stack_top - sizeof(u64)) = 0;

    // set up the stack so that after returning from thread_context_first_enter()
    // we will end up either in kernel mode or user mode, depending on how the thread is set up
    // However, the first step is to always start in kernel mode with thread_context_first_enter
    RegisterState& iretframe = *reinterpret_cast<RegisterState*>(stack_top);
    iretframe.rdi = regs.rdi;
    iretframe.rsi = regs.rsi;
    iretframe.rbp = regs.rbp;
    iretframe.rsp = 0;
    iretframe.rbx = regs.rbx;
    iretframe.rdx = regs.rdx;
    iretframe.rcx = regs.rcx;
    iretframe.rax = regs.rax;
    iretframe.r8 = regs.r8;
    iretframe.r9 = regs.r9;
    iretframe.r10 = regs.r10;
    iretframe.r11 = regs.r11;
    iretframe.r12 = regs.r12;
    iretframe.r13 = regs.r13;
    iretframe.r14 = regs.r14;
    iretframe.r15 = regs.r15;
    iretframe.rflags = regs.rflags;
    iretframe.rip = regs.rip;
    iretframe.cs = regs.cs;
    if (return_to_user) {
        iretframe.userspace_rsp = regs.rsp;
        iretframe.userspace_ss = GDT_SELECTOR_DATA3 | 3;
    } else {
        iretframe.userspace_rsp = kernel_stack_top;
        iretframe.userspace_ss = 0;
    }

    // make space for a trap frame
    stack_top -= sizeof(TrapFrame);
    TrapFrame& trap = *reinterpret_cast<TrapFrame*>(stack_top);
    trap.regs = &iretframe;
    trap.prev_irq_level = 0;
    trap.next_trap = nullptr;

    stack_top -= sizeof(u64); // pointer to TrapFrame
    *reinterpret_cast<u64*>(stack_top) = stack_top + 8;

    if constexpr (CONTEXT_SWITCH_DEBUG) {
        if (return_to_user) {
            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}, user_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, regs.rip,
                VirtualAddress(regs.rsp),
                VirtualAddress(stack_top),
                iretframe.userspace_rsp);
        } else {
            dbgln("init_context {} ({}) set up to execute at rip={}:{}, rsp={}, stack_top={}",
                thread,
                VirtualAddress(&thread),
                iretframe.cs, regs.rip,
                VirtualAddress(regs.rsp),
                VirtualAddress(stack_top));
        }
    }

    // make switch_context() always first return to thread_context_first_enter()
    // in kernel mode, so set up these values so that we end up popping iretframe
    // off the stack right after the context switch completed, at which point
    // control is transferred to what iretframe is pointing to.
    regs.rip = FlatPtr(&thread_context_first_enter);
    regs.rsp0 = kernel_stack_top;
    regs.rsp = stack_top;
    regs.cs = GDT_SELECTOR_CODE0;
    return stack_top;
}

template<typename T>
void ProcessorBase<T>::switch_context(Thread*& from_thread, Thread*& to_thread)
{
    VERIFY(!m_in_irq);
    VERIFY(m_in_critical == 1);
    VERIFY(is_kernel_mode());

    auto* self = static_cast<Processor*>(this);

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context --> switching out of: {} {}", VirtualAddress(from_thread), *from_thread);

    // m_in_critical is restored in enter_thread_context
    from_thread->save_critical(m_in_critical);

    // clang-format off
    // Switch to new thread context, passing from_thread and to_thread
    // through to the new context using registers rdx and rax
    asm volatile(
        // NOTE: changing how much we push to the stack affects thread_context_first_enter()!
        "pushfq \n"
        "pushq %%rbx \n"
        "pushq %%rcx \n"
        "pushq %%rbp \n"
        "pushq %%rsi \n"
        "pushq %%rdi \n"
        "pushq %%r8 \n"
        "pushq %%r9 \n"
        "pushq %%r10 \n"
        "pushq %%r11 \n"
        "pushq %%r12 \n"
        "pushq %%r13 \n"
        "pushq %%r14 \n"
        "pushq %%r15 \n"
        "movq %%rsp, %[from_rsp] \n"
        "leaq 1f(%%rip), %%rbx \n"
        "movq %%rbx, %[from_rip] \n"
        "movq %[to_rsp0], %%rbx \n"
        "movl %%ebx, %[tss_rsp0l] \n"
        "shrq $32, %%rbx \n"
        "movl %%ebx, %[tss_rsp0h] \n"
        "movq %[to_rsp], %%rsp \n"
        "movq %%rbp, %[from_rbp] \n"
        "pushq %[to_thread] \n"
        "pushq %[from_thread] \n"
        "pushq %[to_rip] \n"
        "cld \n"
        "movq 16(%%rsp), %%rsi \n"
        "movq 8(%%rsp), %%rdi \n"
        "jmp enter_thread_context \n"
        "1: \n"
        "popq %%rdx \n"
        "popq %%rax \n"
        "popq %%r15 \n"
        "popq %%r14 \n"
        "popq %%r13 \n"
        "popq %%r12 \n"
        "popq %%r11 \n"
        "popq %%r10 \n"
        "popq %%r9 \n"
        "popq %%r8 \n"
        "popq %%rdi \n"
        "popq %%rsi \n"
        "popq %%rbp \n"
        "popq %%rcx \n"
        "popq %%rbx \n"
        "popfq \n"
        : [from_rsp] "=m" (from_thread->regs().rsp),
        [from_rbp] "=m" (from_thread->regs().rbp),
        [from_rip] "=m" (from_thread->regs().rip),
        [tss_rsp0l] "=m" (self->m_tss.rsp0l),
        [tss_rsp0h] "=m" (self->m_tss.rsp0h),
        "=d" (from_thread), // needed so that from_thread retains the correct value
        "=a" (to_thread) // needed so that to_thread retains the correct value
        : [to_rsp] "g" (to_thread->regs().rsp),
        [to_rsp0] "g" (to_thread->regs().rsp0),
        [to_rip] "c" (to_thread->regs().rip),
        [from_thread] "d" (from_thread),
        [to_thread] "a" (to_thread)
        : "memory", "rbx"
    );
    // clang-format on

    dbgln_if(CONTEXT_SWITCH_DEBUG, "switch_context <-- from {} {} to {} {}", VirtualAddress(from_thread), *from_thread, VirtualAddress(to_thread), *to_thread);
}

template<typename T>
UNMAP_AFTER_INIT void ProcessorBase<T>::initialize_context_switching(Thread& initial_thread)
{
    VERIFY(initial_thread.process().is_kernel_process());

    auto* self = static_cast<Processor*>(this);

    auto& regs = initial_thread.regs();
    self->m_tss.iomapbase = sizeof(self->m_tss);
    self->m_tss.rsp0l = regs.rsp0 & 0xffffffff;
    self->m_tss.rsp0h = regs.rsp0 >> 32;

    m_scheduler_initialized.set();

    // clang-format off
    asm volatile(
        "movq %[new_rsp], %%rsp \n" // switch to new stack
        "pushq %[from_to_thread] \n" // to_thread
        "pushq %[from_to_thread] \n" // from_thread
        "pushq %[new_rip] \n" // save the entry rip to the stack
        "cld \n"
        "pushq %[cpu] \n" // push argument for init_finished before register is clobbered
        "call pre_init_finished \n"
        "pop %%rdi \n" // move argument for init_finished into place
        "call init_finished \n"
        "call post_init_finished \n"
        "movq 24(%%rsp), %%rdi \n" // move pointer to TrapFrame into place
        "call enter_trap_no_irq \n"
        "retq \n"
        :: [new_rsp] "g" (regs.rsp),
        [new_rip] "a" (regs.rip),
        [from_to_thread] "b" (&initial_thread),
        [cpu] "c" ((u64)id())
    );
    // clang-format on

    VERIFY_NOT_REACHED();
}

void Processor::set_fs_base(FlatPtr fs_base)
{
    MSR fs_base_msr(MSR_FS_BASE);
    fs_base_msr.set(fs_base);
}

template<typename T>
void ProcessorBase<T>::idle_begin() const
{
    Processor::s_idle_cpu_mask.fetch_or(1u << m_cpu, AK::MemoryOrder::memory_order_relaxed);
}

template<typename T>
void ProcessorBase<T>::idle_end() const
{
    Processor::s_idle_cpu_mask.fetch_and(~(1u << m_cpu), AK::MemoryOrder::memory_order_relaxed);
}
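
// idle_begin()/idle_end() publish this CPU's idle state in s_idle_cpu_mask.
// The mask is only advisory (e.g. for choosing an idle CPU to IPI), and no
// other data is published alongside it, hence the relaxed memory ordering.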

template<typename T>
void ProcessorBase<T>::wait_for_interrupt() const
{
    asm("hlt");
}

}

#include <Kernel/Arch/ProcessorFunctions.include>