Kernel: Add SMP IPI support

We can now properly initialize all processors without
crashing by sending SMP IPI messages to synchronize memory
between processors.

We now initialize the APs once the scheduler is running, so
that we can process IPI messages from the other cores.

Also rework interrupt handling a bit so that it's more of a
1:1 mapping. We need to allocate non-sharable interrupts for
IPIs.

This also fixes the occasional hang/crash because all
CPUs now synchronize memory with each other.
Tom 2020-07-06 07:27:22 -06:00 committed by Andreas Kling
parent dec27e5e6f
commit bc107d0b33
27 changed files with 1236 additions and 627 deletions
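The "1:1 mapping" mentioned in the commit message shows up in the IDT changes below: generic handlers were previously registered at an offset (vector 0x50 dispatched to interrupt_0_asm_entry), whereas now the asm entry number equals the vector (vector 0x50, i.e. 80 decimal, dispatches to interrupt_80_asm_entry). A minimal sketch of the new arithmetic, assuming IRQ_VECTOR_BASE is 0x50 as the registrations below imply:

// Sketch only (not from the commit): vector-to-slot math under the new
// 1:1 scheme, assuming IRQ_VECTOR_BASE == 0x50.
constexpr unsigned IRQ_VECTOR_BASE = 0x50;
constexpr unsigned handler_slot(unsigned vector)
{
    // Vector 0x50 (80) is served by interrupt_80_asm_entry and lands in
    // slot 0; vector 0xff (255) lands in the last slot.
    return vector - IRQ_VECTOR_BASE;
}
static_assert(handler_slot(0x50) == 0);
static_assert(handler_slot(0xff) == (256 - IRQ_VECTOR_BASE) - 1);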

View file

@ -228,7 +228,7 @@ void Parser::try_acpi_reboot()
auto fadt = map_typed<Structures::FADT>(m_fadt);
ASSERT(validate_reset_register());
access_generic_address(fadt->reset_reg, fadt->reset_value);
hang();
Processor::halt();
}
void Parser::try_acpi_shutdown()

View file

@ -49,6 +49,7 @@
//#define PAGE_FAULT_DEBUG
//#define CONTEXT_SWITCH_DEBUG
//#define SMP_DEBUG
namespace Kernel {
@ -153,7 +154,7 @@ void handle_crash(RegisterState& regs, const char* description, int signal, bool
auto process = Process::current();
if (!process) {
klog() << description << " with !current";
hang();
Processor::halt();
}
// If a process crashed while inspecting another process,
@ -166,7 +167,7 @@ void handle_crash(RegisterState& regs, const char* description, int signal, bool
if (process->is_ring0()) {
klog() << "Crash in ring 0 :(";
dump_backtrace();
hang();
Processor::halt();
}
cli();
@ -291,7 +292,7 @@ void debug_handler(TrapFrame* trap)
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
klog() << "Debug Exception in Ring0";
hang();
Processor::halt();
return;
}
constexpr u8 REASON_SINGLESTEP = 14;
@ -313,7 +314,7 @@ void breakpoint_handler(TrapFrame* trap)
auto current_thread = Thread::current();
if (&current_thread->process() == nullptr || (regs.cs & 3) == 0) {
klog() << "Breakpoint Trap in Ring0";
hang();
Processor::halt();
return;
}
if (current_thread->tracer()) {
@ -336,7 +337,7 @@ void breakpoint_handler(TrapFrame* trap)
asm("movl %%cr4, %%eax" \
: "=a"(cr4)); \
klog() << "CR0=" << String::format("%x", cr0) << " CR2=" << String::format("%x", cr2) << " CR3=" << String::format("%x", cr3) << " CR4=" << String::format("%x", cr4); \
hang(); \
Processor::halt(); \
}
EH(2, "Unknown error")
@ -358,7 +359,7 @@ const DescriptorTablePointer& get_idtr()
static void unimp_trap()
{
klog() << "Unhandled IRQ.";
hang();
Processor::halt();
}
GenericInterruptHandler& get_interrupt_handler(u8 interrupt_number)
@ -374,6 +375,7 @@ static void revert_to_unused_handler(u8 interrupt_number)
void register_generic_interrupt_handler(u8 interrupt_number, GenericInterruptHandler& handler)
{
ASSERT(interrupt_number < GENERIC_INTERRUPT_HANDLERS_COUNT);
if (s_interrupt_handler[interrupt_number] != nullptr) {
if (s_interrupt_handler[interrupt_number]->type() == HandlerType::UnhandledInterruptHandler) {
s_interrupt_handler[interrupt_number] = &handler;
@ -442,10 +444,7 @@ void flush_idt()
static void idt_init()
{
s_idtr.address = s_idt;
s_idtr.limit = 0x100 * 8 - 1;
for (u8 i = 0xff; i > 0x10; --i)
register_interrupt_handler(i, unimp_trap);
s_idtr.limit = 256 * 8 - 1;
register_interrupt_handler(0x00, divide_error_asm_entry);
register_user_callable_interrupt_handler(0x01, debug_asm_entry);
@ -465,134 +464,185 @@ static void idt_init()
register_interrupt_handler(0x0f, _exception15);
register_interrupt_handler(0x10, _exception16);
register_interrupt_handler(0x50, interrupt_0_asm_entry);
register_interrupt_handler(0x51, interrupt_1_asm_entry);
register_interrupt_handler(0x52, interrupt_2_asm_entry);
register_interrupt_handler(0x53, interrupt_3_asm_entry);
register_interrupt_handler(0x54, interrupt_4_asm_entry);
register_interrupt_handler(0x55, interrupt_5_asm_entry);
register_interrupt_handler(0x56, interrupt_6_asm_entry);
register_interrupt_handler(0x57, interrupt_7_asm_entry);
register_interrupt_handler(0x58, interrupt_8_asm_entry);
register_interrupt_handler(0x59, interrupt_9_asm_entry);
register_interrupt_handler(0x5a, interrupt_10_asm_entry);
register_interrupt_handler(0x5b, interrupt_11_asm_entry);
register_interrupt_handler(0x5c, interrupt_12_asm_entry);
register_interrupt_handler(0x5d, interrupt_13_asm_entry);
register_interrupt_handler(0x5e, interrupt_14_asm_entry);
register_interrupt_handler(0x5f, interrupt_15_asm_entry);
register_interrupt_handler(0x60, interrupt_16_asm_entry);
register_interrupt_handler(0x61, interrupt_17_asm_entry);
register_interrupt_handler(0x62, interrupt_18_asm_entry);
register_interrupt_handler(0x63, interrupt_19_asm_entry);
register_interrupt_handler(0x64, interrupt_20_asm_entry);
register_interrupt_handler(0x65, interrupt_21_asm_entry);
register_interrupt_handler(0x66, interrupt_22_asm_entry);
register_interrupt_handler(0x67, interrupt_23_asm_entry);
register_interrupt_handler(0x68, interrupt_24_asm_entry);
register_interrupt_handler(0x69, interrupt_25_asm_entry);
register_interrupt_handler(0x6a, interrupt_26_asm_entry);
register_interrupt_handler(0x6b, interrupt_27_asm_entry);
register_interrupt_handler(0x6c, interrupt_28_asm_entry);
register_interrupt_handler(0x6d, interrupt_29_asm_entry);
register_interrupt_handler(0x6e, interrupt_30_asm_entry);
register_interrupt_handler(0x6f, interrupt_31_asm_entry);
register_interrupt_handler(0x70, interrupt_32_asm_entry);
register_interrupt_handler(0x71, interrupt_33_asm_entry);
register_interrupt_handler(0x72, interrupt_34_asm_entry);
register_interrupt_handler(0x73, interrupt_35_asm_entry);
register_interrupt_handler(0x74, interrupt_36_asm_entry);
register_interrupt_handler(0x75, interrupt_37_asm_entry);
register_interrupt_handler(0x76, interrupt_38_asm_entry);
register_interrupt_handler(0x77, interrupt_39_asm_entry);
register_interrupt_handler(0x78, interrupt_40_asm_entry);
register_interrupt_handler(0x79, interrupt_41_asm_entry);
register_interrupt_handler(0x7a, interrupt_42_asm_entry);
register_interrupt_handler(0x7b, interrupt_43_asm_entry);
register_interrupt_handler(0x7c, interrupt_44_asm_entry);
register_interrupt_handler(0x7d, interrupt_45_asm_entry);
register_interrupt_handler(0x7e, interrupt_46_asm_entry);
register_interrupt_handler(0x7f, interrupt_47_asm_entry);
register_interrupt_handler(0x80, interrupt_48_asm_entry);
register_interrupt_handler(0x81, interrupt_49_asm_entry);
register_interrupt_handler(0x82, interrupt_50_asm_entry);
register_interrupt_handler(0x83, interrupt_51_asm_entry);
register_interrupt_handler(0x84, interrupt_52_asm_entry);
register_interrupt_handler(0x85, interrupt_53_asm_entry);
register_interrupt_handler(0x86, interrupt_54_asm_entry);
register_interrupt_handler(0x87, interrupt_55_asm_entry);
register_interrupt_handler(0x88, interrupt_56_asm_entry);
register_interrupt_handler(0x89, interrupt_57_asm_entry);
register_interrupt_handler(0x8a, interrupt_58_asm_entry);
register_interrupt_handler(0x8b, interrupt_59_asm_entry);
register_interrupt_handler(0x8c, interrupt_60_asm_entry);
register_interrupt_handler(0x8d, interrupt_61_asm_entry);
register_interrupt_handler(0x8e, interrupt_62_asm_entry);
register_interrupt_handler(0x8f, interrupt_63_asm_entry);
register_interrupt_handler(0x90, interrupt_64_asm_entry);
register_interrupt_handler(0x91, interrupt_65_asm_entry);
register_interrupt_handler(0x92, interrupt_66_asm_entry);
register_interrupt_handler(0x93, interrupt_67_asm_entry);
register_interrupt_handler(0x94, interrupt_68_asm_entry);
register_interrupt_handler(0x95, interrupt_69_asm_entry);
register_interrupt_handler(0x96, interrupt_70_asm_entry);
register_interrupt_handler(0x97, interrupt_71_asm_entry);
register_interrupt_handler(0x98, interrupt_72_asm_entry);
register_interrupt_handler(0x99, interrupt_73_asm_entry);
register_interrupt_handler(0x9a, interrupt_74_asm_entry);
register_interrupt_handler(0x9b, interrupt_75_asm_entry);
register_interrupt_handler(0x9c, interrupt_76_asm_entry);
register_interrupt_handler(0x9d, interrupt_77_asm_entry);
register_interrupt_handler(0x9e, interrupt_78_asm_entry);
register_interrupt_handler(0x9f, interrupt_79_asm_entry);
register_interrupt_handler(0xa0, interrupt_80_asm_entry);
register_interrupt_handler(0xa1, interrupt_81_asm_entry);
register_interrupt_handler(0xa2, interrupt_82_asm_entry);
register_interrupt_handler(0xa3, interrupt_83_asm_entry);
register_interrupt_handler(0xa4, interrupt_84_asm_entry);
register_interrupt_handler(0xa5, interrupt_85_asm_entry);
register_interrupt_handler(0xa6, interrupt_86_asm_entry);
register_interrupt_handler(0xa7, interrupt_87_asm_entry);
register_interrupt_handler(0xa8, interrupt_88_asm_entry);
register_interrupt_handler(0xa9, interrupt_89_asm_entry);
register_interrupt_handler(0xaa, interrupt_90_asm_entry);
register_interrupt_handler(0xab, interrupt_91_asm_entry);
register_interrupt_handler(0xac, interrupt_92_asm_entry);
register_interrupt_handler(0xad, interrupt_93_asm_entry);
register_interrupt_handler(0xae, interrupt_94_asm_entry);
register_interrupt_handler(0xaf, interrupt_95_asm_entry);
register_interrupt_handler(0xb0, interrupt_96_asm_entry);
register_interrupt_handler(0xb1, interrupt_97_asm_entry);
register_interrupt_handler(0xb2, interrupt_98_asm_entry);
register_interrupt_handler(0xb3, interrupt_99_asm_entry);
register_interrupt_handler(0xb4, interrupt_100_asm_entry);
register_interrupt_handler(0xb5, interrupt_101_asm_entry);
register_interrupt_handler(0xb6, interrupt_102_asm_entry);
register_interrupt_handler(0xb7, interrupt_103_asm_entry);
register_interrupt_handler(0xb8, interrupt_104_asm_entry);
register_interrupt_handler(0xb9, interrupt_105_asm_entry);
register_interrupt_handler(0xba, interrupt_106_asm_entry);
register_interrupt_handler(0xbb, interrupt_107_asm_entry);
register_interrupt_handler(0xbc, interrupt_108_asm_entry);
register_interrupt_handler(0xbd, interrupt_109_asm_entry);
register_interrupt_handler(0xbe, interrupt_110_asm_entry);
register_interrupt_handler(0xbf, interrupt_111_asm_entry);
register_interrupt_handler(0xc0, interrupt_112_asm_entry);
register_interrupt_handler(0xc1, interrupt_113_asm_entry);
register_interrupt_handler(0xc2, interrupt_114_asm_entry);
register_interrupt_handler(0xc3, interrupt_115_asm_entry);
register_interrupt_handler(0xc4, interrupt_116_asm_entry);
register_interrupt_handler(0xc5, interrupt_117_asm_entry);
register_interrupt_handler(0xc6, interrupt_118_asm_entry);
register_interrupt_handler(0xc7, interrupt_119_asm_entry);
register_interrupt_handler(0xc8, interrupt_120_asm_entry);
register_interrupt_handler(0xc9, interrupt_121_asm_entry);
register_interrupt_handler(0xca, interrupt_122_asm_entry);
register_interrupt_handler(0xcb, interrupt_123_asm_entry);
register_interrupt_handler(0xcc, interrupt_124_asm_entry);
register_interrupt_handler(0xcd, interrupt_125_asm_entry);
register_interrupt_handler(0xce, interrupt_126_asm_entry);
register_interrupt_handler(0xcf, interrupt_127_asm_entry);
for (u8 i = 0x11; i < 0x50; i++)
register_interrupt_handler(i, unimp_trap);
register_interrupt_handler(0x50, interrupt_80_asm_entry);
register_interrupt_handler(0x51, interrupt_81_asm_entry);
register_interrupt_handler(0x52, interrupt_82_asm_entry);
register_interrupt_handler(0x53, interrupt_83_asm_entry);
register_interrupt_handler(0x54, interrupt_84_asm_entry);
register_interrupt_handler(0x55, interrupt_85_asm_entry);
register_interrupt_handler(0x56, interrupt_86_asm_entry);
register_interrupt_handler(0x57, interrupt_87_asm_entry);
register_interrupt_handler(0x58, interrupt_88_asm_entry);
register_interrupt_handler(0x59, interrupt_89_asm_entry);
register_interrupt_handler(0x5a, interrupt_90_asm_entry);
register_interrupt_handler(0x5b, interrupt_91_asm_entry);
register_interrupt_handler(0x5c, interrupt_92_asm_entry);
register_interrupt_handler(0x5d, interrupt_93_asm_entry);
register_interrupt_handler(0x5e, interrupt_94_asm_entry);
register_interrupt_handler(0x5f, interrupt_95_asm_entry);
register_interrupt_handler(0x60, interrupt_96_asm_entry);
register_interrupt_handler(0x61, interrupt_97_asm_entry);
register_interrupt_handler(0x62, interrupt_98_asm_entry);
register_interrupt_handler(0x63, interrupt_99_asm_entry);
register_interrupt_handler(0x64, interrupt_100_asm_entry);
register_interrupt_handler(0x65, interrupt_101_asm_entry);
register_interrupt_handler(0x66, interrupt_102_asm_entry);
register_interrupt_handler(0x67, interrupt_103_asm_entry);
register_interrupt_handler(0x68, interrupt_104_asm_entry);
register_interrupt_handler(0x69, interrupt_105_asm_entry);
register_interrupt_handler(0x6a, interrupt_106_asm_entry);
register_interrupt_handler(0x6b, interrupt_107_asm_entry);
register_interrupt_handler(0x6c, interrupt_108_asm_entry);
register_interrupt_handler(0x6d, interrupt_109_asm_entry);
register_interrupt_handler(0x6e, interrupt_110_asm_entry);
register_interrupt_handler(0x6f, interrupt_111_asm_entry);
register_interrupt_handler(0x70, interrupt_112_asm_entry);
register_interrupt_handler(0x71, interrupt_113_asm_entry);
register_interrupt_handler(0x72, interrupt_114_asm_entry);
register_interrupt_handler(0x73, interrupt_115_asm_entry);
register_interrupt_handler(0x74, interrupt_116_asm_entry);
register_interrupt_handler(0x75, interrupt_117_asm_entry);
register_interrupt_handler(0x76, interrupt_118_asm_entry);
register_interrupt_handler(0x77, interrupt_119_asm_entry);
register_interrupt_handler(0x78, interrupt_120_asm_entry);
register_interrupt_handler(0x79, interrupt_121_asm_entry);
register_interrupt_handler(0x7a, interrupt_122_asm_entry);
register_interrupt_handler(0x7b, interrupt_123_asm_entry);
register_interrupt_handler(0x7c, interrupt_124_asm_entry);
register_interrupt_handler(0x7d, interrupt_125_asm_entry);
register_interrupt_handler(0x7e, interrupt_126_asm_entry);
register_interrupt_handler(0x7f, interrupt_127_asm_entry);
register_interrupt_handler(0x80, interrupt_128_asm_entry);
register_interrupt_handler(0x81, interrupt_129_asm_entry);
register_interrupt_handler(0x82, interrupt_130_asm_entry);
register_interrupt_handler(0x83, interrupt_131_asm_entry);
register_interrupt_handler(0x84, interrupt_132_asm_entry);
register_interrupt_handler(0x85, interrupt_133_asm_entry);
register_interrupt_handler(0x86, interrupt_134_asm_entry);
register_interrupt_handler(0x87, interrupt_135_asm_entry);
register_interrupt_handler(0x88, interrupt_136_asm_entry);
register_interrupt_handler(0x89, interrupt_137_asm_entry);
register_interrupt_handler(0x8a, interrupt_138_asm_entry);
register_interrupt_handler(0x8b, interrupt_139_asm_entry);
register_interrupt_handler(0x8c, interrupt_140_asm_entry);
register_interrupt_handler(0x8d, interrupt_141_asm_entry);
register_interrupt_handler(0x8e, interrupt_142_asm_entry);
register_interrupt_handler(0x8f, interrupt_143_asm_entry);
register_interrupt_handler(0x90, interrupt_144_asm_entry);
register_interrupt_handler(0x91, interrupt_145_asm_entry);
register_interrupt_handler(0x92, interrupt_146_asm_entry);
register_interrupt_handler(0x93, interrupt_147_asm_entry);
register_interrupt_handler(0x94, interrupt_148_asm_entry);
register_interrupt_handler(0x95, interrupt_149_asm_entry);
register_interrupt_handler(0x96, interrupt_150_asm_entry);
register_interrupt_handler(0x97, interrupt_151_asm_entry);
register_interrupt_handler(0x98, interrupt_152_asm_entry);
register_interrupt_handler(0x99, interrupt_153_asm_entry);
register_interrupt_handler(0x9a, interrupt_154_asm_entry);
register_interrupt_handler(0x9b, interrupt_155_asm_entry);
register_interrupt_handler(0x9c, interrupt_156_asm_entry);
register_interrupt_handler(0x9d, interrupt_157_asm_entry);
register_interrupt_handler(0x9e, interrupt_158_asm_entry);
register_interrupt_handler(0x9f, interrupt_159_asm_entry);
register_interrupt_handler(0xa0, interrupt_160_asm_entry);
register_interrupt_handler(0xa1, interrupt_161_asm_entry);
register_interrupt_handler(0xa2, interrupt_162_asm_entry);
register_interrupt_handler(0xa3, interrupt_163_asm_entry);
register_interrupt_handler(0xa4, interrupt_164_asm_entry);
register_interrupt_handler(0xa5, interrupt_165_asm_entry);
register_interrupt_handler(0xa6, interrupt_166_asm_entry);
register_interrupt_handler(0xa7, interrupt_167_asm_entry);
register_interrupt_handler(0xa8, interrupt_168_asm_entry);
register_interrupt_handler(0xa9, interrupt_169_asm_entry);
register_interrupt_handler(0xaa, interrupt_170_asm_entry);
register_interrupt_handler(0xab, interrupt_171_asm_entry);
register_interrupt_handler(0xac, interrupt_172_asm_entry);
register_interrupt_handler(0xad, interrupt_173_asm_entry);
register_interrupt_handler(0xae, interrupt_174_asm_entry);
register_interrupt_handler(0xaf, interrupt_175_asm_entry);
register_interrupt_handler(0xb0, interrupt_176_asm_entry);
register_interrupt_handler(0xb1, interrupt_177_asm_entry);
register_interrupt_handler(0xb2, interrupt_178_asm_entry);
register_interrupt_handler(0xb3, interrupt_179_asm_entry);
register_interrupt_handler(0xb4, interrupt_180_asm_entry);
register_interrupt_handler(0xb5, interrupt_181_asm_entry);
register_interrupt_handler(0xb6, interrupt_182_asm_entry);
register_interrupt_handler(0xb7, interrupt_183_asm_entry);
register_interrupt_handler(0xb8, interrupt_184_asm_entry);
register_interrupt_handler(0xb9, interrupt_185_asm_entry);
register_interrupt_handler(0xba, interrupt_186_asm_entry);
register_interrupt_handler(0xbb, interrupt_187_asm_entry);
register_interrupt_handler(0xbc, interrupt_188_asm_entry);
register_interrupt_handler(0xbd, interrupt_189_asm_entry);
register_interrupt_handler(0xbe, interrupt_190_asm_entry);
register_interrupt_handler(0xbf, interrupt_191_asm_entry);
register_interrupt_handler(0xc0, interrupt_192_asm_entry);
register_interrupt_handler(0xc1, interrupt_193_asm_entry);
register_interrupt_handler(0xc2, interrupt_194_asm_entry);
register_interrupt_handler(0xc3, interrupt_195_asm_entry);
register_interrupt_handler(0xc4, interrupt_196_asm_entry);
register_interrupt_handler(0xc5, interrupt_197_asm_entry);
register_interrupt_handler(0xc6, interrupt_198_asm_entry);
register_interrupt_handler(0xc7, interrupt_199_asm_entry);
register_interrupt_handler(0xc8, interrupt_200_asm_entry);
register_interrupt_handler(0xc9, interrupt_201_asm_entry);
register_interrupt_handler(0xca, interrupt_202_asm_entry);
register_interrupt_handler(0xcb, interrupt_203_asm_entry);
register_interrupt_handler(0xcc, interrupt_204_asm_entry);
register_interrupt_handler(0xcd, interrupt_205_asm_entry);
register_interrupt_handler(0xce, interrupt_206_asm_entry);
register_interrupt_handler(0xcf, interrupt_207_asm_entry);
register_interrupt_handler(0xd0, interrupt_208_asm_entry);
register_interrupt_handler(0xd1, interrupt_209_asm_entry);
register_interrupt_handler(0xd2, interrupt_210_asm_entry);
register_interrupt_handler(0xd3, interrupt_211_asm_entry);
register_interrupt_handler(0xd4, interrupt_212_asm_entry);
register_interrupt_handler(0xd5, interrupt_213_asm_entry);
register_interrupt_handler(0xd6, interrupt_214_asm_entry);
register_interrupt_handler(0xd7, interrupt_215_asm_entry);
register_interrupt_handler(0xd8, interrupt_216_asm_entry);
register_interrupt_handler(0xd9, interrupt_217_asm_entry);
register_interrupt_handler(0xda, interrupt_218_asm_entry);
register_interrupt_handler(0xdb, interrupt_219_asm_entry);
register_interrupt_handler(0xdc, interrupt_220_asm_entry);
register_interrupt_handler(0xdd, interrupt_221_asm_entry);
register_interrupt_handler(0xde, interrupt_222_asm_entry);
register_interrupt_handler(0xdf, interrupt_223_asm_entry);
register_interrupt_handler(0xe0, interrupt_224_asm_entry);
register_interrupt_handler(0xe1, interrupt_225_asm_entry);
register_interrupt_handler(0xe2, interrupt_226_asm_entry);
register_interrupt_handler(0xe3, interrupt_227_asm_entry);
register_interrupt_handler(0xe4, interrupt_228_asm_entry);
register_interrupt_handler(0xe5, interrupt_229_asm_entry);
register_interrupt_handler(0xe6, interrupt_230_asm_entry);
register_interrupt_handler(0xe7, interrupt_231_asm_entry);
register_interrupt_handler(0xe8, interrupt_232_asm_entry);
register_interrupt_handler(0xe9, interrupt_233_asm_entry);
register_interrupt_handler(0xea, interrupt_234_asm_entry);
register_interrupt_handler(0xeb, interrupt_235_asm_entry);
register_interrupt_handler(0xec, interrupt_236_asm_entry);
register_interrupt_handler(0xed, interrupt_237_asm_entry);
register_interrupt_handler(0xee, interrupt_238_asm_entry);
register_interrupt_handler(0xef, interrupt_239_asm_entry);
register_interrupt_handler(0xf0, interrupt_240_asm_entry);
register_interrupt_handler(0xf1, interrupt_241_asm_entry);
register_interrupt_handler(0xf2, interrupt_242_asm_entry);
register_interrupt_handler(0xf3, interrupt_243_asm_entry);
register_interrupt_handler(0xf4, interrupt_244_asm_entry);
register_interrupt_handler(0xf5, interrupt_245_asm_entry);
register_interrupt_handler(0xf6, interrupt_246_asm_entry);
register_interrupt_handler(0xf7, interrupt_247_asm_entry);
register_interrupt_handler(0xf8, interrupt_248_asm_entry);
register_interrupt_handler(0xf9, interrupt_249_asm_entry);
register_interrupt_handler(0xfa, interrupt_250_asm_entry);
register_interrupt_handler(0xfb, interrupt_251_asm_entry);
register_interrupt_handler(0xfc, interrupt_252_asm_entry);
register_interrupt_handler(0xfd, interrupt_253_asm_entry);
register_interrupt_handler(0xfe, interrupt_254_asm_entry);
register_interrupt_handler(0xff, interrupt_255_asm_entry);
dbg() << "Installing Unhandled Handlers";
@ -688,6 +738,8 @@ FPUState Processor::s_clean_fpu_state;
static Vector<Processor*>* s_processors;
static SpinLock s_processor_lock;
volatile u32 Processor::g_total_processors;
static volatile bool s_smp_enabled;
Vector<Processor*>& Processor::processors()
{
@ -707,6 +759,12 @@ Processor& Processor::by_id(u32 cpu)
return *procs[cpu];
}
[[noreturn]] static inline void halt_this()
{
for (;;) {
asm volatile("cli; hlt");
}
}
void Processor::cpu_detect()
{
@ -868,13 +926,21 @@ void Processor::early_initialize(u32 cpu)
m_invoke_scheduler_async = false;
m_scheduler_initialized = false;
m_message_queue = nullptr;
m_idle_thread = nullptr;
m_current_thread = nullptr;
m_mm_data = nullptr;
m_info = nullptr;
cpu_setup();
m_halt_requested = false;
if (cpu == 0) {
s_smp_enabled = false;
atomic_store(&g_total_processors, 1u, AK::MemoryOrder::memory_order_release);
} else {
atomic_fetch_add(&g_total_processors, 1u, AK::MemoryOrder::memory_order_acq_rel);
}
cpu_setup();
gdt_init();
ASSERT(&current() == this); // sanity check
}
@ -910,9 +976,9 @@ void Processor::initialize(u32 cpu)
if (cpu >= s_processors->size())
s_processors->resize(cpu + 1);
(*s_processors)[cpu] = this;
}
klog() << "CPU[" << cpu << "]: initialized Processor at " << VirtualAddress(FlatPtr(this));
}
}
void Processor::write_raw_gdt_entry(u16 selector, u32 low, u32 high)
@ -979,7 +1045,7 @@ bool Processor::get_context_frame_ptr(Thread& thread, u32& frame_ptr, u32& eip)
// TODO: If this is the case, the thread is currently running
// on another processor. We can't trust the kernel stack as
// it may be changing at any time. We need to probably send
// an ICI to that processor, have it walk the stack and wait
// an IPI to that processor, have it walk the stack and wait
// until it returns the data back to us
dbg() << "CPU[" << proc.id() << "] getting stack for "
<< thread << " on other CPU# " << thread.cpu() << " not yet implemented!";
@ -1288,12 +1354,16 @@ void Processor::initialize_context_switching(Thread& initial_thread)
"addl $20, %%ebx \n" // calculate pointer to TrapFrame
"pushl %%ebx \n"
"cld \n"
"pushl %[cpu] \n"
"call init_finished \n"
"addl $4, %%esp \n"
"call enter_trap_no_irq \n"
"addl $4, %%esp \n"
"lret \n"
:: [new_esp] "g" (tss.esp),
[new_eip] "a" (tss.eip),
[from_to_thread] "b" (&initial_thread)
[from_to_thread] "b" (&initial_thread),
[cpu] "c" (id())
);
ASSERT_NOT_REACHED();
@ -1312,7 +1382,9 @@ void Processor::exit_trap(TrapFrame& trap)
InterruptDisabler disabler;
ASSERT(m_in_irq >= trap.prev_irq_level);
m_in_irq = trap.prev_irq_level;
smp_process_pending_messages();
if (!m_in_irq && !m_in_critical)
check_invoke_scheduler();
}
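With this change, IPIs are serviced on the way out of every trap: exit_trap() drains the processor's message queue via smp_process_pending_messages() before deciding whether to invoke the scheduler, so a broadcast IPI only has to get the target CPU into (and out of) any interrupt handler.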
@ -1327,6 +1399,267 @@ void Processor::check_invoke_scheduler()
}
}
void Processor::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
auto ptr = vaddr.as_ptr();
while (page_count > 0) {
asm volatile("invlpg %0"
:
: "m"(*(char*)vaddr.get())
: "memory");
ptr += PAGE_SIZE;
page_count--;
}
}
void Processor::flush_tlb(VirtualAddress vaddr, size_t page_count)
{
flush_tlb_local(vaddr, page_count);
if (s_smp_enabled)
smp_broadcast_flush_tlb(vaddr, page_count);
}
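A hypothetical caller (not part of this commit; unmap_pages is an invented name) illustrating the resulting TLB-shootdown flow:

// Hypothetical sketch: after changing mappings, flush the affected range
// everywhere. flush_tlb() invalidates locally with invlpg and, once
// s_smp_enabled is set, broadcasts a FlushTlb message synchronously so
// every other core has done the same by the time this returns.
void unmap_pages(VirtualAddress vaddr, size_t page_count)
{
    // ... clear the PTEs for [vaddr, vaddr + page_count * PAGE_SIZE) ...
    Processor::flush_tlb(vaddr, page_count);
}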
static volatile ProcessorMessage* s_message_pool;
void Processor::smp_return_to_pool(ProcessorMessage& msg)
{
ProcessorMessage* next = nullptr;
do {
msg.next = next;
} while (!atomic_compare_exchange_strong(&s_message_pool, next, &msg, AK::MemoryOrder::memory_order_acq_rel));
}
ProcessorMessage& Processor::smp_get_from_pool()
{
ProcessorMessage* msg;
// The assumption is that messages are never removed from the pool!
for (;;) {
msg = atomic_load(&s_message_pool, AK::MemoryOrder::memory_order_consume);
if (!msg) {
if (!Processor::current().smp_process_pending_messages()) {
// TODO: pause for a bit?
}
continue;
}
// If another processor were to use this message in the meanwhile,
// "msg" is still valid (because it never gets freed). We'd detect
// this because the expected value "msg" and pool would
// no longer match, and the compare_exchange will fail. But accessing
// "msg->next" is always safe here.
if (atomic_compare_exchange_strong(&s_message_pool, msg, msg->next, AK::MemoryOrder::memory_order_acq_rel)) {
// We successfully "popped" this available message
break;
}
}
ASSERT(msg != nullptr);
return *msg;
}
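The pool is the classic Treiber-stack free list whose nodes are never freed. For reference, the same idiom in standalone form (a sketch using std::atomic rather than the kernel's AK atomics):

#include <atomic>

struct Node {
    Node* next { nullptr };
};

std::atomic<Node*> pool_head { nullptr };

void push(Node& node)
{
    Node* expected = pool_head.load(std::memory_order_relaxed);
    do {
        node.next = expected;
    } while (!pool_head.compare_exchange_weak(expected, &node,
        std::memory_order_acq_rel, std::memory_order_relaxed));
}

Node* pop()
{
    Node* head = pool_head.load(std::memory_order_acquire);
    // Reading head->next is safe even if another thread pops "head"
    // concurrently, because nodes are pooled and never deallocated; the
    // compare-exchange then fails and refreshes "head" for the retry.
    while (head && !pool_head.compare_exchange_weak(head, head->next,
               std::memory_order_acq_rel, std::memory_order_acquire)) {
    }
    return head; // nullptr if the pool is empty
}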
void Processor::smp_enable()
{
size_t msg_pool_size = Processor::count() * 100u;
size_t msg_entries_cnt = Processor::count();
auto msgs = new ProcessorMessage[msg_pool_size];
auto msg_entries = new ProcessorMessageEntry[msg_pool_size * msg_entries_cnt];
size_t msg_entry_i = 0;
for (size_t i = 0; i < msg_pool_size; i++, msg_entry_i += msg_entries_cnt) {
auto& msg = msgs[i];
msg.next = i < msg_pool_size - 1 ? &msgs[i + 1] : nullptr;
msg.per_proc_entries = &msg_entries[msg_entry_i];
for (size_t k = 0; k < msg_entries_cnt; k++)
msg_entries[msg_entry_i + k].msg = &msg;
}
atomic_store(&s_message_pool, &msgs[0], AK::MemoryOrder::memory_order_release);
// Start sending IPI messages
s_smp_enabled = true;
}
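Worked example of the sizing above: on a 4-CPU system, smp_enable() allocates 4 * 100 = 400 pooled messages and 400 * 4 = 1600 per-processor entries. Each message permanently owns one queue entry per CPU, so broadcasting never has to allocate memory.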
void Processor::smp_cleanup_message(ProcessorMessage& msg)
{
switch (msg.type) {
case ProcessorMessage::CallbackWithData:
if (msg.callback_with_data.free)
msg.callback_with_data.free(msg.callback_with_data.data);
break;
default:
break;
}
}
bool Processor::smp_process_pending_messages()
{
bool did_process = false;
u32 prev_flags;
enter_critical(prev_flags);
if (auto pending_msgs = atomic_exchange(&m_message_queue, nullptr, AK::MemoryOrder::memory_order_acq_rel))
{
// We pulled the stack of pending messages in LIFO order, so we need to reverse the list first
auto reverse_list =
[](ProcessorMessageEntry* list) -> ProcessorMessageEntry*
{
ProcessorMessageEntry* rev_list = nullptr;
while (list) {
auto next = list->next;
list->next = rev_list;
rev_list = list;
list = next;
}
return rev_list;
};
pending_msgs = reverse_list(pending_msgs);
// now process in the right order
ProcessorMessageEntry* next_msg;
for (auto cur_msg = pending_msgs; cur_msg; cur_msg = next_msg) {
next_msg = cur_msg->next;
auto msg = cur_msg->msg;
#ifdef SMP_DEBUG
dbg() << "SMP[" << id() << "]: Processing message " << VirtualAddress(msg);
#endif
switch (msg->type) {
case ProcessorMessage::Callback:
msg->callback.handler();
break;
case ProcessorMessage::CallbackWithData:
msg->callback_with_data.handler(msg->callback_with_data.data);
break;
case ProcessorMessage::FlushTlb:
flush_tlb_local(VirtualAddress(msg->flush_tlb.ptr), msg->flush_tlb.page_count);
break;
}
bool is_async = msg->async; // Need to cache this value *before* dropping the ref count!
auto prev_refs = atomic_fetch_sub(&msg->refs, 1u, AK::MemoryOrder::memory_order_acq_rel);
ASSERT(prev_refs != 0);
if (prev_refs == 1) {
// All processors handled this. If this is an async message,
// we need to clean it up and return it to the pool
if (is_async) {
smp_cleanup_message(*msg);
smp_return_to_pool(*msg);
}
}
if (m_halt_requested)
halt_this();
}
did_process = true;
} else if (m_halt_requested) {
halt_this();
}
leave_critical(prev_flags);
return did_process;
}
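Note the ordering dance here: senders push entries LIFO-style, so messages queued as A, B, C hang off the queue head as C -> B -> A; reversing the list first means the receiver still executes them in A, B, C send order.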
bool Processor::smp_queue_message(ProcessorMessage& msg)
{
// Note that it's quite possible that the other processor may pop
// the queue at any given time. We rely on the fact that the messages
// are pooled and never get freed!
auto& msg_entry = msg.per_proc_entries[id()];
ASSERT(msg_entry.msg == &msg);
ProcessorMessageEntry* next = nullptr;
do {
msg_entry.next = next;
} while (!atomic_compare_exchange_strong(&m_message_queue, next, &msg_entry, AK::MemoryOrder::memory_order_acq_rel));
return next == nullptr;
}
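The return value reports whether this entry landed on an empty queue (the old head, left in next, was nullptr). That is the hook for the TODO in smp_broadcast_message() below: an IPI is only strictly needed for CPUs whose queue just went from empty to non-empty.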
void Processor::smp_broadcast_message(ProcessorMessage& msg, bool async)
{
auto& cur_proc = Processor::current();
msg.async = async;
#ifdef SMP_DEBUG
dbg() << "SMP[" << cur_proc.id() << "]: Broadcast message " << VirtualAddress(&msg) << " to cpus: " << (count()) << " proc: " << VirtualAddress(&cur_proc);
#endif
atomic_store(&msg.refs, count() - 1, AK::MemoryOrder::memory_order_release);
ASSERT(msg.refs > 0);
for_each(
[&](Processor& proc) -> IterationDecision
{
if (&proc != &cur_proc) {
if (proc.smp_queue_message(msg)) {
// TODO: only send IPI to that CPU if we queued the first
}
}
return IterationDecision::Continue;
});
// Now trigger an IPI on all other APs
APIC::the().broadcast_ipi();
if (!async) {
// If synchronous then we must cleanup and return the message back
// to the pool. Otherwise, the last processor to complete it will return it
while (atomic_load(&msg.refs, AK::MemoryOrder::memory_order_consume) != 0) {
// TODO: pause for a bit?
}
smp_cleanup_message(msg);
smp_return_to_pool(msg);
}
}
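Worked example of the reference counting: on a 4-CPU system a broadcast sets refs to 3, one per remote processor, and each receiver decrements it after dispatching. For an async message, the CPU that drops refs to zero cleans the message up and returns it to the pool; for a synchronous one, the sender spins until refs reaches zero and then performs the cleanup itself, as above.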
void Processor::smp_broadcast(void(*callback)(void*), void* data, void(*free_data)(void*), bool async)
{
auto& msg = smp_get_from_pool();
msg.type = ProcessorMessage::CallbackWithData;
msg.callback_with_data.handler = callback;
msg.callback_with_data.data = data;
msg.callback_with_data.free = free_data;
smp_broadcast_message(msg, async);
}
void Processor::smp_broadcast(void(*callback)(), bool async)
{
auto& msg = smp_get_from_pool();
msg.type = ProcessorMessage::Callback;
msg.callback.handler = callback;
smp_broadcast_message(msg, async);
}
void Processor::smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count)
{
auto& msg = smp_get_from_pool();
msg.type = ProcessorMessage::FlushTlb;
msg.flush_tlb.ptr = vaddr.as_ptr();
msg.flush_tlb.page_count = page_count;
smp_broadcast_message(msg, false);
}
void Processor::smp_broadcast_halt()
{
// We don't want to use a message, because this could have been triggered
// by being out of memory and we might not be able to get a message
for_each(
[&](Processor& proc) -> IterationDecision
{
proc.m_halt_requested = true;
return IterationDecision::Continue;
});
// Now trigger an IPI on all other APs
APIC::the().broadcast_ipi();
}
void Processor::halt()
{
if (s_smp_enabled)
smp_broadcast_halt();
halt_this();
}
void Processor::gdt_init()
{
m_gdt_length = 0;

View file

@ -26,6 +26,7 @@
#pragma once
#include <AK/Atomic.h>
#include <AK/Badge.h>
#include <AK/Noncopyable.h>
#include <AK/Vector.h>
@ -33,7 +34,7 @@
#include <Kernel/VirtualAddress.h>
#define PAGE_SIZE 4096
#define GENERIC_INTERRUPT_HANDLERS_COUNT 128
#define GENERIC_INTERRUPT_HANDLERS_COUNT (256 - IRQ_VECTOR_BASE)
#define PAGE_MASK ((FlatPtr)0xfffff000u)
namespace Kernel {
@ -276,13 +277,6 @@ void flush_idt();
void load_task_register(u16 selector);
void handle_crash(RegisterState&, const char* description, int signal, bool out_of_memory = false);
[[noreturn]] static inline void hang()
{
asm volatile("cli; hlt");
for (;;) {
}
}
#define LSW(x) ((u32)(x)&0xFFFF)
#define MSW(x) (((u32)(x) >> 16) & 0xFFFF)
#define LSB(x) ((x)&0xFF)
@ -307,20 +301,17 @@ inline u32 cpu_flags()
inline void set_fs(u32 segment)
{
asm volatile(
"movl %%eax, %%fs" :: "a"(segment)
: "memory"
);
"movl %%eax, %%fs" ::"a"(segment)
: "memory");
}
inline void set_gs(u32 segment)
{
asm volatile(
"movl %%eax, %%gs" :: "a"(segment)
: "memory"
);
"movl %%eax, %%gs" ::"a"(segment)
: "memory");
}
inline u32 get_fs()
{
u32 fs;
@ -532,9 +523,9 @@ u32 read_dr6();
static inline bool is_kernel_mode()
{
u32 cs;
asm volatile (
asm volatile(
"movl %%cs, %[cs] \n"
: [cs] "=g" (cs));
: [ cs ] "=g"(cs));
return (cs & 3) == 0;
}
@ -624,10 +615,48 @@ struct TrapFrame;
class ProcessorInfo;
struct MemoryManagerData;
struct ProcessorMessageEntry;
struct ProcessorMessage {
enum Type {
FlushTlb,
Callback,
CallbackWithData
};
Type type;
volatile u32 refs; // atomic
union {
ProcessorMessage* next; // only valid while in the pool
struct {
void (*handler)();
} callback;
struct {
void* data;
void (*handler)(void*);
void (*free)(void*);
} callback_with_data;
struct {
u8* ptr;
size_t page_count;
} flush_tlb;
};
volatile bool async;
ProcessorMessageEntry* per_proc_entries;
};
struct ProcessorMessageEntry {
ProcessorMessageEntry* next;
ProcessorMessage* msg;
};
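The per_proc_entries array is what lets a single message sit on several queues at once: each pooled message permanently carries one ProcessorMessageEntry per CPU, and per_proc_entries[i] serves as the intrusive link node inside CPU i's m_message_queue. The message becomes reusable only once its refs count has drained to zero.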
class Processor {
friend class ProcessorInfo;
AK_MAKE_NONCOPYABLE(Processor);
AK_MAKE_NONMOVABLE(Processor);
Processor* m_self; // must be first field (%fs offset 0x0)
DescriptorTablePointer m_gdtr;
@ -641,48 +670,85 @@ class Processor {
TSS32 m_tss;
static FPUState s_clean_fpu_state;
CPUFeature m_features;
static volatile u32 g_total_processors; // atomic
ProcessorInfo* m_info;
MemoryManagerData* m_mm_data;
Thread* m_current_thread;
Thread* m_idle_thread;
volatile ProcessorMessageEntry* m_message_queue; // atomic, LIFO
bool m_invoke_scheduler_async;
bool m_scheduler_initialized;
bool m_halt_requested;
void gdt_init();
void write_raw_gdt_entry(u16 selector, u32 low, u32 high);
void write_gdt_entry(u16 selector, Descriptor& descriptor);
static Vector<Processor*>& processors();
static void smp_return_to_pool(ProcessorMessage& msg);
static ProcessorMessage& smp_get_from_pool();
static void smp_cleanup_message(ProcessorMessage& msg);
bool smp_queue_message(ProcessorMessage& msg);
static void smp_broadcast_message(ProcessorMessage& msg, bool async);
static void smp_broadcast_halt();
void cpu_detect();
void cpu_setup();
String features_string() const;
public:
Processor() = default;
void early_initialize(u32 cpu);
void initialize(u32 cpu);
static u32 count()
{
// NOTE: because this value never changes once all APs are booted,
// we don't really need to do an atomic_load() on this variable
return g_total_processors;
}
ALWAYS_INLINE static void wait_check()
{
Processor::current().smp_process_pending_messages();
// TODO: pause
}
[[noreturn]] static void halt();
static void flush_entire_tlb_local()
{
write_cr3(read_cr3());
}
static void flush_tlb_local(VirtualAddress vaddr, size_t page_count);
static void flush_tlb(VirtualAddress vaddr, size_t page_count);
Descriptor& get_gdt_entry(u16 selector);
void flush_gdt();
const DescriptorTablePointer& get_gdtr();
static Processor& by_id(u32 cpu);
template <typename Callback>
template<typename Callback>
static inline IterationDecision for_each(Callback callback)
{
auto& procs = processors();
for (auto it = procs.begin(); it != procs.end(); ++it) {
if (callback(**it) == IterationDecision::Break)
size_t count = procs.size();
for (size_t i = 0; i < count; i++) {
if (callback(*procs[i]) == IterationDecision::Break)
return IterationDecision::Break;
}
return IterationDecision::Continue;
}
ALWAYS_INLINE ProcessorInfo& info() { return *m_info; }
ALWAYS_INLINE static Processor& current()
{
return *(Processor*)read_fs_u32(0);
@ -729,19 +795,30 @@ public:
{
return m_cpu;
}
ALWAYS_INLINE u32 raise_irq()
{
return m_in_irq++;
}
ALWAYS_INLINE void restore_irq(u32 prev_irq)
{
ASSERT(prev_irq <= m_in_irq);
m_in_irq = prev_irq;
}
ALWAYS_INLINE u32& in_irq()
{
return m_in_irq;
}
ALWAYS_INLINE void enter_critical(u32& prev_flags)
{
m_in_critical++;
prev_flags = cpu_flags();
cli();
}
ALWAYS_INLINE void leave_critical(u32 prev_flags)
{
ASSERT(m_in_critical > 0);
@ -754,7 +831,7 @@ public:
else
cli();
}
ALWAYS_INLINE u32 clear_critical(u32& prev_flags, bool enable_interrupts)
{
u32 prev_crit = m_in_critical;
@ -766,7 +843,7 @@ public:
sti();
return prev_crit;
}
ALWAYS_INLINE void restore_critical(u32 prev_crit, u32 prev_flags)
{
ASSERT(m_in_critical == 0);
@ -784,6 +861,27 @@ public:
return s_clean_fpu_state;
}
static void smp_enable();
bool smp_process_pending_messages();
template<typename Callback>
static void smp_broadcast(Callback callback, bool async)
{
auto* data = new Callback(move(callback));
smp_broadcast(
[](void* data) {
(*reinterpret_cast<Callback*>(data))();
},
data,
[](void* data) {
delete reinterpret_cast<Callback*>(data);
},
async);
}
static void smp_broadcast(void (*callback)(), bool async);
static void smp_broadcast(void (*callback)(void*), void* data, void (*free_data)(void*), bool async);
static void smp_broadcast_flush_tlb(VirtualAddress vaddr, size_t page_count);
ALWAYS_INLINE bool has_feature(CPUFeature f) const
{
return (static_cast<u32>(m_features) & static_cast<u32>(f)) != 0;
@ -793,6 +891,7 @@ public:
void invoke_scheduler_async() { m_invoke_scheduler_async = true; }
void enter_trap(TrapFrame& trap, bool raise_irq);
void exit_trap(TrapFrame& trap);
[[noreturn]] void initialize_context_switching(Thread& initial_thread);
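A hypothetical use of the templated smp_broadcast() above (wake_everyone and generation are invented for illustration): the capturing lambda is copied to the heap, trampolined through the CallbackWithData machinery, and deleted via the free_data callback once every CPU has run it.

void wake_everyone(int generation)
{
    // Capturing lambda, so it routes through the template overload;
    // async == false makes the caller wait until all CPUs have run it.
    Processor::smp_broadcast([generation] {
        klog() << "CPU[" << Processor::current().id() << "] saw generation " << generation;
    }, false);
}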

View file

@ -28,260 +28,179 @@
#include <Kernel/Arch/i386/Interrupts.h>
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(0, 0x50)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(1, 0x51)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(2, 0x52)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(3, 0x53)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(4, 0x54)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(5, 0x55)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(6, 0x56)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(7, 0x57)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(8, 0x58)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(9, 0x59)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(10, 0x5a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(11, 0x5b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(12, 0x5c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(13, 0x5d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(14, 0x5e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(15, 0x5f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(16, 0x60)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(17, 0x61)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(18, 0x62)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(19, 0x63)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(20, 0x64)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(21, 0x65)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(22, 0x66)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(23, 0x67)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(24, 0x68)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(25, 0x69)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(26, 0x6a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(27, 0x6b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(28, 0x6c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(29, 0x6d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(30, 0x6e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(31, 0x6f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(32, 0x70)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(33, 0x71)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(34, 0x72)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(35, 0x73)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(36, 0x74)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(37, 0x75)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(38, 0x76)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(39, 0x77)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(40, 0x78)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(41, 0x79)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(42, 0x7a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(43, 0x7b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(44, 0x7c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(45, 0x7d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(46, 0x7e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(47, 0x7f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(48, 0x80)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(49, 0x81)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(50, 0x82)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(51, 0x83)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(52, 0x84)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(53, 0x85)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(54, 0x86)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(55, 0x87)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(56, 0x88)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(57, 0x89)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(58, 0x8a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(59, 0x8b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(60, 0x8c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(61, 0x8d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(62, 0x8e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(63, 0x8f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(64, 0x90)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(65, 0x91)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(66, 0x92)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(67, 0x93)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(68, 0x94)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(69, 0x95)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(70, 0x96)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(71, 0x97)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(72, 0x98)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(73, 0x99)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(74, 0x9a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(75, 0x9b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(76, 0x9c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(77, 0x9d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(78, 0x9e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(79, 0x9f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(80, 0xa0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(81, 0xa1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(82, 0xa2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(83, 0xa3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(84, 0xa4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(85, 0xa5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(86, 0xa6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(87, 0xa7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(88, 0xa8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(89, 0xa9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(90, 0xaa)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(91, 0xab)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(92, 0xac)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(93, 0xad)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(94, 0xae)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(95, 0xaf)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(96, 0xb0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(97, 0xb1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(98, 0xb2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(99, 0xb3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(100, 0xb4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(101, 0xb5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(102, 0xb6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(103, 0xb7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(104, 0xb8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(105, 0xb9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(106, 0xba)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(107, 0xbb)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(108, 0xbc)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(109, 0xbd)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(110, 0xbe)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(111, 0xbf)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(112, 0xc0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(113, 0xc1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(114, 0xc2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(115, 0xc3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(116, 0xc4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(117, 0xc5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(118, 0xc6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(119, 0xc7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(120, 0xc8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(121, 0xc9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(122, 0xca)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(123, 0xcb)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(124, 0xcc)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(125, 0xcd)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(126, 0xce)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(127, 0xcf)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(128, 0xd0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(129, 0xd1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(130, 0xd2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(131, 0xd3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(132, 0xd4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(133, 0xd5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(134, 0xd6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(135, 0xd7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(136, 0xd8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(137, 0xd9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(138, 0xda)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(139, 0xdb)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(140, 0xdc)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(141, 0xdd)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(142, 0xde)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(143, 0xdf)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(144, 0xe0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(145, 0xe1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(146, 0xe2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(147, 0xe3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(148, 0xe4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(149, 0xe5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(150, 0xe6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(151, 0xe7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(152, 0xe8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(153, 0xe9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(154, 0xea)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(155, 0xeb)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(156, 0xec)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(157, 0xed)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(158, 0xee)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(159, 0xef)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(160, 0xf0)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(161, 0xf1)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(162, 0xf2)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(163, 0xf3)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(164, 0xf4)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(165, 0xf5)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(166, 0xf6)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(167, 0xf7)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(168, 0xf8)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(169, 0xf9)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(170, 0xfa)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(171, 0xfb)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(172, 0xfc)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(173, 0xfd)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(174, 0xfe)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(175, 0xff)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(176, 0x100)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(177, 0x101)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(178, 0x102)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(179, 0x103)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(180, 0x104)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(181, 0x105)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(182, 0x106)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(183, 0x107)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(184, 0x108)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(185, 0x109)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(186, 0x10a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(187, 0x10b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(188, 0x10c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(189, 0x10d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(190, 0x10e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(191, 0x10f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(192, 0x110)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(193, 0x111)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(194, 0x112)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(195, 0x113)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(196, 0x114)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(197, 0x115)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(198, 0x116)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(199, 0x117)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(200, 0x118)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(201, 0x119)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(202, 0x11a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(203, 0x11b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(204, 0x11c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(205, 0x11d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(206, 0x11e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(207, 0x11f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(208, 0x120)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(209, 0x121)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(210, 0x122)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(211, 0x123)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(212, 0x124)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(213, 0x125)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(214, 0x126)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(215, 0x127)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(216, 0x128)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(217, 0x129)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(218, 0x12a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(219, 0x12b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(220, 0x12c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(221, 0x12d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(222, 0x12e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(223, 0x12f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(224, 0x130)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(225, 0x131)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(226, 0x132)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(227, 0x133)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(228, 0x134)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(229, 0x135)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(230, 0x136)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(231, 0x137)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(232, 0x138)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(233, 0x139)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(234, 0x13a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(235, 0x13b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(236, 0x13c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(237, 0x13d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(238, 0x13e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(239, 0x13f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(240, 0x140)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(241, 0x141)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(242, 0x142)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(243, 0x143)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(244, 0x144)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(245, 0x145)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(246, 0x146)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(247, 0x147)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(248, 0x148)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(249, 0x149)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(250, 0x14a)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(251, 0x14b)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(252, 0x14c)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(253, 0x14d)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(254, 0x14e)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(255, 0x14f)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(256, 0x150)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(80)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(81)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(82)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(83)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(84)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(85)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(86)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(87)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(88)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(89)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(90)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(91)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(92)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(93)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(94)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(95)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(96)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(97)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(98)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(99)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(100)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(101)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(102)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(103)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(104)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(105)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(106)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(107)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(108)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(109)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(110)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(111)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(112)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(113)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(114)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(115)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(116)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(117)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(118)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(119)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(120)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(121)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(122)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(123)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(124)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(125)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(126)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(127)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(128)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(129)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(130)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(131)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(132)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(133)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(134)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(135)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(136)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(137)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(138)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(139)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(140)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(141)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(142)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(143)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(144)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(145)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(146)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(147)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(148)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(149)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(150)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(151)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(152)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(153)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(154)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(155)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(156)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(157)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(158)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(159)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(160)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(161)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(162)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(163)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(164)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(165)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(166)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(167)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(168)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(169)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(170)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(171)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(172)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(173)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(174)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(175)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(176)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(177)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(178)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(179)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(180)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(181)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(182)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(183)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(184)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(185)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(186)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(187)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(188)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(189)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(190)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(191)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(192)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(193)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(194)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(195)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(196)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(197)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(198)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(199)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(200)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(201)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(202)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(203)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(204)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(205)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(206)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(207)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(208)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(209)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(210)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(211)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(212)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(213)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(214)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(215)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(216)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(217)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(218)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(219)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(220)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(221)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(222)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(223)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(224)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(225)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(226)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(227)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(228)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(229)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(230)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(231)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(232)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(233)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(234)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(235)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(236)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(237)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(238)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(239)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(240)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(241)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(242)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(243)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(244)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(245)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(246)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(247)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(248)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(249)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(250)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(251)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(252)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(253)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(254)
GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(255)

View file

@ -32,12 +32,12 @@
extern "C" void interrupt_common_asm_entry();
#define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(interrupt_vector, isr_number) \
extern "C" void interrupt_##interrupt_vector##_asm_entry(); \
asm(".globl interrupt_" #interrupt_vector "_asm_entry\n" \
"interrupt_" #interrupt_vector "_asm_entry:\n" \
" pushw $" #isr_number "\n" \
" pushw $0\n" \
#define GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(isr_number) \
extern "C" void interrupt_##isr_number##_asm_entry(); \
asm(".globl interrupt_" #isr_number "_asm_entry\n" \
"interrupt_" #isr_number "_asm_entry:\n" \
" pushw $" #isr_number "\n" \
" pushw $0\n" \
" jmp interrupt_common_asm_entry\n");
asm(
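// For illustration (a sketch derived from the macro text above, not part of
// the diff): after this change, GENERATE_GENERIC_INTERRUPT_HANDLER_ASM_ENTRY(50)
// expands to roughly:
//
//     extern "C" void interrupt_50_asm_entry();
//     asm(".globl interrupt_50_asm_entry\n"
//         "interrupt_50_asm_entry:\n"
//         "    pushw $50\n"
//         "    pushw $0\n"
//         "    jmp interrupt_common_asm_entry\n");
//
// The entry point name and the pushed ISR number are now always the same
// value, giving the 1:1 vector mapping the commit message describes.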

View file

@ -42,6 +42,7 @@ class ProcessorInfo
u32 m_display_family;
u32 m_stepping;
u32 m_type;
u32 m_apic_id;
public:
ProcessorInfo(Processor& processor);
@ -53,6 +54,9 @@ public:
u32 display_family() const { return m_display_family; }
u32 stepping() const { return m_stepping; }
u32 type() const { return m_type; }
u32 apic_id() const { return m_apic_id; }
void set_apic_id(u32 apic_id) { m_apic_id = apic_id; }
};
}

View file

@ -1129,7 +1129,7 @@ unsigned Ext2FS::find_a_free_inode(GroupIndex preferred_group, off_t expected_si
LOCKER(m_lock);
#ifdef EXT2_DEBUG
dbg() << "Ext2FS: find_a_free_inode(preferred_group: " << preferred_group << ", expected_size: " << expected_size) << ")";
dbg() << "Ext2FS: find_a_free_inode(preferred_group: " << preferred_group << ", expected_size: " << expected_size << ")";
#endif
unsigned needed_blocks = ceil_div(static_cast<size_t>(expected_size), block_size());

View file

@ -148,7 +148,7 @@ void* kmalloc_impl(size_t size)
if (g_kmalloc_bytes_free < real_size) {
Kernel::dump_backtrace();
klog() << "kmalloc(): PANIC! Out of memory\nsum_free=" << g_kmalloc_bytes_free << ", real_size=" << real_size;
Kernel::hang();
Processor::halt();
}
size_t chunks_needed = (real_size + CHUNK_SIZE - 1) / CHUNK_SIZE;
@ -167,7 +167,7 @@ void* kmalloc_impl(size_t size)
if (!first_chunk.has_value()) {
klog() << "kmalloc(): PANIC! Out of memory (no suitable block for size " << size << ")";
Kernel::dump_backtrace();
Kernel::hang();
Processor::halt();
}
return kmalloc_allocate(first_chunk.value(), chunks_needed);

View file

@ -30,6 +30,7 @@
#include <AK/Types.h>
#include <Kernel/ACPI/Parser.h>
#include <Kernel/Arch/i386/CPU.h>
#include <Kernel/Arch/i386/ProcessorInfo.h>
#include <Kernel/IO.h>
#include <Kernel/Interrupts/APIC.h>
#include <Kernel/Interrupts/SpuriousInterruptHandler.h>
@ -39,8 +40,15 @@
#include <Kernel/VM/TypedMapping.h>
//#define APIC_DEBUG
//#define APIC_SMP_DEBUG
#define IRQ_APIC_SPURIOUS 0x7f
#define IRQ_APIC_IPI (0xfd - IRQ_VECTOR_BASE)
#define IRQ_APIC_ERR (0xfe - IRQ_VECTOR_BASE)
#define IRQ_APIC_SPURIOUS (0xff - IRQ_VECTOR_BASE)
#define APIC_ICR_DELIVERY_PENDING (1 << 12)
#define APIC_ENABLED (1 << 8)
#define APIC_BASE_MSR 0x1b
@ -60,7 +68,67 @@
namespace Kernel {
static APIC *s_apic;
static APIC* s_apic;
class APICIPIInterruptHandler final : public GenericInterruptHandler {
public:
explicit APICIPIInterruptHandler(u8 interrupt_vector)
: GenericInterruptHandler(interrupt_vector, true)
{
}
virtual ~APICIPIInterruptHandler()
{
}
static void initialize(u8 interrupt_number)
{
new APICIPIInterruptHandler(interrupt_number);
}
virtual void handle_interrupt(const RegisterState&) override;
virtual bool eoi() override;
virtual HandlerType type() const override { return HandlerType::IRQHandler; }
virtual const char* purpose() const override { return "IPI Handler"; }
virtual const char* controller() const override { ASSERT_NOT_REACHED(); }
virtual size_t sharing_devices_count() const override { return 0; }
virtual bool is_shared_handler() const override { return false; }
virtual bool is_sharing_with_others() const override { return false; }
private:
};
class APICErrInterruptHandler final : public GenericInterruptHandler {
public:
explicit APICErrInterruptHandler(u8 interrupt_vector)
: GenericInterruptHandler(interrupt_vector, true)
{
}
virtual ~APICErrInterruptHandler()
{
}
static void initialize(u8 interrupt_number)
{
new APICErrInterruptHandler(interrupt_number);
}
virtual void handle_interrupt(const RegisterState&) override;
virtual bool eoi() override;
virtual HandlerType type() const override { return HandlerType::IRQHandler; }
virtual const char* purpose() const override { return "SMP Error Handler"; }
virtual const char* controller() const override { ASSERT_NOT_REACHED(); }
virtual size_t sharing_devices_count() const override { return 0; }
virtual bool is_shared_handler() const override { return false; }
virtual bool is_sharing_with_others() const override { return false; }
private:
};
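// Note: both handlers pass `true` as the second GenericInterruptHandler
// constructor argument (disable_remap; see the GenericInterruptHandler
// changes further down), so they are registered directly at their vector
// instead of going through InterruptManagement's IRQ remapping. The IPI and
// error vectors need this exact, non-shared 1:1 mapping.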
bool APIC::initialized()
{
@ -79,8 +147,6 @@ void APIC::initialize()
s_apic = new APIC();
}
PhysicalAddress APIC::get_base()
{
u32 lo, hi;
@ -107,6 +173,23 @@ u32 APIC::read_register(u32 offset)
return *reinterpret_cast<volatile u32*>(m_apic_base->vaddr().offset(offset).as_ptr());
}
void APIC::set_lvt(u32 offset, u8 interrupt)
{
write_register(offset, (read_register(offset) & 0xffffffff) | interrupt);
}
void APIC::set_siv(u32 offset, u8 interrupt)
{
write_register(offset, (read_register(offset) & 0xffffffff) | interrupt | APIC_ENABLED);
}
void APIC::wait_for_pending_icr()
{
while ((read_register(APIC_REG_ICR_LOW) & APIC_ICR_DELIVERY_PENDING) != 0) {
IO::delay(200);
}
}
void APIC::write_icr(const ICRReg& icr)
{
write_register(APIC_REG_ICR_HIGH, icr.high());
@ -137,9 +220,9 @@ u8 APIC::spurious_interrupt_vector()
return IRQ_APIC_SPURIOUS;
}
#define APIC_INIT_VAR_PTR(tpe,vaddr,varname) \
#define APIC_INIT_VAR_PTR(tpe, vaddr, varname) \
reinterpret_cast<volatile tpe*>(reinterpret_cast<ptrdiff_t>(vaddr) \
+ reinterpret_cast<ptrdiff_t>(&varname) \
- reinterpret_cast<ptrdiff_t>(&apic_ap_start))
bool APIC::init_bsp()
@ -172,8 +255,6 @@ bool APIC::init_bsp()
return false;
}
u32 processor_cnt = 0;
u32 processor_enabled_cnt = 0;
auto madt = map_typed<ACPI::Structures::MADT>(madt_address);
size_t entry_index = 0;
size_t entries_length = madt->h.length - sizeof(ACPI::Structures::MADT);
@ -184,29 +265,33 @@ bool APIC::init_bsp()
auto* plapic_entry = (const ACPI::Structures::MADTEntries::ProcessorLocalAPIC*)madt_entry;
#ifdef APIC_DEBUG
klog() << "APIC: AP found @ MADT entry " << entry_index << ", Processor Id: " << String::format("%02x", plapic_entry->acpi_processor_id)
<< " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags);
<< " APIC Id: " << String::format("%02x", plapic_entry->apic_id) << " Flags: " << String::format("%08x", plapic_entry->flags);
#endif
processor_cnt++;
m_processor_cnt++;
if ((plapic_entry->flags & 0x1) != 0)
processor_enabled_cnt++;
m_processor_enabled_cnt++;
}
madt_entry = (ACPI::Structures::MADTEntryHeader*)(VirtualAddress(madt_entry).offset(entry_length).get());
entries_length -= entry_length;
entry_index++;
}
if (processor_enabled_cnt < 1)
processor_enabled_cnt = 1;
if (processor_cnt < 1)
processor_cnt = 1;
klog() << "APIC Processors found: " << processor_cnt << ", enabled: " << processor_enabled_cnt;
if (m_processor_enabled_cnt < 1)
m_processor_enabled_cnt = 1;
if (m_processor_cnt < 1)
m_processor_cnt = 1;
enable_bsp();
klog() << "APIC Processors found: " << m_processor_cnt << ", enabled: " << m_processor_enabled_cnt;
enable(0);
return true;
}
void APIC::do_boot_aps()
{
if (m_processor_enabled_cnt > 1) {
u32 aps_to_enable = m_processor_enabled_cnt - 1;
if (processor_enabled_cnt > 1) {
u32 aps_to_enable = processor_enabled_cnt - 1;
// Copy the APIC startup code and variables to P0x00008000
// Also account for the data appended to:
// * aps_to_enable u32 values for ap_cpu_init_stacks
@ -219,17 +304,17 @@ bool APIC::init_bsp()
auto stack_region = MM.allocate_kernel_region(Thread::default_kernel_stack_size, {}, Region::Access::Read | Region::Access::Write, false, true, true);
if (!stack_region) {
klog() << "APIC: Failed to allocate stack for AP #" << i;
return false;
return;
}
stack_region->set_stack(true);
m_apic_ap_stacks.append(stack_region.release_nonnull());
m_apic_ap_stacks.append(move(stack_region));
}
// Store pointers to all stacks for the APs to use
auto ap_stack_array = APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_stacks);
ASSERT(aps_to_enable == m_apic_ap_stacks.size());
for (size_t i = 0; i < aps_to_enable; i++) {
ap_stack_array[i] = m_apic_ap_stacks[i].vaddr().get() + Thread::default_kernel_stack_size;
ap_stack_array[i] = m_apic_ap_stacks[i]->vaddr().get() + Thread::default_kernel_stack_size;
#ifdef APIC_DEBUG
klog() << "APIC: CPU[" << (i + 1) << "] stack at " << VirtualAddress(ap_stack_array[i]);
#endif
@ -237,9 +322,11 @@ bool APIC::init_bsp()
// Allocate Processor structures for all APs and store the pointer to the data
m_ap_processor_info.resize(aps_to_enable);
for (size_t i = 0; i < aps_to_enable; i++)
m_ap_processor_info[i] = make<Processor>();
auto ap_processor_info_array = &ap_stack_array[aps_to_enable];
for (size_t i = 0; i < aps_to_enable; i++) {
ap_processor_info_array[i] = FlatPtr(&m_ap_processor_info.at(i));
ap_processor_info_array[i] = FlatPtr(m_ap_processor_info[i].ptr());
#ifdef APIC_DEBUG
klog() << "APIC: CPU[" << (i + 1) << "] Processor at " << VirtualAddress(ap_processor_info_array[i]);
#endif
@ -248,17 +335,25 @@ bool APIC::init_bsp()
// Store the BSP's CR3 value for the APs to use
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr3) = MM.kernel_page_directory().cr3();
// Store the BSP's GDT and IDT for the APs to use
const auto& gdtr = Processor::current().get_gdtr();
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_gdtr) = FlatPtr(&gdtr);
const auto& idtr = get_idtr();
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_idtr) = FlatPtr(&idtr);
// Store the BSP's CR0 and CR4 values for the APs to use
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr0) = read_cr0();
*APIC_INIT_VAR_PTR(u32, apic_startup_region->vaddr().as_ptr(), ap_cpu_init_cr4) = read_cr4();
// Create an idle thread for each processor. We have to do this here
// because we won't be able to send FlushTLB messages, so we have to
// have all memory set up for the threads so that when the APs are
// starting up, they can access all the memory properly
m_ap_idle_threads.resize(aps_to_enable);
for (u32 i = 0; i < aps_to_enable; i++)
m_ap_idle_threads[i] = Scheduler::create_ap_idle_thread(i + 1);
#ifdef APIC_DEBUG
klog() << "APIC: Starting " << aps_to_enable << " AP(s)";
#endif
@ -287,50 +382,153 @@ bool APIC::init_bsp()
}
#ifdef APIC_DEBUG
klog() << "APIC: " << processor_enabled_cnt << " processors are initialized and running";
klog() << "APIC: " << m_processor_enabled_cnt << " processors are initialized and running";
#endif
}
return true;
}
void APIC::enable_bsp()
void APIC::boot_aps()
{
// FIXME: Ensure this method can only be executed by the BSP.
enable(0);
// We split this into another call because do_boot_aps() will cause
// MM calls upon exit, and we don't want to call smp_enable before that
do_boot_aps();
// Enable SMP, which means IPIs may now be sent
Processor::smp_enable();
// Now trigger all APs to continue execution (need to do this after
// the regions have been freed so that we don't trigger IPIs
if (m_processor_enabled_cnt > 1)
m_apic_ap_continue.store(1, AK::MemoryOrder::memory_order_release);
}
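// Bring-up order sketch, as described by the comments above: do_boot_aps()
// starts the APs and leaves them spinning on m_apic_ap_continue (see
// enable() below), smp_enable() then arms IPI handling on the BSP, and only
// after that is m_apic_ap_continue set, so the APs may proceed and start
// sending IPIs of their own.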
void APIC::enable(u32 cpu)
{
if (cpu >= 8) {
// TODO: x2apic support?
klog() << "SMP support is currently limited to 8 CPUs!";
Processor::halt();
}
u32 apic_id = (1u << cpu);
write_register(APIC_REG_LD, (read_register(APIC_REG_LD) & 0x00ffffff) | (apic_id << 24)); // TODO: only if not in x2apic mode
// read it back to make sure it's actually set
apic_id = read_register(APIC_REG_LD) >> 24;
Processor::current().info().set_apic_id(apic_id);
#ifdef APIC_DEBUG
klog() << "Enabling local APIC for cpu #" << cpu;
klog() << "Enabling local APIC for cpu #" << cpu << " apic id: " << apic_id;
#endif
if (cpu == 0) {
// dummy read, apparently to avoid a bug in old CPUs.
read_register(APIC_REG_SIV);
// set spurious interrupt vector
write_register(APIC_REG_SIV, (IRQ_APIC_SPURIOUS + IRQ_VECTOR_BASE) | 0x100);
// local destination mode (flat mode)
write_register(APIC_REG_DF, 0xf0000000);
// set destination id (note that this limits it to 8 cpus)
write_register(APIC_REG_LD, 0);
SpuriousInterruptHandler::initialize(IRQ_APIC_SPURIOUS);
write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);
write_register(APIC_REG_LVT_ERR, APIC_LVT(0, 0) | APIC_LVT_MASKED);
// set error interrupt vector
set_lvt(APIC_REG_LVT_ERR, IRQ_APIC_ERR);
APICErrInterruptHandler::initialize(IRQ_APIC_ERR);
write_register(APIC_REG_TPR, 0);
} else {
// register IPI interrupt vector
APICIPIInterruptHandler::initialize(IRQ_APIC_IPI);
}
// set spurious interrupt vector
set_siv(APIC_REG_SIV, IRQ_APIC_SPURIOUS);
// local destination mode (flat mode)
write_register(APIC_REG_DF, 0xf0000000);
write_register(APIC_REG_LVT_TIMER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_THERMAL, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_PERFORMANCE_COUNTER, APIC_LVT(0, 0) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_LINT0, APIC_LVT(0, 7) | APIC_LVT_MASKED);
write_register(APIC_REG_LVT_LINT1, APIC_LVT(0, 0) | APIC_LVT_TRIGGER_LEVEL);
write_register(APIC_REG_TPR, 0);
if (cpu > 0) {
// Notify the BSP that we are done initializing. It will unmap the startup data at P8000
m_apic_ap_count++;
m_apic_ap_count.fetch_add(1, AK::MemoryOrder::memory_order_acq_rel);
#ifdef APIC_DEBUG
klog() << "APIC: cpu #" << cpu << " initialized, waiting for all others";
#endif
// The reason we're making all APs wait until the BSP signals them is that
// we don't want APs to trigger IPIs (e.g. through MM) while the BSP
// is unable to process them
while (!m_apic_ap_continue.load(AK::MemoryOrder::memory_order_consume)) {
IO::delay(200);
}
// boot_aps() freed memory, so we need to update our tlb
Processor::flush_entire_tlb_local();
}
}
Thread* APIC::get_idle_thread(u32 cpu) const
{
ASSERT(cpu > 0);
return m_ap_idle_threads[cpu - 1];
}
void APIC::init_finished(u32 cpu)
{
// This method is called once the boot stack is no longer needed
ASSERT(cpu > 0);
ASSERT(cpu <= m_apic_ap_count);
ASSERT(m_apic_ap_stacks[cpu - 1]);
#ifdef APIC_DEBUG
klog() << "APIC: boot stack for for cpu #" << cpu << " no longer needed";
#endif
m_apic_ap_stacks[cpu - 1].clear();
}
void APIC::broadcast_ipi()
{
#ifdef APIC_SMP_DEBUG
klog() << "SMP: Broadcast IPI from cpu #" << Processor::current().id();
#endif
wait_for_pending_icr();
write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::AllExcludingSelf));
}
void APIC::send_ipi(u32 cpu)
{
auto& proc = Processor::current();
#ifdef APIC_SMP_DEBUG
klog() << "SMP: Send IPI from cpu #" << proc.id() << " to cpu #" << cpu;
#endif
ASSERT(cpu != proc.id());
ASSERT(cpu < 8);
wait_for_pending_icr();
write_icr(ICRReg(IRQ_APIC_IPI + IRQ_VECTOR_BASE, ICRReg::Fixed, ICRReg::Logical, ICRReg::Assert, ICRReg::TriggerMode::Edge, ICRReg::NoShorthand, 1u << cpu));
}
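// Worked example, plugging values into the ICRReg constructor shown in the
// header below (assumed encodings: Fixed = 0, Logical = 1, Assert = 1,
// Edge = 0, NoShorthand = 0, and IRQ_VECTOR_BASE = 0x50, so the IPI vector
// is 0xfd): send_ipi(2) builds
//   low  = 0xfd | (1 << 11) | (1 << 14) = 0x48fd
//   high = (1u << 2) << 24              = 0x04000000
// i.e. a fixed-delivery, edge-triggered IPI to the CPU whose logical APIC
// id has bit 2 set (flat model, hence the 8-CPU limit in enable()).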
void APICIPIInterruptHandler::handle_interrupt(const RegisterState&)
{
#ifdef APIC_SMP_DEBUG
klog() << "APIC IPI on cpu #" << Processor::current().id();
#endif
}
bool APICIPIInterruptHandler::eoi()
{
#ifdef APIC_SMP_DEBUG
klog() << "SMP: IPI eoi";
#endif
APIC::the().eoi();
return true;
}
void APICErrInterruptHandler::handle_interrupt(const RegisterState&)
{
klog() << "APIC: SMP error on cpu #" << Processor::current().id();
}
bool APICErrInterruptHandler::eoi()
{
APIC::the().eoi();
return true;
}
}

View file

@ -27,11 +27,14 @@
#pragma once
#include <AK/Types.h>
#include <AK/NonnullOwnPtrVector.h>
#include <Kernel/VM/MemoryManager.h>
namespace Kernel {
struct LocalAPIC {
u32 apic_id;
};
class APIC {
public:
static APIC& the();
@ -39,14 +42,20 @@ public:
static bool initialized();
bool init_bsp();
void enable_bsp();
void eoi();
void boot_aps();
void enable(u32 cpu);
void init_finished(u32 cpu);
void broadcast_ipi();
void send_ipi(u32 cpu);
static u8 spurious_interrupt_vector();
Thread* get_idle_thread(u32 cpu) const;
u32 enabled_processor_count() const { return m_processor_enabled_cnt; }
private:
class ICRReg {
u32 m_reg { 0 };
u32 m_low { 0 };
u32 m_high { 0 };
public:
enum DeliveryMode {
@ -76,25 +85,34 @@ private:
AllExcludingSelf = 0x3,
};
ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destination)
: m_reg(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destination << 18))
ICRReg(u8 vector, DeliveryMode delivery_mode, DestinationMode destination_mode, Level level, TriggerMode trigger_mode, DestinationShorthand destinationShort, u8 destination = 0)
: m_low(vector | (delivery_mode << 8) | (destination_mode << 11) | (level << 14) | (static_cast<u32>(trigger_mode) << 15) | (destinationShort << 18)),
m_high((u32)destination << 24)
{
}
u32 low() const { return m_reg; }
u32 high() const { return 0; }
u32 low() const { return m_low; }
u32 high() const { return m_high; }
};
OwnPtr<Region> m_apic_base;
NonnullOwnPtrVector<Region> m_apic_ap_stacks;
Vector<Processor> m_ap_processor_info;
AK::Atomic<u32> m_apic_ap_count{0};
Vector<OwnPtr<Region>> m_apic_ap_stacks;
Vector<OwnPtr<Processor>> m_ap_processor_info;
Vector<Thread*> m_ap_idle_threads;
AK::Atomic<u8> m_apic_ap_count{0};
AK::Atomic<u8> m_apic_ap_continue{0};
u32 m_processor_cnt{0};
u32 m_processor_enabled_cnt{0};
static PhysicalAddress get_base();
static void set_base(const PhysicalAddress& base);
void write_register(u32 offset, u32 value);
u32 read_register(u32 offset);
void set_lvt(u32 offset, u8 interrupt);
void set_siv(u32 offset, u8 interrupt);
void wait_for_pending_icr();
void write_icr(const ICRReg& icr);
void do_boot_aps();
};
}

View file

@ -36,20 +36,28 @@ GenericInterruptHandler& GenericInterruptHandler::from(u8 interrupt_number)
return get_interrupt_handler(interrupt_number);
}
GenericInterruptHandler::GenericInterruptHandler(u8 interrupt_number)
: m_interrupt_number(interrupt_number)
GenericInterruptHandler::GenericInterruptHandler(u8 interrupt_number, bool disable_remap)
: m_interrupt_number(interrupt_number),
m_disable_remap(disable_remap)
{
register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this);
if (m_disable_remap)
register_generic_interrupt_handler(m_interrupt_number, *this);
else
register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this);
}
GenericInterruptHandler::~GenericInterruptHandler()
{
unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this);
if (m_disable_remap)
unregister_generic_interrupt_handler(m_interrupt_number, *this);
else
unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(m_interrupt_number), *this);
}
void GenericInterruptHandler::change_interrupt_number(u8 number)
{
ASSERT_INTERRUPTS_ENABLED();
ASSERT(!m_disable_remap);
unregister_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this);
m_interrupt_number = number;
register_generic_interrupt_handler(InterruptManagement::acquire_mapped_interrupt_number(interrupt_number()), *this);

View file

@ -65,11 +65,12 @@ public:
protected:
void change_interrupt_number(u8 number);
explicit GenericInterruptHandler(u8 interrupt_number);
explicit GenericInterruptHandler(u8 interrupt_number, bool disable_remap = false);
private:
size_t m_invoking_count { 0 };
bool m_enabled { false };
u8 m_interrupt_number { 0 };
bool m_disable_remap { false };
};
}

View file

@ -188,11 +188,12 @@ void InterruptManagement::switch_to_ioapic_mode()
dbg() << "Interrupts: Detected " << irq_controller->model();
}
}
APIC::the().init_bsp();
if (auto mp_parser = MultiProcessorParser::autodetect()) {
m_pci_interrupt_overrides = mp_parser->get_pci_interrupt_redirections();
}
APIC::the().init_bsp();
}
void InterruptManagement::locate_apic_data()

View file

@ -35,13 +35,13 @@ UnhandledInterruptHandler::UnhandledInterruptHandler(u8 interrupt_vector)
void UnhandledInterruptHandler::handle_interrupt(const RegisterState&)
{
dbg() << "Interrupt: Unhandled vector " << interrupt_number() << " was invoked for handle_interrupt(RegisterState&).";
hang();
Processor::halt();
}
[[noreturn]] bool UnhandledInterruptHandler::eoi()
{
dbg() << "Interrupt: Unhandled vector " << interrupt_number() << " was invoked for eoi().";
hang();
Processor::halt();
}
UnhandledInterruptHandler::~UnhandledInterruptHandler()

View file

@ -125,7 +125,7 @@ NEVER_INLINE void dump_backtrace_impl(FlatPtr base_pointer, bool use_ksyms)
}
#endif
if (use_ksyms && !g_kernel_symbols_available) {
hang();
Processor::halt();
return;
}

View file

@ -48,7 +48,7 @@ void Lock::lock(Mode mode)
if (!are_interrupts_enabled()) {
klog() << "Interrupts disabled when trying to take Lock{" << m_name << "}";
dump_backtrace();
hang();
Processor::halt();
}
auto current_thread = Thread::current();
for (;;) {

View file

@ -47,7 +47,7 @@ static Access::Type detect_optimal_access_type(bool mmio_allowed)
return Access::Type::IO;
klog() << "No PCI bus access method detected!";
hang();
Processor::halt();
}
void initialize()

View file

@ -1271,7 +1271,7 @@ int Process::exec(String path, Vector<String> arguments, Vector<String> environm
// We need to enter the scheduler lock before changing the state
// and it will be released after the context switch into that
// thread. We should also still be in our critical section
ASSERT(!g_scheduler_lock.is_locked());
ASSERT(!g_scheduler_lock.own_lock());
ASSERT(Processor::current().in_critical() == 1);
g_scheduler_lock.lock();
current_thread->set_state(Thread::State::Running);
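// own_lock() (added to RecursiveSpinLock in this commit) checks whether the
// *current* processor holds the lock. The old is_locked() assertion would
// also trip if another CPU happened to hold g_scheduler_lock, which is a
// perfectly normal situation under SMP.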

View file

@ -566,57 +566,51 @@ void Scheduler::enter_current(Thread& prev_thread)
Process* Scheduler::colonel()
{
ASSERT(s_colonel_process);
return s_colonel_process;
}
void Scheduler::initialize(u32 cpu)
void Scheduler::initialize()
{
static Atomic<u32> s_bsp_is_initialized;
ASSERT(&Processor::current() != nullptr); // sanity check
Thread* idle_thread = nullptr;
if (cpu == 0) {
ASSERT(s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0);
g_scheduler_data = new SchedulerData;
g_finalizer_wait_queue = new WaitQueue;
g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, 1 << cpu);
ASSERT(s_colonel_process);
ASSERT(idle_thread);
idle_thread->set_priority(THREAD_PRIORITY_MIN);
idle_thread->set_name(String::format("idle thread #%u", cpu));
} else {
// We need to make sure the BSP initialized the global data first
if (s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0) {
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << cpu << "]: waiting for BSP to initialize...";
#endif
while (s_bsp_is_initialized.load(AK::MemoryOrder::memory_order_consume) == 0) {
}
#ifdef SCHEDULER_DEBUG
dbg() << "Scheduler[" << cpu << "]: initializing now";
#endif
}
g_finalizer_has_work.store(false, AK::MemoryOrder::memory_order_release);
s_colonel_process = Process::create_kernel_process(idle_thread, "colonel", idle_loop, 1);
ASSERT(s_colonel_process);
ASSERT(idle_thread);
idle_thread->set_priority(THREAD_PRIORITY_MIN);
idle_thread->set_name("idle thread #0");
ASSERT(s_colonel_process);
idle_thread = s_colonel_process->create_kernel_thread(idle_loop, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
ASSERT(idle_thread);
}
set_idle_thread(idle_thread);
}
void Scheduler::set_idle_thread(Thread* idle_thread)
{
Processor::current().set_idle_thread(*idle_thread);
Processor::current().set_current_thread(*idle_thread);
}
if (cpu == 0)
s_bsp_is_initialized.store(1, AK::MemoryOrder::memory_order_release);
Thread* Scheduler::create_ap_idle_thread(u32 cpu)
{
ASSERT(cpu != 0);
// This function is called on the bsp, but creates an idle thread for another AP
ASSERT(Processor::current().id() == 0);
ASSERT(s_colonel_process);
Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, THREAD_PRIORITY_MIN, String::format("idle thread #%u", cpu), 1 << cpu, false);
ASSERT(idle_thread);
return idle_thread;
}
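// Call order sketch (pieced together from this commit): the BSP creates each
// AP's idle thread in APIC::do_boot_aps() via create_ap_idle_thread(cpu),
// and the AP later installs it from init_ap() through
// Scheduler::set_idle_thread(APIC::the().get_idle_thread(cpu)).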
void Scheduler::timer_tick(const RegisterState& regs)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_irq());
if (Processor::current().id() > 0) return;
auto current_thread = Processor::current().current_thread();
if (!current_thread)
return;
@ -664,9 +658,11 @@ void Scheduler::idle_loop()
{
dbg() << "Scheduler[" << Processor::current().id() << "]: idle loop running";
ASSERT(are_interrupts_enabled());
for (;;) {
asm("hlt");
yield();
if (Processor::current().id() == 0) yield();
}
}
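// Only the BSP yields out of the idle loop for now; timer_tick() above
// returns early on cpus other than #0, so the APs have nothing to schedule
// yet and simply hlt until an interrupt (e.g. an IPI) arrives.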

View file

@ -51,7 +51,9 @@ extern RecursiveSpinLock g_scheduler_lock;
class Scheduler {
public:
static void initialize(u32 cpu);
static void initialize();
static Thread* create_ap_idle_thread(u32 cpu);
static void set_idle_thread(Thread* idle_thread);
static void timer_tick(const RegisterState&);
[[noreturn]] static void start();
static bool pick_next();

View file

@ -47,6 +47,7 @@ public:
Processor::current().enter_critical(prev_flags);
BaseType expected;
do {
Processor::wait_check();
expected = 0;
} while (!m_lock.compare_exchange_strong(expected, 1, AK::memory_order_acq_rel));
return prev_flags;
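// Processor::wait_check() (new in this commit) is presumably what lets a
// spinning CPU service pending SMP messages while it waits; without it, a
// CPU spinning here could never acknowledge another core's IPI (e.g. a TLB
// flush), and two such CPUs could deadlock each other.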
@ -90,6 +91,7 @@ public:
while (!m_lock.compare_exchange_strong(expected, cpu, AK::memory_order_acq_rel)) {
if (expected == cpu)
break;
Processor::wait_check();
expected = 0;
}
m_recursions++;
@ -110,6 +112,11 @@ public:
return m_lock.load(AK::memory_order_consume) != 0;
}
ALWAYS_INLINE bool own_lock() const
{
return m_lock.load(AK::memory_order_consume) == FlatPtr(&Processor::current());
}
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_release);

View file

@ -38,6 +38,7 @@ namespace Kernel {
static u8* s_vga_buffer;
static VirtualConsole* s_consoles[6];
static int s_active_console;
static RecursiveSpinLock s_lock;
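// Under SMP, InterruptDisabler only masks interrupts on the local core and
// no longer provides mutual exclusion, so the console paths below switch to
// this spinlock.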
void VirtualConsole::flush_vga_cursor()
{
@ -85,7 +86,7 @@ void VirtualConsole::switch_to(unsigned index)
ASSERT(index < 6);
ASSERT(s_consoles[index]);
InterruptDisabler disabler;
ScopedSpinLock lock(s_lock);
if (s_active_console != -1) {
auto* active_console = s_consoles[s_active_console];
// We won't know how to switch away from a graphical console until we
@ -107,7 +108,7 @@ void VirtualConsole::set_active(bool active)
if (active == m_active)
return;
InterruptDisabler disabler;
ScopedSpinLock lock(s_lock);
m_active = active;
@ -240,7 +241,7 @@ void VirtualConsole::on_key_pressed(KeyboardDevice::Event event)
ssize_t VirtualConsole::on_tty_write(const u8* data, ssize_t size)
{
InterruptDisabler disabler;
ScopedSpinLock lock(s_lock);
for (ssize_t i = 0; i < size; ++i)
m_terminal.on_input(data[i]);
if (m_active)

View file

@ -196,7 +196,7 @@ void Thread::die_if_needed()
// actual context switch
u32 prev_flags;
Processor::current().clear_critical(prev_flags, false);
dbg() << "die_if_needed returned form clear_critical!!! in irq: " << Processor::current().in_irq();
// We should never get here, but the scoped scheduler lock
// will be released by Scheduler::context_switch again
ASSERT_NOT_REACHED();

View file

@ -51,7 +51,7 @@ extern FlatPtr end_of_kernel_bss;
namespace Kernel {
static MemoryManager* s_the;
RecursiveSpinLock MemoryManager::s_lock;
RecursiveSpinLock s_mm_lock;
MemoryManager& MM
{
@ -165,7 +165,7 @@ void MemoryManager::parse_memory_map()
const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, VirtualAddress vaddr)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@ -181,7 +181,7 @@ const PageTableEntry* MemoryManager::pte(const PageDirectory& page_directory, Vi
PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
u32 page_directory_table_index = (vaddr.get() >> 30) & 0x3;
u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;
@ -221,7 +221,7 @@ void MemoryManager::initialize(u32 cpu)
Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
for (auto& region : MM.m_kernel_regions) {
if (region.contains(vaddr))
return &region;
@ -231,7 +231,7 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress vaddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
// FIXME: Use a binary search tree (maybe red/black?) or some other more appropriate data structure!
for (auto& region : process.m_regions) {
if (region.contains(vaddr))
@ -245,7 +245,7 @@ Region* MemoryManager::user_region_from_vaddr(Process& process, VirtualAddress v
Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (auto* region = user_region_from_vaddr(process, vaddr))
return region;
return kernel_region_from_vaddr(vaddr);
@ -253,7 +253,7 @@ Region* MemoryManager::region_from_vaddr(Process& process, VirtualAddress vaddr)
const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAddress vaddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr))
return region;
return kernel_region_from_vaddr(vaddr);
@ -261,7 +261,7 @@ const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAd
Region* MemoryManager::region_from_vaddr(VirtualAddress vaddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (auto* region = kernel_region_from_vaddr(vaddr))
return region;
auto page_directory = PageDirectory::find_by_cr3(read_cr3());
@ -275,7 +275,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Thread::current() != nullptr);
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (Processor::current().in_irq()) {
dbg() << "CPU[" << Processor::current().id() << "] BUG! Page fault while handling IRQ! code=" << fault.code() << ", vaddr=" << fault.vaddr() << ", irq level: " << Processor::current().in_irq();
dump_kernel_regions();
@ -296,7 +296,7 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.is_valid())
return nullptr;
@ -310,7 +310,7 @@ OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, con
OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, bool should_commit, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.is_valid())
return nullptr;
@ -326,7 +326,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, const StringVi
OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.is_valid())
return nullptr;
@ -339,7 +339,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
if (!range.is_valid())
return nullptr;
@ -356,7 +356,7 @@ OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
OwnPtr<Region> region;
if (user_accessible)
region = Region::create_user_accessible(range, vmobject, 0, name, access, cacheable);
@ -370,7 +370,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range&
OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
if (!range.is_valid())
return nullptr;
@ -379,7 +379,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
for (auto& region : m_user_physical_regions) {
if (!region.contains(page)) {
klog() << "MM: deallocate_user_physical_page: " << page.paddr() << " not in " << region.lower() << " -> " << region.upper();
@ -398,7 +398,7 @@ void MemoryManager::deallocate_user_physical_page(PhysicalPage&& page)
RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
{
ASSERT(s_lock.is_locked());
ASSERT(s_mm_lock.is_locked());
RefPtr<PhysicalPage> page;
for (auto& region : m_user_physical_regions) {
page = region.take_free_page(false);
@ -410,7 +410,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page()
RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto page = find_free_user_physical_page();
if (!page) {
@ -449,7 +449,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
{
ASSERT(s_lock.is_locked());
ASSERT(s_mm_lock.is_locked());
for (auto& region : m_super_physical_regions) {
if (!region.contains(page)) {
klog() << "MM: deallocate_supervisor_physical_page: " << page.paddr() << " not in " << region.lower() << " -> " << region.upper();
@ -468,7 +468,7 @@ void MemoryManager::deallocate_supervisor_physical_page(PhysicalPage&& page)
NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
{
ASSERT(!(size % PAGE_SIZE));
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
size_t count = ceil_div(size, PAGE_SIZE);
NonnullRefPtrVector<PhysicalPage> physical_pages;
@ -496,7 +496,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
RefPtr<PhysicalPage> page;
for (auto& region : m_super_physical_regions) {
@ -528,33 +528,33 @@ void MemoryManager::enter_process_paging_scope(Process& process)
{
auto current_thread = Thread::current();
ASSERT(current_thread != nullptr);
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
current_thread->tss().cr3 = process.page_directory().cr3();
write_cr3(process.page_directory().cr3());
}
void MemoryManager::flush_entire_tlb()
{
write_cr3(read_cr3());
}
void MemoryManager::flush_tlb(VirtualAddress vaddr)
void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
#ifdef MM_DEBUG
dbg() << "MM: Flush page " << vaddr;
dbg() << "MM: Flush " << page_count << " pages at " << vaddr << " on CPU#" << Processor::current().id();
#endif
asm volatile("invlpg %0"
:
: "m"(*(char*)vaddr.get())
: "memory");
Processor::flush_tlb_local(vaddr, page_count);
}
void MemoryManager::flush_tlb(VirtualAddress vaddr, size_t page_count)
{
#ifdef MM_DEBUG
dbg() << "MM: Flush " << page_count << " pages at " << vaddr;
#endif
Processor::flush_tlb(vaddr, page_count);
}
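// flush_tlb_local() only invalidates on the current CPU, while flush_tlb()
// goes through Processor::flush_tlb(), which also notifies the other
// processors (the FlushTLB SMP message mentioned in APIC::do_boot_aps())
// so that every core's TLB stays coherent.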
extern "C" PageTableEntry boot_pd3_pt1023[1024];
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto& pte = boot_pd3_pt1023[4];
auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
if (pte.physical_page_base() != pd_paddr.as_ptr()) {
@ -572,7 +572,7 @@ PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t
PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto& pte = boot_pd3_pt1023[0];
if (pte.physical_page_base() != pt_paddr.as_ptr()) {
#ifdef MM_DEBUG
@ -592,7 +592,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
ASSERT_INTERRUPTS_DISABLED();
auto& mm_data = get_data();
mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
u32 pte_idx = 8 + Processor::current().id();
VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
@ -606,7 +606,7 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
pte.set_present(true);
pte.set_writable(true);
pte.set_user_allowed(false);
flush_tlb(vaddr);
flush_tlb_local(vaddr, 1);
}
return vaddr.as_ptr();
}
@ -614,21 +614,21 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
void MemoryManager::unquickmap_page()
{
ASSERT_INTERRUPTS_DISABLED();
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto& mm_data = get_data();
ASSERT(mm_data.m_quickmap_in_use.is_locked());
u32 pte_idx = 8 + Processor::current().id();
VirtualAddress vaddr(0xffe00000 + pte_idx * PAGE_SIZE);
auto& pte = boot_pd3_pt1023[pte_idx];
pte.clear();
flush_tlb(vaddr);
flush_tlb_local(vaddr, 1);
mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
}
template<MemoryManager::AccessSpace space, MemoryManager::AccessType access_type>
bool MemoryManager::validate_range(const Process& process, VirtualAddress base_vaddr, size_t size) const
{
ASSERT(s_lock.is_locked());
ASSERT(s_mm_lock.is_locked());
ASSERT(size);
if (base_vaddr > base_vaddr.offset(size)) {
dbg() << "Shenanigans! Asked to validate wrappy " << base_vaddr << " size=" << size;
@ -664,14 +664,14 @@ bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress v
{
if (!is_user_address(vaddr))
return false;
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto* region = user_region_from_vaddr(const_cast<Process&>(process), vaddr);
return region && region->is_user_accessible() && region->is_stack();
}
bool MemoryManager::validate_kernel_read(const Process& process, VirtualAddress vaddr, size_t size) const
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
return validate_range<AccessSpace::Kernel, AccessType::Read>(process, vaddr, size);
}
@ -679,7 +679,7 @@ bool MemoryManager::can_read_without_faulting(const Process& process, VirtualAdd
{
// FIXME: Use the size argument!
UNUSED_PARAM(size);
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
auto* pte = const_cast<MemoryManager*>(this)->pte(process.page_directory(), vaddr);
if (!pte)
return false;
@ -690,7 +690,7 @@ bool MemoryManager::validate_user_read(const Process& process, VirtualAddress va
{
if (!is_user_address(vaddr))
return false;
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
return validate_range<AccessSpace::User, AccessType::Read>(process, vaddr, size);
}
@ -698,25 +698,25 @@ bool MemoryManager::validate_user_write(const Process& process, VirtualAddress v
{
if (!is_user_address(vaddr))
return false;
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
return validate_range<AccessSpace::User, AccessType::Write>(process, vaddr, size);
}
void MemoryManager::register_vmobject(VMObject& vmobject)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
m_vmobjects.append(&vmobject);
}
void MemoryManager::unregister_vmobject(VMObject& vmobject)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
m_vmobjects.remove(&vmobject);
}
void MemoryManager::register_region(Region& region)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (region.is_kernel())
m_kernel_regions.append(&region);
else
@ -725,7 +725,7 @@ void MemoryManager::register_region(Region& region)
void MemoryManager::unregister_region(Region& region)
{
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
if (region.is_kernel())
m_kernel_regions.remove(&region);
else
@ -736,7 +736,7 @@ void MemoryManager::dump_kernel_regions()
{
klog() << "Kernel regions:";
klog() << "BEGIN END SIZE ACCESS NAME";
ScopedSpinLock lock(s_lock);
ScopedSpinLock lock(s_mm_lock);
for (auto& region : MM.m_kernel_regions) {
klog() << String::format("%08x", region.vaddr().get()) << " -- " << String::format("%08x", region.vaddr().offset(region.size() - 1).get()) << " " << String::format("%08x", region.size()) << " " << (region.is_readable() ? 'R' : ' ') << (region.is_writable() ? 'W' : ' ') << (region.is_executable() ? 'X' : ' ') << (region.is_shared() ? 'S' : ' ') << (region.is_stack() ? 'T' : ' ') << (region.vmobject().is_purgeable() ? 'P' : ' ') << " " << region.name().characters();
}

View file

@ -72,6 +72,8 @@ struct MemoryManagerData {
u32 m_quickmap_prev_flags;
};
extern RecursiveSpinLock s_mm_lock;
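// The lock moved from a private static of MemoryManager to a namespace-scope
// extern so that other VM code (Region, for example) can take it directly;
// the InterruptDisabler it replaces there only protected the local core.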
class MemoryManager {
AK_MAKE_ETERNAL
friend class PageDirectory;
@ -176,8 +178,8 @@ private:
void detect_cpu_features();
void protect_kernel_image();
void parse_memory_map();
void flush_entire_tlb();
void flush_tlb(VirtualAddress);
static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
static void flush_tlb(VirtualAddress, size_t page_count = 1);
static Region* user_region_from_vaddr(Process&, VirtualAddress);
static Region* kernel_region_from_vaddr(VirtualAddress);
@ -212,8 +214,6 @@ private:
InlineLinkedList<VMObject> m_vmobjects;
static RecursiveSpinLock s_lock;
RefPtr<PhysicalPage> m_low_pseudo_identity_mapping_pages[4];
};

View file

@ -57,7 +57,7 @@ Region::~Region()
// Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
// Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
// find the address<->region mappings in an invalid state there.
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
if (m_page_directory) {
unmap(ShouldDeallocateVirtualMemoryRange::Yes);
ASSERT(!m_page_directory);
@ -117,7 +117,7 @@ NonnullOwnPtr<Region> Region::clone()
bool Region::commit()
{
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
#ifdef MM_DEBUG
dbg() << "MM: Commit " << page_count() << " pages in Region " << this << " (VMO=" << &vmobject() << ") at " << vaddr();
#endif
@ -131,7 +131,7 @@ bool Region::commit()
bool Region::commit(size_t page_index)
{
ASSERT(vmobject().is_anonymous() || vmobject().is_purgeable());
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
auto& vmobject_physical_page_entry = physical_page_slot(page_index);
if (!vmobject_physical_page_entry.is_null() && !vmobject_physical_page_entry->is_shared_zero_page())
return true;
@ -250,14 +250,14 @@ void Region::map_individual_page_impl(size_t page_index)
void Region::remap_page(size_t page_index)
{
ASSERT(m_page_directory);
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
ASSERT(physical_page(page_index));
map_individual_page_impl(page_index);
}
void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
{
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
ASSERT(m_page_directory);
for (size_t i = 0; i < page_count(); ++i) {
auto vaddr = this->vaddr().offset(i * PAGE_SIZE);
@ -281,13 +281,13 @@ void Region::unmap(ShouldDeallocateVirtualMemoryRange deallocate_range)
void Region::set_page_directory(PageDirectory& page_directory)
{
ASSERT(!m_page_directory || m_page_directory == &page_directory);
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
m_page_directory = page_directory;
}
void Region::map(PageDirectory& page_directory)
{
set_page_directory(page_directory);
InterruptDisabler disabler;
ScopedSpinLock lock(s_mm_lock);
#ifdef MM_DEBUG
dbg() << "MM: Region::map() will map VMO pages " << first_page_index() << " - " << last_page_index() << " (VMO page count: " << vmobject().page_count() << ")";
#endif

View file

@ -148,7 +148,7 @@ extern "C" [[noreturn]] void init()
VirtualConsole::switch_to(0);
Process::initialize();
Scheduler::initialize(0);
Scheduler::initialize();
Thread* init_stage2_thread = nullptr;
Process::create_kernel_process(init_stage2_thread, "init_stage2", init_stage2);
@ -169,17 +169,39 @@ extern "C" [[noreturn]] void init_ap(u32 cpu, Processor* processor_info)
klog() << "CPU #" << cpu << " processor_info at " << VirtualAddress(FlatPtr(processor_info));
processor_info->initialize(cpu);
APIC::the().enable(cpu);
MemoryManager::initialize(cpu);
APIC::the().enable(cpu);
Scheduler::set_idle_thread(APIC::the().get_idle_thread(cpu));
Scheduler::initialize(cpu);
Scheduler::start();
ASSERT_NOT_REACHED();
}
//
// This method is called once a CPU enters the scheduler and its idle thread
// At this point the initial boot stack can be freed
//
extern "C" void init_finished(u32 cpu)
{
klog() << "CPU #" << cpu << " finished initialization";
if (cpu == 0) {
// TODO: we can reuse the boot stack, maybe for kmalloc()?
} else {
APIC::the().init_finished(cpu);
}
}
void init_stage2()
{
if (APIC::initialized() && APIC::the().enabled_processor_count() > 1) {
// We can't start the APs until we have a scheduler up and running.
// We need to be able to process ICI messages, otherwise another
// core may send too many and end up deadlocking once the pool is
// exhausted
APIC::the().boot_aps();
}
SyncTask::spawn();
FinalizerTask::spawn();
@ -231,7 +253,7 @@ void init_stage2()
if (!root.starts_with("/dev/hda")) {
klog() << "init_stage2: root filesystem must be on the first IDE hard drive (/dev/hda)";
hang();
Processor::halt();
}
auto pata0 = PATAChannel::create(PATAChannel::ChannelType::Primary, force_pio);
@ -244,14 +266,14 @@ void init_stage2()
if (!partition_number.has_value()) {
klog() << "init_stage2: couldn't parse partition number from root kernel parameter";
hang();
Processor::halt();
}
MBRPartitionTable mbr(root_dev);
if (!mbr.initialize()) {
klog() << "init_stage2: couldn't read MBR from disk";
hang();
Processor::halt();
}
if (mbr.is_protective_mbr()) {
@ -259,12 +281,12 @@ void init_stage2()
GPTPartitionTable gpt(root_dev);
if (!gpt.initialize()) {
klog() << "init_stage2: couldn't read GPT from disk";
hang();
Processor::halt();
}
auto partition = gpt.partition(partition_number.value());
if (!partition) {
klog() << "init_stage2: couldn't get partition " << partition_number.value();
hang();
Processor::halt();
}
root_dev = *partition;
} else {
@ -273,23 +295,23 @@ void init_stage2()
EBRPartitionTable ebr(root_dev);
if (!ebr.initialize()) {
klog() << "init_stage2: couldn't read EBR from disk";
hang();
Processor::halt();
}
auto partition = ebr.partition(partition_number.value());
if (!partition) {
klog() << "init_stage2: couldn't get partition " << partition_number.value();
hang();
Processor::halt();
}
root_dev = *partition;
} else {
if (partition_number.value() < 1 || partition_number.value() > 4) {
klog() << "init_stage2: invalid partition number " << partition_number.value() << "; expected 1 to 4";
hang();
Processor::halt();
}
auto partition = mbr.partition(partition_number.value());
if (!partition) {
klog() << "init_stage2: couldn't get partition " << partition_number.value();
hang();
Processor::halt();
}
root_dev = *partition;
}
@ -298,12 +320,12 @@ void init_stage2()
auto e2fs = Ext2FS::create(*FileDescription::create(root_dev));
if (!e2fs->initialize()) {
klog() << "init_stage2: couldn't open root filesystem";
hang();
Processor::halt();
}
if (!VFS::the().mount_root(e2fs)) {
klog() << "VFS::mount_root failed";
hang();
Processor::halt();
}
Process::current()->set_root_directory(VFS::the().root_custody());
@ -319,7 +341,7 @@ void init_stage2()
Process::create_user_process(thread, userspace_init, (uid_t)0, (gid_t)0, (pid_t)0, error, {}, {}, tty0);
if (error != 0) {
klog() << "init_stage2: error spawning SystemServer: " << error;
hang();
Processor::halt();
}
thread->set_priority(THREAD_PRIORITY_HIGH);