Kernel: Fix kernel panic when blocking on the process' big lock

Another thread might end up marking the blocking thread as holding
the lock before it gets a chance to finish invoking the scheduler.
Gunnar Beutner 2021-08-10 21:20:45 +02:00 committed by Andreas Kling
parent 309a20c014
commit 3322efd4cd
3 changed files with 17 additions and 8 deletions
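
The fix follows a simple pattern: the yield helper gains an enum parameter so the one call site that can race (Thread::block) may skip the "not holding the big lock" assertion, while every other caller keeps the strict check by default. Below is a minimal standalone sketch of that pattern, using assert() in place of VERIFY and a plain bool in place of the real big-lock state; the names are hypothetical and this is not SerenityOS code:

    #include <cassert>

    enum class VerifyLockNotHeld {
        Yes,
        No
    };

    struct ToyThread {
        // In the real kernel another thread can flip this on our behalf
        // while we are still on our way into the scheduler.
        bool marked_as_holding_big_lock { false };

        void yield_without_releasing_big_lock(VerifyLockNotHeld verify = VerifyLockNotHeld::Yes)
        {
            // Only assert when the caller knows the flag cannot change underneath it.
            assert(verify == VerifyLockNotHeld::No || !marked_as_holding_big_lock);
            // ... disable interrupts and invoke the scheduler here ...
        }
    };

    int main()
    {
        ToyThread thread;
        thread.marked_as_holding_big_lock = true;                        // simulate the race
        thread.yield_without_releasing_big_lock(VerifyLockNotHeld::No);  // must not abort
        thread.marked_as_holding_big_lock = false;
        thread.yield_without_releasing_big_lock();                       // default still checks
    }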

@@ -12,7 +12,7 @@ KResultOr<FlatPtr> Process::sys$yield()
 {
     VERIFY_NO_PROCESS_BIG_LOCK(this);
     REQUIRE_PROMISE(stdio);
-    Thread::current()->yield_assuming_not_holding_big_lock();
+    Thread::current()->yield_without_releasing_big_lock();
     return 0;
 }

@@ -220,7 +220,10 @@ void Thread::block(Kernel::Mutex& lock, ScopedSpinLock<SpinLock<u8>>& lock_lock,
             // We need to release the big lock
             yield_and_release_relock_big_lock();
         } else {
-            yield_assuming_not_holding_big_lock();
+            // By the time we've reached this another thread might have
+            // marked us as holding the big lock, so this call must not
+            // verify that we're not holding it.
+            yield_without_releasing_big_lock(VerifyLockNotHeld::No);
         }
         VERIFY(Processor::in_critical());
@@ -412,10 +415,10 @@ void Thread::exit(void* exit_value)
     die_if_needed();
 }
 
-void Thread::yield_assuming_not_holding_big_lock()
+void Thread::yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held)
 {
     VERIFY(!g_scheduler_lock.own_lock());
-    VERIFY(!process().big_lock().own_lock());
+    VERIFY(verify_lock_not_held == VerifyLockNotHeld::No || !process().big_lock().own_lock());
     // Disable interrupts here. This ensures we don't accidentally switch contexts twice
     InterruptDisabler disable;
     Scheduler::yield(); // flag a switch
@@ -613,7 +616,7 @@ void Thread::check_dispatch_pending_signal()
     switch (result) {
     case DispatchSignalResult::Yield:
-        yield_assuming_not_holding_big_lock();
+        yield_without_releasing_big_lock();
         break;
     default:
         break;
     }

@@ -836,7 +836,7 @@ public:
         while (state() == Thread::Stopped) {
             lock.unlock();
             // We shouldn't be holding the big lock here
-            yield_assuming_not_holding_big_lock();
+            yield_without_releasing_big_lock();
             lock.lock();
         }
     }
@@ -922,7 +922,7 @@ public:
             // Yield to the scheduler, and wait for us to resume unblocked.
             VERIFY(!g_scheduler_lock.own_lock());
             VERIFY(Processor::in_critical());
-            yield_assuming_not_holding_big_lock();
+            yield_without_releasing_big_lock();
             VERIFY(Processor::in_critical());
 
             ScopedSpinLock block_lock2(m_block_lock);
@@ -1371,7 +1371,13 @@ private:
     bool m_is_profiling_suppressed { false };
 
     void yield_and_release_relock_big_lock();
-    void yield_assuming_not_holding_big_lock();
+
+    enum class VerifyLockNotHeld {
+        Yes,
+        No
+    };
+
+    void yield_without_releasing_big_lock(VerifyLockNotHeld verify_lock_not_held = VerifyLockNotHeld::Yes);
     void drop_thread_count(bool);
 
 public: