Kernel: Stop swallowing thread unblocks while process is stopped

This easily led to kernel deadlocks if the stopped thread held an
important global mutex (like the disk cache lock) while blocking.
Resolve this by ensuring that threads which are marked to be stopped get a
chance to return to the userland boundary before actually stopping.
This commit is contained in:
Idan Horowitz 2024-02-09 21:13:41 +02:00 committed by Andreas Kling
parent 458e990b7b
commit e38ccebfc8
4 changed files with 24 additions and 23 deletions

View file

@@ -129,7 +129,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread! process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
} }
current_thread->yield_if_stopped(); current_thread->yield_if_should_be_stopped();
#if ARCH(X86_64) #if ARCH(X86_64)
// Apply a random offset in the range 0-255 to the stack pointer, // Apply a random offset in the range 0-255 to the stack pointer,
@@ -178,7 +178,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread! process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
} }
current_thread->yield_if_stopped(); current_thread->yield_if_should_be_stopped();
current_thread->check_dispatch_pending_signal(); current_thread->check_dispatch_pending_signal();

View file

@@ -268,9 +268,14 @@ void Scheduler::context_switch(Thread* thread)
return; return;
// If the last process hasn't blocked (still marked as running), // If the last process hasn't blocked (still marked as running),
// mark it as runnable for the next round. // mark it as runnable for the next round, unless it's supposed
if (from_thread->state() == Thread::State::Running) // to be stopped, in which case just mark it as such.
from_thread->set_state(Thread::State::Runnable); if (from_thread->state() == Thread::State::Running) {
if (from_thread->should_be_stopped())
from_thread->set_state(Thread::State::Stopped);
else
from_thread->set_state(Thread::State::Runnable);
}
#ifdef LOG_EVERY_CONTEXT_SWITCH #ifdef LOG_EVERY_CONTEXT_SWITCH
auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:p}"; auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:p}";

View file

@@ -314,8 +314,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
SpinlockLocker block_lock(m_block_lock); SpinlockLocker block_lock(m_block_lock);
if (m_blocker != &blocker) if (m_blocker != &blocker)
return; return;
if (!should_be_stopped() && !is_stopped()) VERIFY(!is_stopped());
unblock(); unblock();
}; };
if (Processor::current_in_irq() != 0) { if (Processor::current_in_irq() != 0) {
Processor::deferred_call_queue([do_unblock = move(do_unblock), self = try_make_weak_ptr().release_value_but_fixme_should_propagate_errors()]() { Processor::deferred_call_queue([do_unblock = move(do_unblock), self = try_make_weak_ptr().release_value_but_fixme_should_propagate_errors()]() {
@@ -1273,14 +1273,8 @@ void Thread::set_state(State new_state, u8 stop_signal)
m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable; m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable;
auto& process = this->process(); auto& process = this->process();
if (!process.set_stopped(true)) { if (!process.set_stopped(true)) {
process.for_each_thread([&](auto& thread) { // Note that we don't explicitly stop peer threads, we let them stop on their own the next time they
if (&thread == this) // enter/exit a syscall, or once their current time slice runs out.
return;
if (thread.is_stopped())
return;
dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
thread.set_state(Thread::State::Stopped, stop_signal);
});
process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal); process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
// Tell the parent process (if any) about this change. // Tell the parent process (if any) about this change.
if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) { if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) {

View file

@@ -32,6 +32,7 @@
#include <Kernel/Locking/LockRank.h> #include <Kernel/Locking/LockRank.h>
#include <Kernel/Locking/SpinlockProtected.h> #include <Kernel/Locking/SpinlockProtected.h>
#include <Kernel/Memory/VirtualRange.h> #include <Kernel/Memory/VirtualRange.h>
#include <Kernel/Tasks/Scheduler.h>
#include <Kernel/UnixTypes.h> #include <Kernel/UnixTypes.h>
namespace Kernel { namespace Kernel {
@@ -789,16 +790,17 @@ public:
VirtualAddress thread_specific_data() const { return m_thread_specific_data; } VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
ALWAYS_INLINE void yield_if_stopped() ALWAYS_INLINE void yield_if_should_be_stopped()
{ {
// If some thread stopped us, we need to yield to someone else // A thread may continue to execute in user land until the next timer
// We check this when entering/exiting a system call. A thread // tick or until entering the next system call/exiting the current one.
// may continue to execute in user land until the next timer if (!is_stopped() && should_be_stopped()) {
// tick or entering the next system call, or if it's in kernel SpinlockLocker scheduler_lock(g_scheduler_lock);
// mode then we will intercept prior to returning back to user set_state(State::Stopped);
// mode. }
// If we're stopped, we need to yield to someone else.
SpinlockLocker lock(m_lock); SpinlockLocker lock(m_lock);
while (state() == Thread::State::Stopped) { while (is_stopped()) {
lock.unlock(); lock.unlock();
// We shouldn't be holding the big lock here // We shouldn't be holding the big lock here
yield_without_releasing_big_lock(); yield_without_releasing_big_lock();