Kernel: Stop swallowing thread unblocks while process is stopped

This easily led to kernel deadlocks if the stopped thread held an
important global mutex (like the disk cache lock) while blocking.

Resolve this by ensuring stopped threads have a chance to return to the
userland boundary before actually stopping.
Authored by Idan Horowitz on 2024-02-09 21:13:41 +02:00; committed by Andreas Kling
parent 458e990b7b
commit e38ccebfc8
4 changed files with 24 additions and 23 deletions
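For intuition, here is a minimal userspace analogue of the deadlock described above (illustrative only: it uses std::mutex/std::condition_variable rather than kernel primitives, and every name is made up). The old code skipped the wakeup whenever the target thread was stopped, so a stopped thread that blocked while holding a shared lock was never woken to release it:

// Userspace analogue of the bug this commit fixes (illustrative, not kernel code).
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex disk_cache_lock; // stands in for the important global mutex
std::mutex block_lock;
std::condition_variable blocker;
bool data_ready = false;
bool thread_stopped = false; // stands in for Thread::State::Stopped

int main()
{
    std::thread worker([] {
        std::lock_guard guard(disk_cache_lock); // hold the "global mutex"...
        std::unique_lock lock(block_lock);
        blocker.wait(lock, [] { return data_ready; }); // ...while blocking
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    thread_stopped = true; // a SIGSTOP arrives while the worker is blocked

    {
        std::lock_guard lock(block_lock);
        data_ready = true;
        if (!thread_stopped) // old behavior: swallow the unblock while stopped
            blocker.notify_one();
    }

    // Deadlock: the worker still holds disk_cache_lock and will never be
    // woken to release it, so this blocks forever.
    std::lock_guard stuck(disk_cache_lock);
    worker.join();
}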


@@ -129,7 +129,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
     }
 
-    current_thread->yield_if_stopped();
+    current_thread->yield_if_should_be_stopped();
 
 #if ARCH(X86_64)
     // Apply a random offset in the range 0-255 to the stack pointer,
@@ -178,7 +178,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
     }
 
-    current_thread->yield_if_stopped();
+    current_thread->yield_if_should_be_stopped();
 
     current_thread->check_dispatch_pending_signal();


@@ -268,9 +268,14 @@ void Scheduler::context_switch(Thread* thread)
         return;
 
     // If the last process hasn't blocked (still marked as running),
-    // mark it as runnable for the next round.
-    if (from_thread->state() == Thread::State::Running)
-        from_thread->set_state(Thread::State::Runnable);
+    // mark it as runnable for the next round, unless it's supposed
+    // to be stopped, in which case just mark it as such.
+    if (from_thread->state() == Thread::State::Running) {
+        if (from_thread->should_be_stopped())
+            from_thread->set_state(Thread::State::Stopped);
+        else
+            from_thread->set_state(Thread::State::Runnable);
+    }
 
 #ifdef LOG_EVERY_CONTEXT_SWITCH
     auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:p}";
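This hunk, like the rest of the commit, turns on the distinction between is_stopped() and should_be_stopped(). Their implementations are not part of this diff; from the visible call sites (including the process.set_stopped(true) call further down in Thread::set_state), a plausible split is roughly the following sketch, which is an assumption rather than the literal SerenityOS code:

// Assumed shape of the two predicates, inferred from this diff's call sites.
bool Thread::is_stopped() const
{
    // This thread has already parked itself (at the syscall boundary
    // or via the scheduler).
    return state() == Thread::State::Stopped;
}

bool Thread::should_be_stopped() const
{
    // The owning process was marked stopped, but this thread hasn't
    // reached a safe point to park itself yet.
    return process().is_stopped();
}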


@@ -314,8 +314,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocker != &blocker)
             return;
-        if (!should_be_stopped() && !is_stopped())
-            unblock();
+        VERIFY(!is_stopped());
+        unblock();
     };
     if (Processor::current_in_irq() != 0) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = try_make_weak_ptr().release_value_but_fixme_should_propagate_errors()]() {
@@ -1273,14 +1273,8 @@ void Thread::set_state(State new_state, u8 stop_signal)
         m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable;
         auto& process = this->process();
         if (!process.set_stopped(true)) {
-            process.for_each_thread([&](auto& thread) {
-                if (&thread == this)
-                    return;
-                if (thread.is_stopped())
-                    return;
-                dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
-                thread.set_state(Thread::State::Stopped, stop_signal);
-            });
+            // Note that we don't explicitly stop peer threads, we let them stop on their own the next time they
+            // enter/exit a syscall, or once their current time slice runs out.
             process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
             // Tell the parent process (if any) about this change.
             if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) {
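The replacement comment is the core of the fix: peer threads are no longer force-stopped from set_state(). Read together with the earlier hunks, the paths by which a peer thread now reaches State::Stopped can be summarized as follows (a recap of this diff in comment form, not new behavior):

// How a peer thread now transitions to Stopped, per this commit:
//
// 1. Syscall entry/exit: syscall_handler() calls
//    current_thread->yield_if_should_be_stopped(), which takes
//    g_scheduler_lock, sets State::Stopped, and parks the thread.
//
// 2. Time slice expiry: Scheduler::context_switch() finds a Running
//    thread with should_be_stopped() set and marks it Stopped instead
//    of Runnable.
//
// Until one of these points is reached, the thread keeps running, so it
// can drop any kernel locks (e.g. the disk cache lock) before parking.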


@@ -32,6 +32,7 @@
 #include <Kernel/Locking/LockRank.h>
 #include <Kernel/Locking/SpinlockProtected.h>
 #include <Kernel/Memory/VirtualRange.h>
+#include <Kernel/Tasks/Scheduler.h>
 #include <Kernel/UnixTypes.h>
 
 namespace Kernel {
@@ -789,16 +790,17 @@ public:
     VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
 
-    ALWAYS_INLINE void yield_if_stopped()
+    ALWAYS_INLINE void yield_if_should_be_stopped()
     {
-        // If some thread stopped us, we need to yield to someone else
-        // We check this when entering/exiting a system call. A thread
-        // may continue to execute in user land until the next timer
-        // tick or entering the next system call, or if it's in kernel
-        // mode then we will intercept prior to returning back to user
-        // mode.
+        // A thread may continue to execute in user land until the next timer
+        // tick or until entering the next system call/exiting the current one.
+        if (!is_stopped() && should_be_stopped()) {
+            SpinlockLocker scheduler_lock(g_scheduler_lock);
+            set_state(State::Stopped);
+        }
+        // If we're stopped, we need to yield to someone else.
         SpinlockLocker lock(m_lock);
-        while (state() == Thread::State::Stopped) {
+        while (is_stopped()) {
             lock.unlock();
             // We shouldn't be holding the big lock here
             yield_without_releasing_big_lock();
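To bookend the analogue from the top of this page, the fixed behavior looks like this in the same userspace sketch: the wakeup is always delivered, and the stop is honored at the next safe boundary, after every other lock has been released (again illustrative only; the names are made up and map loosely onto the kernel concepts):

// Fixed analogue: never swallow the unblock; defer the stop instead.
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

std::mutex disk_cache_lock;
std::mutex block_lock;
std::condition_variable blocker;
bool data_ready = false;

std::mutex stop_lock;
std::condition_variable resume;
bool should_be_stopped = false; // process-wide "please stop" flag

// Analogue of yield_if_should_be_stopped(): only ever called at a
// boundary, when the caller holds no other locks.
void park_if_should_be_stopped()
{
    std::unique_lock lock(stop_lock);
    resume.wait(lock, [] { return !should_be_stopped; });
}

int main()
{
    std::thread worker([] {
        {
            std::lock_guard guard(disk_cache_lock); // the "global mutex"
            std::unique_lock lock(block_lock);
            blocker.wait(lock, [] { return data_ready; });
        } // all locks released before parking
        park_if_should_be_stopped();
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(50));
    {
        std::lock_guard lock(stop_lock);
        should_be_stopped = true; // "SIGSTOP": mark the thread, don't force-park it
    }

    {
        std::lock_guard lock(block_lock);
        data_ready = true;
        blocker.notify_one(); // new behavior: always deliver the unblock
    }

    std::lock_guard ok(disk_cache_lock); // acquirable now: no deadlock

    {
        std::lock_guard lock(stop_lock); // "SIGCONT"
        should_be_stopped = false;
    }
    resume.notify_all();
    worker.join();
}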