diff --git a/Kernel/Syscalls/SyscallHandler.cpp b/Kernel/Syscalls/SyscallHandler.cpp
index 4da95059d3..4e443199c7 100644
--- a/Kernel/Syscalls/SyscallHandler.cpp
+++ b/Kernel/Syscalls/SyscallHandler.cpp
@@ -129,7 +129,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
     }
 
-    current_thread->yield_if_stopped();
+    current_thread->yield_if_should_be_stopped();
 
 #if ARCH(X86_64)
     // Apply a random offset in the range 0-255 to the stack pointer,
@@ -178,7 +178,7 @@ NEVER_INLINE void syscall_handler(TrapFrame* trap)
         process.tracer_trap(*current_thread, regs); // this triggers SIGTRAP and stops the thread!
     }
 
-    current_thread->yield_if_stopped();
+    current_thread->yield_if_should_be_stopped();
 
     current_thread->check_dispatch_pending_signal();
 
diff --git a/Kernel/Tasks/Scheduler.cpp b/Kernel/Tasks/Scheduler.cpp
index f60ad7915a..c3f6bbf17b 100644
--- a/Kernel/Tasks/Scheduler.cpp
+++ b/Kernel/Tasks/Scheduler.cpp
@@ -268,9 +268,14 @@ void Scheduler::context_switch(Thread* thread)
         return;
 
     // If the last process hasn't blocked (still marked as running),
-    // mark it as runnable for the next round.
-    if (from_thread->state() == Thread::State::Running)
-        from_thread->set_state(Thread::State::Runnable);
+    // mark it as runnable for the next round, unless it's supposed
+    // to be stopped, in which case just mark it as such.
+    if (from_thread->state() == Thread::State::Running) {
+        if (from_thread->should_be_stopped())
+            from_thread->set_state(Thread::State::Stopped);
+        else
+            from_thread->set_state(Thread::State::Runnable);
+    }
 
 #ifdef LOG_EVERY_CONTEXT_SWITCH
     auto const msg = "Scheduler[{}]: {} -> {} [prio={}] {:p}";
diff --git a/Kernel/Tasks/Thread.cpp b/Kernel/Tasks/Thread.cpp
index a5bf2938d1..d45eb190f0 100644
--- a/Kernel/Tasks/Thread.cpp
+++ b/Kernel/Tasks/Thread.cpp
@@ -314,8 +314,8 @@ void Thread::unblock_from_blocker(Blocker& blocker)
         SpinlockLocker block_lock(m_block_lock);
         if (m_blocker != &blocker)
             return;
-        if (!should_be_stopped() && !is_stopped())
-            unblock();
+        VERIFY(!is_stopped());
+        unblock();
     };
     if (Processor::current_in_irq() != 0) {
         Processor::deferred_call_queue([do_unblock = move(do_unblock), self = try_make_weak_ptr().release_value_but_fixme_should_propagate_errors()]() {
@@ -1273,14 +1273,8 @@ void Thread::set_state(State new_state, u8 stop_signal)
         m_stop_state = previous_state != Thread::State::Running ? previous_state : Thread::State::Runnable;
         auto& process = this->process();
         if (!process.set_stopped(true)) {
-            process.for_each_thread([&](auto& thread) {
-                if (&thread == this)
-                    return;
-                if (thread.is_stopped())
-                    return;
-                dbgln_if(THREAD_DEBUG, "Stopping peer thread {}", thread);
-                thread.set_state(Thread::State::Stopped, stop_signal);
-            });
+            // Note that we don't explicitly stop peer threads, we let them stop on their own the next time they
+            // enter/exit a syscall, or once their current time slice runs out.
             process.unblock_waiters(Thread::WaitBlocker::UnblockFlags::Stopped, stop_signal);
             // Tell the parent process (if any) about this change.
             if (auto parent = Process::from_pid_ignoring_jails(process.ppid())) {
diff --git a/Kernel/Tasks/Thread.h b/Kernel/Tasks/Thread.h
index 345db6a5a1..8121466d75 100644
--- a/Kernel/Tasks/Thread.h
+++ b/Kernel/Tasks/Thread.h
@@ -32,6 +32,7 @@
 #include
 #include
 #include
+#include
 #include
 
 namespace Kernel {
@@ -789,16 +790,17 @@ public:
 
     VirtualAddress thread_specific_data() const { return m_thread_specific_data; }
 
-    ALWAYS_INLINE void yield_if_stopped()
+    ALWAYS_INLINE void yield_if_should_be_stopped()
     {
-        // If some thread stopped us, we need to yield to someone else
-        // We check this when entering/exiting a system call. A thread
-        // may continue to execute in user land until the next timer
-        // tick or entering the next system call, or if it's in kernel
-        // mode then we will intercept prior to returning back to user
-        // mode.
+        // A thread may continue to execute in user land until the next timer
+        // tick or until entering the next system call/exiting the current one.
+        if (!is_stopped() && should_be_stopped()) {
+            SpinlockLocker scheduler_lock(g_scheduler_lock);
+            set_state(State::Stopped);
+        }
+        // If we're stopped, we need to yield to someone else.
        SpinlockLocker lock(m_lock);
-        while (state() == Thread::State::Stopped) {
+        while (is_stopped()) {
             lock.unlock();
             // We shouldn't be holding the big lock here
             yield_without_releasing_big_lock();
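For context, a minimal sketch of how the two predicates used above might relate. This assumes should_be_stopped() simply mirrors the process-wide flag toggled by Process::set_stopped(true), while is_stopped() reflects the thread's own scheduler state; the definitions below are illustrative only and are not part of this patch.

// Illustrative sketch (assumed definitions, not from this diff):
// should_be_stopped() asks "has my process been told to stop?",
// while is_stopped() asks "have I actually transitioned to Stopped?".
// A peer thread may keep running after its process is marked stopped,
// until it reaches yield_if_should_be_stopped() on a syscall boundary
// or is parked by Scheduler::context_switch() at the end of its time slice.
bool Thread::should_be_stopped() const
{
    return process().is_stopped(); // hypothetical accessor for the flag set via set_stopped()
}

bool Thread::is_stopped() const
{
    return state() == Thread::State::Stopped;
}

Under this reading, only the thread itself (in yield_if_should_be_stopped()) or the scheduler (for a Running from_thread) ever moves a thread to Stopped, which appears to be why unblock_from_blocker() can VERIFY(!is_stopped()): a thread that is still Blocked has not yet reached any of those stopping points.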