Kernel: Fix crash when delivering signal to barely created thread

We need to wait until a thread is fully set up and ready for running
before attempting to deliver a signal. Otherwise we may not have a
user stack yet.

Also, remove the Skip0SchedulerPasses and Skip1SchedulerPass thread
states that we don't really need anymore with software context switching.

Fixes the kernel crash reported in #3419
This commit is contained in:
Tom 2020-09-07 08:31:00 -06:00 committed by Andreas Kling
parent f1a65d1d70
commit c3d231616c
5 changed files with 32 additions and 21 deletions

View file

@ -306,12 +306,6 @@ void Thread::consider_unblock(time_t now_sec, long now_usec)
unblock();
return;
}
case Thread::Skip1SchedulerPass:
set_state(Thread::Skip0SchedulerPasses);
return;
case Thread::Skip0SchedulerPasses:
set_state(Thread::Runnable);
return;
}
}

View file

@ -306,7 +306,7 @@ int Process::do_exec(NonnullRefPtr<FileDescription> main_program_description, Ve
if (was_profiling)
Profiling::did_exec(path);
new_main_thread->set_state(Thread::State::Skip1SchedulerPass);
new_main_thread->set_state(Thread::State::Runnable);
big_lock().force_unlock_if_locked();
ASSERT_INTERRUPTS_DISABLED();
ASSERT(Processor::current().in_critical());

View file

@ -94,7 +94,8 @@ pid_t Process::sys$fork(RegisterState& regs)
g_processes->prepend(child);
}
child_first_thread->set_state(Thread::State::Skip1SchedulerPass);
child_first_thread->set_affinity(Thread::current()->affinity());
child_first_thread->set_state(Thread::State::Runnable);
return child->pid().value();
}

View file

@ -242,10 +242,6 @@ const char* Thread::state_string() const
return "Dead";
case Thread::Stopped:
return "Stopped";
case Thread::Skip1SchedulerPass:
return "Skip1";
case Thread::Skip0SchedulerPasses:
return "Skip0";
case Thread::Queued:
return "Queued";
case Thread::Blocked:
@ -334,6 +330,7 @@ void Thread::send_signal(u8 signal, [[maybe_unused]] Process* sender)
ScopedSpinLock lock(g_scheduler_lock);
m_pending_signals |= 1 << (signal - 1);
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
}
// Certain exceptions, such as SIGSEGV and SIGILL, put a
@ -352,7 +349,7 @@ void Thread::send_urgent_signal_to_self(u8 signal)
ShouldUnblockThread Thread::dispatch_one_pending_signal()
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(m_lock.own_lock());
u32 signal_candidates = m_pending_signals & ~m_signal_mask;
ASSERT(signal_candidates);
@ -459,7 +456,7 @@ void Thread::resume_from_stopped()
ShouldUnblockThread Thread::dispatch_signal(u8 signal)
{
ASSERT_INTERRUPTS_DISABLED();
ASSERT(g_scheduler_lock.is_locked());
ASSERT(g_scheduler_lock.own_lock());
ASSERT(signal > 0 && signal <= 32);
ASSERT(!process().is_ring0());
@ -467,12 +464,22 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
klog() << "signal: dispatch signal " << signal << " to " << *this;
#endif
if (m_state == Invalid || !is_initialized()) {
// Thread has barely been created, we need to wait until it is
// at least in Runnable state and is_initialized() returns true,
// which indicates that it is fully set up and we actually have
// a register state on the stack that we can modify
return ShouldUnblockThread::No;
}
auto& action = m_signal_action_data[signal];
// FIXME: Implement SA_SIGINFO signal handlers.
ASSERT(!(action.flags & SA_SIGINFO));
// Mark this signal as handled.
m_pending_signals &= ~(1 << (signal - 1));
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
if (signal == SIGSTOP) {
if (!is_stopped()) {
@ -544,8 +551,10 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
new_signal_mask |= 1 << (signal - 1);
m_signal_mask |= new_signal_mask;
m_have_any_unmasked_pending_signals.store(m_pending_signals & ~m_signal_mask, AK::memory_order_release);
auto setup_stack = [&]<typename ThreadState>(ThreadState state, u32* stack) {
auto setup_stack = [&](RegisterState& state) {
u32* stack = &state.userspace_esp;
u32 old_esp = *stack;
u32 ret_eip = state.eip;
u32 ret_eflags = state.eflags;
@ -587,8 +596,7 @@ ShouldUnblockThread Thread::dispatch_signal(u8 signal)
// Conversely, when the thread isn't blocking the RegisterState may not be
// valid (fork, exec etc) but the tss will, so we use that instead.
auto& regs = get_register_dump_from_stack();
u32* stack = &regs.userspace_esp;
setup_stack(regs, stack);
setup_stack(regs);
regs.eip = g_return_to_ring3_from_signal_trampoline.get();
#ifdef SIGNAL_DEBUG
@ -710,11 +718,20 @@ void Thread::set_state(State new_state)
ASSERT(m_blocker != nullptr);
}
auto previous_state = m_state;
if (previous_state == Invalid) {
// If we were *just* created, we may already have pending signals
ScopedSpinLock thread_lock(m_lock);
if (has_unmasked_pending_signals()) {
dbg() << "Dispatch pending signals to new thread " << *this;
dispatch_one_pending_signal();
}
}
if (new_state == Stopped) {
m_stop_state = m_state;
}
auto previous_state = m_state;
m_state = new_state;
#ifdef THREAD_DEBUG
dbg() << "Set Thread " << *this << " state to " << state_string();

View file

@ -114,8 +114,6 @@ public:
Invalid = 0,
Runnable,
Running,
Skip1SchedulerPass,
Skip0SchedulerPasses,
Dying,
Dead,
Stopped,
@ -417,7 +415,7 @@ public:
ShouldUnblockThread dispatch_one_pending_signal();
ShouldUnblockThread dispatch_signal(u8 signal);
bool has_unmasked_pending_signals() const { return m_pending_signals & ~m_signal_mask; }
bool has_unmasked_pending_signals() const { return m_have_any_unmasked_pending_signals.load(AK::memory_order_consume); }
void terminate_due_to_signal(u8 signal);
bool should_ignore_signal(u8 signal) const;
bool has_signal_handler(u8 signal) const;
@ -587,6 +585,7 @@ private:
bool m_dump_backtrace_on_finalization { false };
bool m_should_die { false };
bool m_initialized { false };
Atomic<bool> m_have_any_unmasked_pending_signals { false };
OwnPtr<ThreadTracer> m_tracer;