Kernel: Shrink instead of expand sigaltstack range to page boundaries

Since the POSIX sigaltstack manpage suggests allocating the stack
region using malloc(), and many heap implementations (including ours)
store heap chunk metadata in memory just before the vended pointer,
we would end up zeroing the metadata, leading to various crashes.
This commit is contained in:
Idan Horowitz 2023-12-24 15:53:16 +02:00 committed by Andreas Kling
parent 4c6fd454d0
commit a49b7e92eb
5 changed files with 29 additions and 34 deletions

View file

@@ -117,7 +117,6 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
// A child process created via fork(2) inherits a copy of its parent's alternate signal stack settings.
child_first_thread->m_alternative_signal_stack = Thread::current()->m_alternative_signal_stack;
child_first_thread->m_alternative_signal_stack_size = Thread::current()->m_alternative_signal_stack_size;
auto& child_regs = child_first_thread->m_regs;
#if ARCH(X86_64)

View file

@@ -117,17 +117,20 @@ ErrorOr<FlatPtr> Process::sys$sigreturn(RegisterState& registers)
return saved_ax;
}
ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
ErrorOr<Memory::VirtualRange> Process::remap_range_as_stack(FlatPtr address, size_t size)
{
// FIXME: This duplicates a lot of logic from sys$mprotect, this should be abstracted out somehow
auto range_to_remap = TRY(Memory::expand_range_to_page_boundaries(address, size));
// NOTE: We shrink the given range to page boundaries (instead of expanding it), as sigaltstack's manpage suggests
// using malloc() to allocate the stack region, and many heap implementations (including ours) store heap chunk
// metadata in memory just before the vended pointer, which we would end up zeroing.
auto range_to_remap = TRY(Memory::shrink_range_to_page_boundaries(address, size));
if (!range_to_remap.size())
return EINVAL;
if (!is_user_range(range_to_remap))
return EFAULT;
return address_space().with([&](auto& space) -> ErrorOr<void> {
return address_space().with([&](auto& space) -> ErrorOr<Memory::VirtualRange> {
if (auto* whole_region = space->find_region_from_range(range_to_remap)) {
if (!whole_region->is_mmap())
return EPERM;
@@ -141,7 +144,7 @@ ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
whole_region->clear_to_zero();
whole_region->remap();
return {};
return range_to_remap;
}
if (auto* old_region = space->find_region_containing(range_to_remap)) {
@@ -174,7 +177,7 @@ ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
}
TRY(new_region->map(space->page_directory()));
return {};
return range_to_remap;
}
if (auto const& regions = TRY(space->find_regions_intersecting(range_to_remap)); regions.size()) {
@@ -235,7 +238,7 @@ ErrorOr<void> Process::remap_range_as_stack(FlatPtr address, size_t size)
TRY(new_region->map(space->page_directory()));
}
return {};
return range_to_remap;
}
return EINVAL;
@@ -249,13 +252,16 @@ ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<stack_t const*> user_ss, Use
if (user_old_ss) {
stack_t old_ss_value {};
old_ss_value.ss_sp = (void*)Thread::current()->m_alternative_signal_stack;
old_ss_value.ss_size = Thread::current()->m_alternative_signal_stack_size;
old_ss_value.ss_flags = 0;
if (!Thread::current()->has_alternative_signal_stack())
if (Thread::current()->m_alternative_signal_stack.has_value()) {
old_ss_value.ss_sp = Thread::current()->m_alternative_signal_stack->base().as_ptr();
old_ss_value.ss_size = Thread::current()->m_alternative_signal_stack->size();
if (Thread::current()->is_in_alternative_signal_stack())
old_ss_value.ss_flags = SS_ONSTACK;
else
old_ss_value.ss_flags = 0;
} else {
old_ss_value.ss_flags = SS_DISABLE;
else if (Thread::current()->is_in_alternative_signal_stack())
old_ss_value.ss_flags = SS_ONSTACK;
}
TRY(copy_to_user(user_old_ss, &old_ss_value));
}
@@ -266,8 +272,7 @@ ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<stack_t const*> user_ss, Use
return EPERM;
if (ss.ss_flags == SS_DISABLE) {
Thread::current()->m_alternative_signal_stack_size = 0;
Thread::current()->m_alternative_signal_stack = 0;
Thread::current()->m_alternative_signal_stack.clear();
} else if (ss.ss_flags == 0) {
if (ss.ss_size <= MINSIGSTKSZ)
return ENOMEM;
@@ -278,10 +283,7 @@ ErrorOr<FlatPtr> Process::sys$sigaltstack(Userspace<stack_t const*> user_ss, Use
// protections, sigaltstack ranges are carved out of their regions, zeroed, and
// turned into read/writable MAP_STACK-enabled regions.
// This is inspired by OpenBSD's solution: https://man.openbsd.org/sigaltstack.2
TRY(remap_range_as_stack((FlatPtr)ss.ss_sp, ss.ss_size));
Thread::current()->m_alternative_signal_stack = (FlatPtr)ss.ss_sp;
Thread::current()->m_alternative_signal_stack_size = ss.ss_size;
Thread::current()->m_alternative_signal_stack = TRY(remap_range_as_stack((FlatPtr)ss.ss_sp, ss.ss_size));
} else {
return EINVAL;
}

View file

@@ -688,7 +688,7 @@ private:
ErrorOr<GlobalFutexKey> get_futex_key(FlatPtr user_address, bool shared);
ErrorOr<void> remap_range_as_stack(FlatPtr address, size_t size);
ErrorOr<Memory::VirtualRange> remap_range_as_stack(FlatPtr address, size_t size);
ErrorOr<FlatPtr> open_impl(Userspace<Syscall::SC_open_params const*>);
ErrorOr<FlatPtr> close_impl(int fd);

View file

@@ -752,8 +752,7 @@ void Thread::reset_signals_for_exec()
m_have_any_unmasked_pending_signals.store(false, AK::memory_order_release);
m_signal_action_masks.fill({});
// A successful call to execve(2) removes any existing alternate signal stack
m_alternative_signal_stack = 0;
m_alternative_signal_stack_size = 0;
m_alternative_signal_stack.clear();
}
// Certain exceptions, such as SIGSEGV and SIGILL, put a
@@ -881,15 +880,12 @@ bool Thread::is_signal_masked(u8 signal) const
return (1 << (signal - 1)) & m_signal_mask;
}
bool Thread::has_alternative_signal_stack() const
{
return m_alternative_signal_stack_size != 0;
}
bool Thread::is_in_alternative_signal_stack() const
{
auto sp = get_register_dump_from_stack().userspace_sp();
return sp >= m_alternative_signal_stack && sp < m_alternative_signal_stack + m_alternative_signal_stack_size;
if (!m_alternative_signal_stack.has_value())
return false;
return m_alternative_signal_stack->contains(VirtualAddress(sp));
}
static ErrorOr<void> push_value_on_user_stack(FlatPtr& stack, FlatPtr data)
@@ -1025,12 +1021,12 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
m_signal_mask |= new_signal_mask;
m_have_any_unmasked_pending_signals.store((m_pending_signals & ~m_signal_mask) != 0, AK::memory_order_release);
bool use_alternative_stack = ((action.flags & SA_ONSTACK) != 0) && has_alternative_signal_stack() && !is_in_alternative_signal_stack();
bool use_alternative_stack = ((action.flags & SA_ONSTACK) != 0) && m_alternative_signal_stack.has_value() && !is_in_alternative_signal_stack();
auto setup_stack = [&](RegisterState& state) -> ErrorOr<void> {
FlatPtr stack;
if (use_alternative_stack)
stack = m_alternative_signal_stack + m_alternative_signal_stack_size;
stack = m_alternative_signal_stack->end().get();
else
stack = state.userspace_sp();
@@ -1042,7 +1038,7 @@ DispatchSignalResult Thread::dispatch_signal(u8 signal)
.uc_stack = {
.ss_sp = bit_cast<void*>(stack),
.ss_flags = action.flags & SA_ONSTACK,
.ss_size = use_alternative_stack ? m_alternative_signal_stack_size : 0,
.ss_size = use_alternative_stack ? m_alternative_signal_stack->size() : 0,
},
.uc_mcontext = {},
};

View file

@@ -884,7 +884,6 @@ public:
u32 pending_signals() const;
u32 pending_signals_for_state() const;
[[nodiscard]] bool has_alternative_signal_stack() const;
[[nodiscard]] bool is_in_alternative_signal_stack() const;
FPUState& fpu_state() { return m_fpu_state; }
@@ -1177,8 +1176,7 @@ private:
u32 m_pending_signals { 0 };
u8 m_currently_handled_signal { 0 };
u32 m_signal_mask { 0 };
FlatPtr m_alternative_signal_stack { 0 };
FlatPtr m_alternative_signal_stack_size { 0 };
Optional<Memory::VirtualRange> m_alternative_signal_stack;
SignalBlockerSet m_signal_blocker_set;
FlatPtr m_kernel_stack_base { 0 };
FlatPtr m_kernel_stack_top { 0 };