serenity/Kernel/Syscalls/purge.cpp
Brian Gianforcaro 54b9a4ec1e Kernel: Handle promise violations in the syscall handler
Previously we would crash the process immediately when a promise
violation was found during a syscall. This is error-prone, as we
don't unwind the stack. This means that in certain cases we can
leak resources, like an OwnPtr / RefPtr tracked on the stack, or
even leak a lock acquired in a ScopeLockLocker.
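
(A minimal standard-C++ sketch of that hazard, not SerenityOS code; the
function and lock below are made up for illustration. If the process is
torn down inside the body without unwinding, neither destructor runs, so
the mutex stays held and the allocation leaks.)

    #include <memory>
    #include <mutex>

    std::mutex g_example_lock;

    void syscall_like_body()
    {
        std::lock_guard<std::mutex> guard(g_example_lock); // released only by unwinding
        auto resource = std::make_unique<int>(42);         // freed only by unwinding
        // Crashing the process right here, without unwinding, would leak both.
    }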

To remedy this situation we move the promise violation handling to
the syscall handler, right before we return to user space. This
allows the code to follow the normal unwind path, and guarantees
there is no longer any cleanup that needs to occur.
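
(A toy model of that flow, with hypothetical names rather than the real
handler code: the syscall body records the violation and fails through
the ordinary error path, and the dispatcher only acts on it right before
returning to user space, after every destructor has already run.)

    #include <cstdio>

    struct ToyProcess {
        bool promise_violation_pending { false };
    };

    int toy_syscall_body(ToyProcess& process)
    {
        process.promise_violation_pending = true; // required promise is missing
        return -1;                                // fail normally; the stack unwinds
    }

    void toy_syscall_dispatcher(ToyProcess& process)
    {
        int rc = toy_syscall_body(process);       // RAII cleanup happens in here
        if (process.promise_violation_pending)
            std::puts("crash the process now, after a clean unwind");
        else
            std::printf("return %d to user space\n", rc);
    }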

The Process::require_promise() and Process::require_no_promises()
functions were modified to return ErrorOr<void> so we enforce that
the errors are always propagated by the caller.
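
(Illustrative caller pattern only; sys$example is hypothetical and assumes
the post-change require_promise(Pledge) signature, but the shape matches
the TRY(require_no_promises()) call in the file below: because the check
returns ErrorOr<void>, a syscall has to propagate the failure instead of
being able to ignore it.)

    ErrorOr<FlatPtr> Process::sys$example(int)
    {
        TRY(require_promise(Pledge::stdio)); // error flows back to the syscall handler
        return 0;
    }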
2021-12-29 18:08:15 +01:00

72 lines
2.6 KiB
C++

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/NonnullRefPtrVector.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/InodeVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Process.h>

namespace Kernel {

ErrorOr<FlatPtr> Process::sys$purge(int mode)
{
    VERIFY_PROCESS_BIG_LOCK_ACQUIRED(this)
    TRY(require_no_promises());
    if (!is_superuser())
        return EPERM;

    size_t purged_page_count = 0;
    if (mode & PURGE_ALL_VOLATILE) {
        NonnullRefPtrVector<Memory::AnonymousVMObject> vmobjects;
        {
            ErrorOr<void> result;
            Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) {
                if (vmobject.is_anonymous()) {
                    // In the event that the append fails, only attempt to continue
                    // the purge if we have already appended something successfully.
                    if (auto append_result = vmobjects.try_append(static_cast<Memory::AnonymousVMObject&>(vmobject)); append_result.is_error() && vmobjects.is_empty()) {
                        result = append_result.release_error();
                        return IterationDecision::Break;
                    }
                }
                return IterationDecision::Continue;
            });
            if (result.is_error())
                return result.release_error();
        }

        for (auto& vmobject : vmobjects) {
            purged_page_count += vmobject.purge();
        }
    }

    if (mode & PURGE_ALL_CLEAN_INODE) {
        NonnullRefPtrVector<Memory::InodeVMObject> vmobjects;
        {
            ErrorOr<void> result;
            Memory::MemoryManager::for_each_vmobject([&](auto& vmobject) {
                if (vmobject.is_inode()) {
                    // In the event that the append fails, only attempt to continue
                    // the purge if we have already appended something successfully.
                    if (auto append_result = vmobjects.try_append(static_cast<Memory::InodeVMObject&>(vmobject)); append_result.is_error() && vmobjects.is_empty()) {
                        result = append_result.release_error();
                        return IterationDecision::Break;
                    }
                }
                return IterationDecision::Continue;
            });
            if (result.is_error())
                return result.release_error();
        }

        for (auto& vmobject : vmobjects) {
            purged_page_count += vmobject.release_all_clean_pages();
        }
    }

    return purged_page_count;
}

}