Kernel/Memory: Add option to annotate region mapping as immutable

We add this basic functionality to the Kernel so Userspace can request
that a particular virtual memory mapping be immutable. This will be
useful later on in the DynamicLoader code.

Annotating a particular Kernel Region as immutable means that the
following operations on that Region are prohibited (see the usage
sketch after this list):
- Changing the region's protection bits
- Unmapping the region
- Annotating the region with other virtual memory flags
- Applying further memory advice (madvise) on the region
- Changing the region name
- Re-mapping the region
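
Below is a minimal userspace sketch of how the new annotation might be exercised once this lands. It is not part of this commit: the SC_annotate_mapping syscall number, the <syscall.h> LibC wrapper, and the Kernel/API/VirtualMemoryAnnotations.h include path are assumptions here, and the real consumer will be the DynamicLoader.

// Hedged sketch: not from this commit; assumes LibC's raw syscall() wrapper
// and the SC_annotate_mapping syscall number are reachable from userspace.
#include <Kernel/API/VirtualMemoryAnnotations.h>
#include <stdio.h>
#include <sys/mman.h>
#include <syscall.h>

int main()
{
    // Map one anonymous, writable page the usual way.
    void* page = mmap(nullptr, 4096, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (page == MAP_FAILED)
        return 1;

    // Ask the kernel to annotate the mapping as immutable (error handling elided).
    syscall(SC_annotate_mapping, page, static_cast<int>(Kernel::VirtualMemoryRangeFlags::Immutable));

    // The mapping is now sealed: each of the calls below is refused with EPERM.
    if (mprotect(page, 4096, PROT_READ) < 0)
        perror("mprotect"); // expected: EPERM
    if (munmap(page, 4096) < 0)
        perror("munmap"); // expected: EPERM
    return 0;
}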
Author: Liav A, 2022-12-15 21:08:57 +02:00 (committed by Andrew Kaster)
Parent: 6c0486277e
Commit: 8585b2dc23
4 changed files with 35 additions and 4 deletions

View file

@@ -14,6 +14,7 @@ namespace Kernel {
 enum class VirtualMemoryRangeFlags : u32 {
     None = 0,
     SyscallCode = 1 << 0,
+    Immutable = 1 << 1,
 };
 AK_ENUM_BITWISE_OPERATORS(VirtualMemoryRangeFlags);

View file

@@ -59,6 +59,8 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     if (auto* whole_region = find_region_from_range(range_to_unmap)) {
         if (!whole_region->is_mmap())
             return EPERM;
+        if (whole_region->is_immutable())
+            return EPERM;
         PerformanceManager::add_unmap_perf_event(Process::current(), whole_region->range());
@@ -69,6 +71,8 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     if (auto* old_region = find_region_containing(range_to_unmap)) {
         if (!old_region->is_mmap())
             return EPERM;
+        if (old_region->is_immutable())
+            return EPERM;
         // Remove the old region from our regions tree, since we're going to add another region
         // with the exact same start address.
@@ -99,6 +103,8 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
     for (auto* region : regions) {
         if (!region->is_mmap())
             return EPERM;
+        if (region->is_immutable())
+            return EPERM;
     }
     Vector<Region*, 2> new_regions;

View file

@@ -88,6 +88,9 @@ public:
     [[nodiscard]] bool is_stack() const { return m_stack; }
     void set_stack(bool stack) { m_stack = stack; }
+    [[nodiscard]] bool is_immutable() const { return m_immutable; }
+    void set_immutable() { m_immutable = true; }
     [[nodiscard]] bool is_mmap() const { return m_mmap; }
     void set_mmap(bool mmap, bool description_was_readable, bool description_was_writable)
@@ -235,6 +238,7 @@ private:
     bool m_cacheable : 1 { false };
     bool m_stack : 1 { false };
     bool m_mmap : 1 { false };
+    bool m_immutable : 1 { false };
     bool m_syscall_region : 1 { false };
     bool m_write_combine : 1 { false };
     bool m_mmapped_from_readable : 1 { false };

View file

@@ -284,6 +284,8 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         if (auto* whole_region = space->find_region_from_range(range_to_mprotect)) {
             if (!whole_region->is_mmap())
                 return EPERM;
+            if (whole_region->is_immutable())
+                return EPERM;
             TRY(validate_mmap_prot(prot, whole_region->is_stack(), whole_region->vmobject().is_anonymous(), whole_region));
             if (whole_region->access() == Memory::prot_to_region_access_flags(prot))
                 return 0;
@@ -301,6 +303,8 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         if (auto* old_region = space->find_region_containing(range_to_mprotect)) {
             if (!old_region->is_mmap())
                 return EPERM;
+            if (old_region->is_immutable())
+                return EPERM;
             TRY(validate_mmap_prot(prot, old_region->is_stack(), old_region->vmobject().is_anonymous(), old_region));
             if (old_region->access() == Memory::prot_to_region_access_flags(prot))
                 return 0;
@@ -336,6 +340,8 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         for (auto const* region : regions) {
             if (!region->is_mmap())
                 return EPERM;
+            if (region->is_immutable())
+                return EPERM;
             TRY(validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region));
             if (region->vmobject().is_inode())
                 TRY(validate_inode_mmap_prot(prot, region->mmapped_from_readable(), region->mmapped_from_writable(), region->is_shared()));
@@ -415,6 +421,8 @@ ErrorOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, int
             return EINVAL;
         if (!region->is_mmap())
             return EPERM;
+        if (region->is_immutable())
+            return EPERM;
         if (advice == MADV_SET_VOLATILE || advice == MADV_SET_NONVOLATILE) {
             if (!region->vmobject().is_anonymous())
                 return EINVAL;
@@ -448,6 +456,9 @@ ErrorOr<FlatPtr> Process::sys$set_mmap_name(Userspace<Syscall::SC_set_mmap_name_
         if (!region->is_mmap())
             return EPERM;
+        if (region->is_immutable())
+            return EPERM;
         region->set_name(move(name));
         PerformanceManager::add_mmap_perf_event(*this, *region);
@@ -481,6 +492,9 @@ ErrorOr<FlatPtr> Process::sys$mremap(Userspace<Syscall::SC_mremap_params const*>
         if (!old_region->is_mmap())
             return EPERM;
+        if (old_region->is_immutable())
+            return EPERM;
         if (old_region->vmobject().is_shared_inode() && params.flags & MAP_PRIVATE && !(params.flags & (MAP_ANONYMOUS | MAP_NORESERVE))) {
             auto range = old_region->range();
             auto old_prot = region_access_flags_to_prot(old_region->access());
@@ -567,7 +581,10 @@ ErrorOr<FlatPtr> Process::sys$annotate_mapping(Userspace<void*> address, int fla
     VERIFY_NO_PROCESS_BIG_LOCK(this);
     return address_space().with([&](auto& space) -> ErrorOr<FlatPtr> {
-        if (space->enforces_syscall_regions())
+        if (flags == to_underlying(VirtualMemoryRangeFlags::None))
+            return EINVAL;
+        if (space->enforces_syscall_regions() && (flags & to_underlying(VirtualMemoryRangeFlags::SyscallCode)))
             return EPERM;
         if (!address) {
@@ -584,10 +601,13 @@ ErrorOr<FlatPtr> Process::sys$annotate_mapping(Userspace<void*> address, int fla
         if (!region->is_mmap())
             return EINVAL;
+        if (region->is_immutable())
+            return EPERM;
-        if (flags == to_underlying(VirtualMemoryRangeFlags::None))
-            return EINVAL;
-        region->set_syscall_region(true);
+        if (flags & to_underlying(VirtualMemoryRangeFlags::SyscallCode))
+            region->set_syscall_region(true);
+        if (flags & to_underlying(VirtualMemoryRangeFlags::Immutable))
+            region->set_immutable();
         return 0;
     });
 }
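
A note on the last hunk: sys$annotate_mapping now treats its flags argument as a genuine bitmask, rejecting a zero value with EINVAL and applying each known flag independently, so both annotations can be requested in one call. A hedged sketch under the same assumptions as the earlier example (seal_code_region and code_region are hypothetical names, not from this commit):

// Hypothetical helper, same assumptions as above (SC_annotate_mapping, <syscall.h>).
#include <Kernel/API/VirtualMemoryAnnotations.h>
#include <syscall.h>

static int seal_code_region(void* code_region)
{
    // Request both annotations at once; the kernel walks the bitmask flag by flag.
    int flags = static_cast<int>(Kernel::VirtualMemoryRangeFlags::SyscallCode)
        | static_cast<int>(Kernel::VirtualMemoryRangeFlags::Immutable);
    // Once the process enforces syscall regions, the SyscallCode bit
    // (and therefore this call) is refused with EPERM.
    return static_cast<int>(syscall(SC_annotate_mapping, code_region, flags));
}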