Kernel: Make file-backed memory regions remember description permissions

This allows sys$mprotect() to honor the original readable & writable
flags of the open file description as they were at the point we did the
original sys$mmap().

IIUC, this is what Dr. POSIX wants us to do:
https://pubs.opengroup.org/onlinepubs/9699919799/functions/mprotect.html

Also, remove the bogus and racy "W^X" checking we did against mappings
based on their current inode metadata. If we want to do this, we can do
it properly. For now, it was not only racy, but also did blocking I/O
while holding a spinlock.
Andreas Kling 2022-08-23 20:18:39 +02:00
parent 30861daa93
commit d3e8eb5918
7 changed files with 33 additions and 32 deletions
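
The POSIX rule being implemented here (per the linked mprotect page) is that the requested protection is checked against the access the process has to the underlying file, i.e. the access mode of the open file description at mmap() time: asking for PROT_WRITE on a MAP_SHARED mapping whose description was not writable when it was mapped fails with EACCES, regardless of what the inode's permission bits say at mprotect() time. A minimal userspace sketch of that behavior (hypothetical test program, not part of this commit; the path /tmp/example is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    // Assumption: /tmp/example exists and is readable and writable on disk.
    int fd = open("/tmp/example", O_RDONLY); // the file description itself is read-only
    if (fd < 0)
        return 1;

    void* p = mmap(nullptr, 4096, PROT_READ, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        return 1;

    // The description was not writable when we called mmap(), so upgrading
    // this shared mapping to PROT_WRITE should fail with EACCES, even though
    // the underlying inode is writable right now.
    if (mprotect(p, 4096, PROT_READ | PROT_WRITE) < 0)
        perror("mprotect"); // expected: EACCES

    close(fd); // closing the fd afterwards does not change the answer
    return 0;
}

Before this commit, validate_inode_mmap_prot() consulted the inode's current metadata (and the VMObject's other mappings) at mprotect() time; as the message notes, that was racy and could do blocking I/O while a spinlock was held. Remembering the description's readable/writable flags on the Region turns the check into a couple of bit tests.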

Kernel/Memory/AddressSpace.cpp

@@ -142,7 +142,7 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
     auto new_region = TRY(Region::create_unplaced(
         source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
     new_region->set_syscall_region(source_region.is_syscall_region());
-    new_region->set_mmap(source_region.is_mmap());
+    new_region->set_mmap(source_region.is_mmap(), source_region.mmapped_from_readable(), source_region.mmapped_from_writable());
     new_region->set_stack(source_region.is_stack());
     size_t page_offset_in_source_region = (offset_in_vmobject - source_region.offset_in_vmobject()) / PAGE_SIZE;
     for (size_t i = 0; i < new_region->page_count(); ++i) {

Kernel/Memory/InodeVMObject.cpp

@@ -96,14 +96,4 @@ u32 InodeVMObject::writable_mappings() const
     return count;
 }
-u32 InodeVMObject::executable_mappings() const
-{
-    u32 count = 0;
-    const_cast<InodeVMObject&>(*this).for_each_region([&](auto& region) {
-        if (region.is_executable())
-            ++count;
-    });
-    return count;
-}
 }

Kernel/Memory/InodeVMObject.h

@@ -26,7 +26,6 @@ public:
     int try_release_clean_pages(int page_amount);
     u32 writable_mappings() const;
-    u32 executable_mappings() const;
 protected:
     explicit InodeVMObject(Inode&, FixedArray<LockRefPtr<PhysicalPage>>&&, Bitmap dirty_pages);

Kernel/Memory/Region.cpp

@@ -106,7 +106,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
         auto region = TRY(Region::try_create_user_accessible(
             m_range, vmobject(), m_offset_in_vmobject, move(region_name), access(), m_cacheable ? Cacheable::Yes : Cacheable::No, m_shared));
-        region->set_mmap(m_mmap);
+        region->set_mmap(m_mmap, m_mmapped_from_readable, m_mmapped_from_writable);
         region->set_shared(m_shared);
         region->set_syscall_region(is_syscall_region());
         return region;
@@ -133,7 +133,7 @@ ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()
         clone_region->set_stack(true);
     }
     clone_region->set_syscall_region(is_syscall_region());
-    clone_region->set_mmap(m_mmap);
+    clone_region->set_mmap(m_mmap, m_mmapped_from_readable, m_mmapped_from_writable);
     return clone_region;
 }

Kernel/Memory/Region.h

@@ -89,7 +89,13 @@ public:
     void set_stack(bool stack) { m_stack = stack; }
     [[nodiscard]] bool is_mmap() const { return m_mmap; }
-    void set_mmap(bool mmap) { m_mmap = mmap; }
+    void set_mmap(bool mmap, bool description_was_readable, bool description_was_writable)
+    {
+        m_mmap = mmap;
+        m_mmapped_from_readable = description_was_readable;
+        m_mmapped_from_writable = description_was_writable;
+    }
     [[nodiscard]] bool is_write_combine() const { return m_write_combine; }
     ErrorOr<void> set_write_combine(bool);
@@ -194,6 +200,9 @@ public:
     [[nodiscard]] bool is_syscall_region() const { return m_syscall_region; }
     void set_syscall_region(bool b) { m_syscall_region = b; }
+    [[nodiscard]] bool mmapped_from_readable() const { return m_mmapped_from_readable; }
+    [[nodiscard]] bool mmapped_from_writable() const { return m_mmapped_from_writable; }
 private:
     Region();
     Region(NonnullLockRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
@@ -228,6 +237,8 @@ private:
     bool m_mmap : 1 { false };
     bool m_syscall_region : 1 { false };
     bool m_write_combine : 1 { false };
+    bool m_mmapped_from_readable : 1 { false };
+    bool m_mmapped_from_writable : 1 { false };
     IntrusiveRedBlackTreeNode<FlatPtr, Region, RawPtr<Region>> m_tree_node;
     IntrusiveListNode<Region> m_vmobject_list_node;

Kernel/Process.h

@@ -570,7 +570,7 @@ public:
     ErrorOr<void> require_no_promises() const;
     ErrorOr<void> validate_mmap_prot(int prot, bool map_stack, bool map_anonymous, Memory::Region const* region = nullptr) const;
-    ErrorOr<void> validate_inode_mmap_prot(int prot, Inode const& inode, bool map_shared) const;
+    ErrorOr<void> validate_inode_mmap_prot(int prot, bool description_readable, bool description_writable, bool map_shared) const;
 private:
     friend class MemoryManager;

Kernel/Process.cpp

@@ -103,25 +103,18 @@ ErrorOr<void> Process::validate_mmap_prot(int prot, bool map_stack, bool map_ano
     return {};
 }
-ErrorOr<void> Process::validate_inode_mmap_prot(int prot, Inode const& inode, bool map_shared) const
+ErrorOr<void> Process::validate_inode_mmap_prot(int prot, bool readable_description, bool description_writable, bool map_shared) const
 {
-    auto credentials = this->credentials();
-    auto metadata = inode.metadata();
-    if ((prot & PROT_READ) && !metadata.may_read(credentials))
+    if ((prot & PROT_READ) && !readable_description)
         return EACCES;
     if (map_shared) {
         // FIXME: What about readonly filesystem mounts? We cannot make a
         // decision here without knowing the mount flags, so we would need to
         // keep a Custody or something from mmap time.
-        if ((prot & PROT_WRITE) && !metadata.may_write(credentials))
+        if ((prot & PROT_WRITE) && !description_writable)
             return EACCES;
-        if (auto shared_vmobject = inode.shared_vmobject()) {
-            if ((prot & PROT_EXEC) && shared_vmobject->writable_mappings())
-                return EACCES;
-            if ((prot & PROT_WRITE) && shared_vmobject->executable_mappings())
-                return EACCES;
-        }
     }
     return {};
 }
@@ -227,7 +220,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
                 return EACCES;
         }
         if (description->inode())
-            TRY(validate_inode_mmap_prot(prot, *description->inode(), map_shared));
+            TRY(validate_inode_mmap_prot(prot, description->is_readable(), description->is_writable(), map_shared));
         vmobject = TRY(description->vmobject_for_mmap(*this, requested_range, used_offset, map_shared));
     }
@@ -251,7 +244,11 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
     if (!region)
         return ENOMEM;
-    region->set_mmap(true);
+    if (description)
+        region->set_mmap(true, description->is_readable(), description->is_writable());
+    else
+        region->set_mmap(true, false, false);
     if (map_shared)
         region->set_shared(true);
     if (map_stack)
@@ -289,7 +286,7 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         if (whole_region->access() == Memory::prot_to_region_access_flags(prot))
             return 0;
         if (whole_region->vmobject().is_inode())
-            TRY(validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(whole_region->vmobject()).inode(), whole_region->is_shared()));
+            TRY(validate_inode_mmap_prot(prot, whole_region->mmapped_from_readable(), whole_region->mmapped_from_writable(), whole_region->is_shared()));
         whole_region->set_readable(prot & PROT_READ);
         whole_region->set_writable(prot & PROT_WRITE);
         whole_region->set_executable(prot & PROT_EXEC);
@@ -306,7 +303,7 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
         if (old_region->access() == Memory::prot_to_region_access_flags(prot))
             return 0;
         if (old_region->vmobject().is_inode())
-            TRY(validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(old_region->vmobject()).inode(), old_region->is_shared()));
+            TRY(validate_inode_mmap_prot(prot, old_region->mmapped_from_readable(), old_region->mmapped_from_writable(), old_region->is_shared()));
         // Remove the old region from our regions tree, since were going to add another region
         // with the exact same start address.
@@ -339,7 +336,8 @@ ErrorOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int p
             return EPERM;
         TRY(validate_mmap_prot(prot, region->is_stack(), region->vmobject().is_anonymous(), region));
         if (region->vmobject().is_inode())
-            TRY(validate_inode_mmap_prot(prot, static_cast<Memory::InodeVMObject const&>(region->vmobject()).inode(), region->is_shared()));
+            TRY(validate_inode_mmap_prot(prot, region->mmapped_from_readable(), region->mmapped_from_writable(), region->is_shared()));
         full_size_found += region->range().intersect(range_to_mprotect).size();
     }
@@ -490,11 +488,14 @@ ErrorOr<FlatPtr> Process::sys$mremap(Userspace<Syscall::SC_mremap_params const*>
         auto new_vmobject = TRY(Memory::PrivateInodeVMObject::try_create_with_inode(inode));
         auto old_name = old_region->take_name();
+        bool old_region_was_mmapped_from_readable = old_region->mmapped_from_readable();
+        bool old_region_was_mmapped_from_writable = old_region->mmapped_from_writable();
         old_region->unmap();
         space->deallocate_region(*old_region);
         auto* new_region = TRY(space->allocate_region_with_vmobject(range, move(new_vmobject), old_offset, old_name->view(), old_prot, false));
-        new_region->set_mmap(true);
+        new_region->set_mmap(true, old_region_was_mmapped_from_readable, old_region_was_mmapped_from_writable);
         return new_region->vaddr().get();
     }