From da24a937f5a0d647777a5161880625c34fe37234 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Tue, 23 Aug 2022 20:30:12 +0200
Subject: [PATCH] Kernel: Don't wrap AddressSpace's RegionTree in SpinlockProtected

Now that AddressSpace itself is always SpinlockProtected, we don't
need to also wrap the RegionTree. Whoever has the AddressSpace locked
is free to poke around its tree.
---
 Kernel/Coredump.cpp               | 150 +++++++++++-----------
 Kernel/Memory/AddressSpace.cpp    | 199 +++++++++++++-----------------
 Kernel/Memory/AddressSpace.h      |  10 +-
 Kernel/PerformanceEventBuffer.cpp |  14 +--
 Kernel/ProcessSpecificExposed.cpp |  78 ++++++------
 Kernel/Syscalls/fork.cpp          |  26 ++--
 6 files changed, 212 insertions(+), 265 deletions(-)
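The locking shape before and after, as an illustrative sketch rather than code taken from the diff below ("process" is a stand-in for any caller holding a Process; the with() calls are the kernel's usual AK::SpinlockProtected access pattern):

    // Before this patch: reaching the regions takes two nested locks.
    process->address_space().with([&](auto& space) {
        space->region_tree().with([&](auto& region_tree) {
            for (auto& region : region_tree.regions()) {
                // ... inspect or modify the region ...
            }
        });
    });

    // After this patch: the AddressSpace lock alone guards the tree.
    process->address_space().with([&](auto& space) {
        for (auto& region : space->region_tree().regions()) {
            // ... inspect or modify the region ...
        }
    });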
diff --git a/Kernel/Coredump.cpp b/Kernel/Coredump.cpp
index a57ac038c0..cbcb57b43b 100644
--- a/Kernel/Coredump.cpp
+++ b/Kernel/Coredump.cpp
@@ -47,18 +47,16 @@ Coredump::Coredump(NonnullLockRefPtr<Process> process, NonnullLockRefPtr<OpenFileDescription> description)
     : m_process(move(process))
     , m_description(move(description))
 {
     m_process->address_space().with([&](auto& space) {
-        space->region_tree().with([&](auto& region_tree) {
-            for (auto& region : region_tree.regions()) {
+        for (auto& region : space->region_tree().regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-                if (looks_like_userspace_heap_region(region))
-                    continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-                if (region.access() == Memory::Region::Access::None)
-                    continue;
-                ++m_num_program_headers;
-            }
-        });
+            if (region.access() == Memory::Region::Access::None)
+                continue;
+            ++m_num_program_headers;
+        }
     });
     ++m_num_program_headers; // +1 for NOTE segment
 }
@@ -138,39 +136,37 @@ ErrorOr<void> Coredump::write_program_headers(size_t notes_size)
 {
     size_t offset = sizeof(ElfW(Ehdr)) + m_num_program_headers * sizeof(ElfW(Phdr));
     m_process->address_space().with([&](auto& space) {
-        space->region_tree().with([&](auto& region_tree) {
-            for (auto& region : region_tree.regions()) {
+        for (auto& region : space->region_tree().regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-                if (looks_like_userspace_heap_region(region))
-                    continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
-                if (region.access() == Memory::Region::Access::None)
-                    continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-                ElfW(Phdr) phdr {};
+            ElfW(Phdr) phdr {};
 
-                phdr.p_type = PT_LOAD;
-                phdr.p_offset = offset;
-                phdr.p_vaddr = region.vaddr().get();
-                phdr.p_paddr = 0;
+            phdr.p_type = PT_LOAD;
+            phdr.p_offset = offset;
+            phdr.p_vaddr = region.vaddr().get();
+            phdr.p_paddr = 0;
 
-                phdr.p_filesz = region.page_count() * PAGE_SIZE;
-                phdr.p_memsz = region.page_count() * PAGE_SIZE;
-                phdr.p_align = 0;
+            phdr.p_filesz = region.page_count() * PAGE_SIZE;
+            phdr.p_memsz = region.page_count() * PAGE_SIZE;
+            phdr.p_align = 0;
 
-                phdr.p_flags = region.is_readable() ? PF_R : 0;
-                if (region.is_writable())
-                    phdr.p_flags |= PF_W;
-                if (region.is_executable())
-                    phdr.p_flags |= PF_X;
+            phdr.p_flags = region.is_readable() ? PF_R : 0;
+            if (region.is_writable())
+                phdr.p_flags |= PF_W;
+            if (region.is_executable())
+                phdr.p_flags |= PF_X;
 
-                offset += phdr.p_filesz;
+            offset += phdr.p_filesz;
 
-                [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(&phdr)), sizeof(ElfW(Phdr)));
-            }
-        });
+            [[maybe_unused]] auto rc = m_description->write(UserOrKernelBuffer::for_kernel_buffer(reinterpret_cast<u8*>(&phdr)), sizeof(ElfW(Phdr)));
+        }
     });
 
     ElfW(Phdr) notes_pheader {};
@@ -192,39 +188,37 @@ ErrorOr<void> Coredump::write_regions()
 {
     u8 zero_buffer[PAGE_SIZE] = {};
 
-    return m_process->address_space().with([&](auto& space) {
-        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-            for (auto& region : region_tree.regions()) {
-                VERIFY(!region.is_kernel());
+    return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
+        for (auto& region : space->region_tree().regions()) {
+            VERIFY(!region.is_kernel());
 
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-                if (looks_like_userspace_heap_region(region))
-                    continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
 
-                if (region.access() == Memory::Region::Access::None)
-                    continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-                // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
-                if (!region.is_mapped())
-                    continue;
+            // If we crashed in the middle of mapping in Regions, they do not have a page directory yet, and will crash on a remap() call
+            if (!region.is_mapped())
+                continue;
 
-                region.set_readable(true);
-                region.remap();
+            region.set_readable(true);
+            region.remap();
-                for (size_t i = 0; i < region.page_count(); i++) {
-                    auto page = region.physical_page(i);
-                    auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
-                        if (page)
-                            return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<u8*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
-                        // If the current page is not backed by a physical page, we zero it in the coredump file.
-                        return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
-                    }();
-                    TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
-                }
-            }
-            return {};
-        });
+            for (size_t i = 0; i < region.page_count(); i++) {
+                auto page = region.physical_page(i);
+                auto src_buffer = [&]() -> ErrorOr<UserOrKernelBuffer> {
+                    if (page)
+                        return UserOrKernelBuffer::for_user_buffer(reinterpret_cast<u8*>((region.vaddr().as_ptr() + (i * PAGE_SIZE))), PAGE_SIZE);
+                    // If the current page is not backed by a physical page, we zero it in the coredump file.
+                    return UserOrKernelBuffer::for_kernel_buffer(zero_buffer);
+                }();
+                TRY(m_description->write(src_buffer.value(), PAGE_SIZE));
+            }
+        }
+        return {};
     });
 }
@@ -285,36 +279,34 @@ ErrorOr<void> Coredump::create_notes_threads_data(auto& builder) const
 ErrorOr<void> Coredump::create_notes_regions_data(auto& builder) const
 {
     size_t region_index = 0;
-    return m_process->address_space().with([&](auto& space) {
-        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-            for (auto const& region : region_tree.regions()) {
+    return m_process->address_space().with([&](auto& space) -> ErrorOr<void> {
+        for (auto const& region : space->region_tree().regions()) {
 #if !INCLUDE_USERSPACE_HEAP_MEMORY_IN_COREDUMPS
-                if (looks_like_userspace_heap_region(region))
-                    continue;
+            if (looks_like_userspace_heap_region(region))
+                continue;
 #endif
 
-                if (region.access() == Memory::Region::Access::None)
-                    continue;
+            if (region.access() == Memory::Region::Access::None)
+                continue;
 
-                ELF::Core::MemoryRegionInfo info {};
-                info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
+            ELF::Core::MemoryRegionInfo info {};
+            info.header.type = ELF::Core::NotesEntryHeader::Type::MemoryRegionInfo;
 
-                info.region_start = region.vaddr().get();
-                info.region_end = region.vaddr().offset(region.size()).get();
-                info.program_header_index = region_index++;
+            info.region_start = region.vaddr().get();
+            info.region_end = region.vaddr().offset(region.size()).get();
+            info.program_header_index = region_index++;
 
-                TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
+            TRY(builder.append_bytes(ReadonlyBytes { (void*)&info, sizeof(info) }));
 
-                // NOTE: The region name *is* null-terminated, so the following is ok:
-                auto name = region.name();
-                if (name.is_empty())
-                    TRY(builder.append('\0'));
-                else
-                    TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
-            }
-            return {};
-        });
+            // NOTE: The region name *is* null-terminated, so the following is ok:
+            auto name = region.name();
+            if (name.is_empty())
+                TRY(builder.append('\0'));
+            else
+                TRY(builder.append(name.characters_without_null_termination(), name.length() + 1));
+        }
+        return {};
     });
 }
diff --git a/Kernel/Memory/AddressSpace.cpp b/Kernel/Memory/AddressSpace.cpp
index abcad85fbd..e37d3f22c3 100644
--- a/Kernel/Memory/AddressSpace.cpp
+++ b/Kernel/Memory/AddressSpace.cpp
@@ -25,7 +25,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const* parent)
 {
     VirtualRange total_range = [&]() -> VirtualRange {
         if (parent)
-            return parent->m_total_range;
+            return parent->m_region_tree.total_range();
         constexpr FlatPtr userspace_range_base = USER_RANGE_BASE;
         FlatPtr const userspace_range_ceiling = USER_RANGE_CEILING;
         size_t random_offset = (get_fast_random<u8>() % 2 * MiB) & PAGE_MASK;
@@ -40,8 +40,7 @@ ErrorOr<NonnullOwnPtr<AddressSpace>> AddressSpace::try_create(AddressSpace const* parent)
 
 AddressSpace::AddressSpace(NonnullLockRefPtr<PageDirectory> page_directory, VirtualRange total_range)
     : m_page_directory(move(page_directory))
-    , m_total_range(total_range)
-    , m_region_tree(LockRank::None, total_range)
+    , m_region_tree(total_range)
 {
 }
 
@@ -149,10 +148,7 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
         if (source_region.should_cow(page_offset_in_source_region + i))
             TRY(new_region->set_should_cow(i, true));
     }
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        TRY(region_tree.place_specifically(*new_region, range));
-        return {};
-    }));
+    TRY(m_region_tree.place_specifically(*new_region, range));
     return new_region.leak_ptr();
 }
@@ -167,14 +163,11 @@ ErrorOr<Region*> AddressSpace::allocate_region(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t size, size_t alignment, StringView name, int prot, AllocationStrategy strategy)
         region_name = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
     auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        if (requested_address.is_null()) {
-            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-        } else {
-            TRY(region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
-        }
-        return {};
-    }));
+    if (requested_address.is_null()) {
+        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+    } else {
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
+    }
     TRY(region->map(page_directory(), ShouldFlushTLB::No));
     return region.leak_ptr();
 }
@@ -210,29 +203,27 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(RandomizeVirtualAddress randomize_virtual_address, VirtualAddress requested_address, size_t size, size_t alignment, NonnullLockRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 
     auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
 
-    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Region*> {
-        if (requested_address.is_null())
-            TRY(region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
-        else
-            TRY(region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
+    if (requested_address.is_null())
+        TRY(m_region_tree.place_anywhere(*region, randomize_virtual_address, size, alignment));
+    else
+        TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
 
-        ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
-            // At this point the region is already part of the Process region tree, so we have to make sure
-            // we remove it from the tree before returning an error, or else the Region tree will contain
-            // a dangling pointer to the free'd Region instance
-            region_tree.remove(*region);
-        };
+    ArmedScopeGuard remove_region_from_tree_on_failure = [&] {
+        // At this point the region is already part of the Process region tree, so we have to make sure
+        // we remove it from the tree before returning an error, or else the Region tree will contain
+        // a dangling pointer to the free'd Region instance
+        m_region_tree.remove(*region);
+    };
 
-        if (prot == PROT_NONE) {
-            // For PROT_NONE mappings, we don't have to set up any page table mappings.
-            // We do still need to attach the region to the page_directory though.
-            region->set_page_directory(page_directory());
-        } else {
-            TRY(region->map(page_directory(), ShouldFlushTLB::No));
-        }
-        remove_region_from_tree_on_failure.disarm();
-        return region.leak_ptr();
-    });
+    if (prot == PROT_NONE) {
+        // For PROT_NONE mappings, we don't have to set up any page table mappings.
+        // We do still need to attach the region to the page_directory though.
+        region->set_page_directory(page_directory());
+    } else {
+        TRY(region->map(page_directory(), ShouldFlushTLB::No));
+    }
+    remove_region_from_tree_on_failure.disarm();
+    return region.leak_ptr();
 }
 
 void AddressSpace::deallocate_region(Region& region)
@@ -242,14 +233,14 @@ void AddressSpace::deallocate_region(Region& region)
 
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
-    auto did_remove = m_region_tree.with([&](auto& region_tree) { return region_tree.remove(region); });
+    auto did_remove = m_region_tree.remove(region);
     VERIFY(did_remove);
     return NonnullOwnPtr<Region> { NonnullOwnPtr<Region>::Adopt, region };
 }
 
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
-    auto* found_region = m_region_tree.with([&](auto& region_tree) { return region_tree.regions().find(range.base().get()); });
+    auto* found_region = m_region_tree.regions().find(range.base().get());
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
@@ -261,9 +252,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
-    return m_region_tree.with([&](auto& region_tree) {
-        return region_tree.find_region_containing(range);
-    });
+    return m_region_tree.find_region_containing(range);
 }
 
 ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
@@ -271,23 +260,21 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange const& range)
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
 
-    return m_region_tree.with([&](auto& region_tree) -> ErrorOr<Vector<Region*>> {
-        auto* found_region = region_tree.regions().find_largest_not_above(range.base().get());
-        if (!found_region)
-            return regions;
-        for (auto iter = region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
-            auto const& iter_range = (*iter).range();
-            if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
-                TRY(regions.try_append(&*iter));
-
-                total_size_collected += (*iter).size() - iter_range.intersect(range).size();
-                if (total_size_collected == range.size())
-                    break;
-            }
-        }
-
+    auto* found_region = m_region_tree.regions().find_largest_not_above(range.base().get());
+    if (!found_region)
         return regions;
-    });
+    for (auto iter = m_region_tree.regions().begin_from(*found_region); !iter.is_end(); ++iter) {
+        auto const& iter_range = (*iter).range();
+        if (iter_range.base() < range.end() && iter_range.end() > range.base()) {
+            TRY(regions.try_append(&*iter));
+
+            total_size_collected += (*iter).size() - iter_range.intersect(range).size();
+            if (total_size_collected == range.size())
+                break;
+        }
+    }
+
+    return regions;
 }
 
 // Carve out a virtual address range from a region and return the two regions on either side
@@ -321,18 +308,16 @@ void AddressSpace::dump_regions()
 
     dbgln("BEGIN{} END{} SIZE{} ACCESS NAME", addr_padding, addr_padding, addr_padding);
 
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
-                region.is_readable() ? 'R' : ' ',
-                region.is_writable() ? 'W' : ' ',
-                region.is_executable() ? 'X' : ' ',
-                region.is_shared() ? 'S' : ' ',
-                region.is_stack() ? 'T' : ' ',
-                region.is_syscall_region() ? 'C' : ' ',
-                region.name());
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}", region.vaddr().get(), region.vaddr().offset(region.size() - 1).get(), region.size(),
+            region.is_readable() ? 'R' : ' ',
+            region.is_writable() ? 'W' : ' ',
+            region.is_executable() ? 'X' : ' ',
+            region.is_shared() ? 'S' : ' ',
+            region.is_stack() ? 'T' : ' ',
+            region.is_syscall_region() ? 'C' : ' ',
+            region.name());
+    }
 
     MM.dump_kernel_regions();
 }
@@ -341,15 +326,11 @@ void AddressSpace::remove_all_regions(Badge<Process>)
     VERIFY(Thread::current() == g_finalizer);
     {
         SpinlockLocker pd_locker(m_page_directory->get_lock());
-        m_region_tree.with([&](auto& region_tree) {
-            for (auto& region : region_tree.regions())
-                region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
-        });
+        for (auto& region : m_region_tree.regions())
+            region.unmap_with_locks_held(ShouldFlushTLB::No, pd_locker);
     }
 
-    m_region_tree.with([&](auto& region_tree) {
-        region_tree.delete_all_regions_assuming_they_are_unmapped();
-    });
+    m_region_tree.delete_all_regions_assuming_they_are_unmapped();
 }
 
 size_t AddressSpace::amount_dirty_private() const
@@ -358,25 +339,20 @@ size_t AddressSpace::amount_dirty_private() const
     // The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     // That's probably a situation that needs to be looked at in general.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            if (!region.is_shared())
-                amount += region.amount_dirty();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        if (!region.is_shared())
+            amount += region.amount_dirty();
+    }
     return amount;
 }
 
 ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 {
     HashTable<InodeVMObject const*> vmobjects;
-    TRY(m_region_tree.with([&](auto& region_tree) -> ErrorOr<void> {
-        for (auto const& region : region_tree.regions()) {
-            if (region.vmobject().is_inode())
-                TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
-        }
-        return {};
-    }));
+    for (auto const& region : m_region_tree.regions()) {
+        if (region.vmobject().is_inode())
+            TRY(vmobjects.try_set(&static_cast<InodeVMObject const&>(region.vmobject())));
+    }
     size_t amount = 0;
     for (auto& vmobject : vmobjects)
         amount += vmobject->amount_clean();
@@ -386,11 +362,9 @@ ErrorOr<size_t> AddressSpace::amount_clean_inode() const
 size_t AddressSpace::amount_virtual() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.size();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.size();
+    }
     return amount;
 }
 
@@ -398,11 +372,9 @@ size_t AddressSpace::amount_resident() const
 {
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.amount_resident();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.amount_resident();
+    }
     return amount;
 }
@@ -413,40 +385,35 @@ size_t AddressSpace::amount_shared() const
     // and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
     // so that every Region contributes +1 ref to each of its PhysicalPages.
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            amount += region.amount_shared();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        amount += region.amount_shared();
+    }
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_volatile() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            if (!region.vmobject().is_anonymous())
-                continue;
-            auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
-            if (vmobject.is_purgeable() && vmobject.is_volatile())
-                amount += region.amount_resident();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        if (!region.vmobject().is_anonymous())
+            continue;
+        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+        if (vmobject.is_purgeable() && vmobject.is_volatile())
+            amount += region.amount_resident();
+    }
     return amount;
 }
 
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     size_t amount = 0;
-    m_region_tree.with([&](auto& region_tree) {
-        for (auto const& region : region_tree.regions()) {
-            if (!region.vmobject().is_anonymous())
-                continue;
-            auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
-            if (vmobject.is_purgeable() && !vmobject.is_volatile())
-                amount += region.amount_resident();
-        }
-    });
+    for (auto const& region : m_region_tree.regions()) {
+        if (!region.vmobject().is_anonymous())
+            continue;
+        auto const& vmobject = static_cast<AnonymousVMObject const&>(region.vmobject());
+        if (vmobject.is_purgeable() && !vmobject.is_volatile())
+            amount += region.amount_resident();
+    }
     return amount;
 }
diff --git a/Kernel/Memory/AddressSpace.h b/Kernel/Memory/AddressSpace.h
index e2df65e0b6..d1617fc7ee 100644
--- a/Kernel/Memory/AddressSpace.h
+++ b/Kernel/Memory/AddressSpace.h
@@ -27,8 +27,8 @@ public:
     PageDirectory& page_directory() { return *m_page_directory; }
     PageDirectory const& page_directory() const { return *m_page_directory; }
 
-    SpinlockProtected<RegionTree>& region_tree() { return m_region_tree; }
-    SpinlockProtected<RegionTree> const& region_tree() const { return m_region_tree; }
+    RegionTree& region_tree() { return m_region_tree; }
+    RegionTree const& region_tree() const { return m_region_tree; }
 
     void dump_regions();
 
@@ -66,11 +66,7 @@ private:
 
     LockRefPtr<PageDirectory> m_page_directory;
 
-    // NOTE: The total range is also in the RegionTree, but since it never changes,
-    // it's nice to have it in a place where we can access it without locking.
-    VirtualRange m_total_range;
-
-    SpinlockProtected<RegionTree> m_region_tree;
+    RegionTree m_region_tree;
 
     bool m_enforces_syscall_regions { false };
 };
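For context, the SpinlockProtected<RegionTree> wrapper that AddressSpace.h drops above works roughly like the following sketch (an assumed simplification of the AK class, which also takes a LockRank, as the deleted m_region_tree(LockRank::None, total_range) initializer shows). Because the wrapped value is reachable only through with(), every caller necessarily holds the spinlock while touching it; unwrapping m_region_tree shifts that obligation to the outer SpinlockProtected<AddressSpace>:

    // Minimal sketch of the SpinlockProtected pattern (not the actual
    // AK implementation).
    template<typename T>
    class SpinlockProtected {
    public:
        template<typename... Args>
        explicit SpinlockProtected(Args&&... args)
            : m_value(forward<Args>(args)...)
        {
        }

        // The only way to reach the value: lock, run the callback
        // against the value, unlock when the callback returns.
        template<typename Callback>
        decltype(auto) with(Callback callback)
        {
            SpinlockLocker locker(m_spinlock);
            return callback(m_value);
        }

    private:
        T m_value;
        Spinlock m_spinlock;
    };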
diff --git a/Kernel/PerformanceEventBuffer.cpp b/Kernel/PerformanceEventBuffer.cpp
index 191a50daab..123cb26013 100644
--- a/Kernel/PerformanceEventBuffer.cpp
+++ b/Kernel/PerformanceEventBuffer.cpp
@@ -352,14 +352,12 @@ ErrorOr<void> PerformanceEventBuffer::add_process(Process const& process, ProcessEventType event_type)
     });
     TRY(result);
 
-    return process.address_space().with([&](auto& space) {
-        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-            for (auto const& region : region_tree.regions()) {
-                TRY(append_with_ip_and_bp(process.pid(), 0,
-                    0, 0, PERF_EVENT_MMAP, 0, region.range().base().get(), region.range().size(), region.name()));
-            }
-            return {};
-        });
+    return process.address_space().with([&](auto& space) -> ErrorOr<void> {
+        for (auto const& region : space->region_tree().regions()) {
+            TRY(append_with_ip_and_bp(process.pid(), 0,
+                0, 0, PERF_EVENT_MMAP, 0, region.range().base().get(), region.range().size(), region.name()));
+        }
+        return {};
     });
 }
diff --git a/Kernel/ProcessSpecificExposed.cpp b/Kernel/ProcessSpecificExposed.cpp
index 2e4cea6c29..f049aacb05 100644
--- a/Kernel/ProcessSpecificExposed.cpp
+++ b/Kernel/ProcessSpecificExposed.cpp
@@ -267,47 +267,45 @@ ErrorOr<void> Process::procfs_get_fds_stats(KBufferBuilder& builder) const
 ErrorOr<void> Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) const
 {
     auto array = TRY(JsonArraySerializer<>::try_create(builder));
-    TRY(address_space().with([&](auto& space) {
-        return space->region_tree().with([&](auto& region_tree) -> ErrorOr<void> {
-            for (auto const& region : region_tree.regions()) {
-                auto current_process_credentials = Process::current().credentials();
-                if (!region.is_user() && !current_process_credentials->is_superuser())
-                    continue;
-                auto region_object = TRY(array.add_object());
-                TRY(region_object.add("readable"sv, region.is_readable()));
-                TRY(region_object.add("writable"sv, region.is_writable()));
-                TRY(region_object.add("executable"sv, region.is_executable()));
-                TRY(region_object.add("stack"sv, region.is_stack()));
-                TRY(region_object.add("shared"sv, region.is_shared()));
-                TRY(region_object.add("syscall"sv, region.is_syscall_region()));
-                TRY(region_object.add("purgeable"sv, region.vmobject().is_anonymous()));
-                if (region.vmobject().is_anonymous()) {
-                    TRY(region_object.add("volatile"sv, static_cast<Memory::AnonymousVMObject const&>(region.vmobject()).is_volatile()));
-                }
-                TRY(region_object.add("cacheable"sv, region.is_cacheable()));
-                TRY(region_object.add("address"sv, region.vaddr().get()));
-                TRY(region_object.add("size"sv, region.size()));
-                TRY(region_object.add("amount_resident"sv, region.amount_resident()));
-                TRY(region_object.add("amount_dirty"sv, region.amount_dirty()));
-                TRY(region_object.add("cow_pages"sv, region.cow_pages()));
-                TRY(region_object.add("name"sv, region.name()));
-                TRY(region_object.add("vmobject"sv, region.vmobject().class_name()));
-
-                StringBuilder pagemap_builder;
-                for (size_t i = 0; i < region.page_count(); ++i) {
-                    auto page = region.physical_page(i);
-                    if (!page)
-                        pagemap_builder.append('N');
-                    else if (page->is_shared_zero_page() || page->is_lazy_committed_page())
-                        pagemap_builder.append('Z');
-                    else
-                        pagemap_builder.append('P');
-                }
-                TRY(region_object.add("pagemap"sv, pagemap_builder.string_view()));
-                TRY(region_object.finish());
-            }
-            return {};
-        });
+    TRY(address_space().with([&](auto& space) -> ErrorOr<void> {
+        for (auto const& region : space->region_tree().regions()) {
+            auto current_process_credentials = Process::current().credentials();
+            if (!region.is_user() && !current_process_credentials->is_superuser())
+                continue;
+            auto region_object = TRY(array.add_object());
+            TRY(region_object.add("readable"sv, region.is_readable()));
+            TRY(region_object.add("writable"sv, region.is_writable()));
+            TRY(region_object.add("executable"sv, region.is_executable()));
+            TRY(region_object.add("stack"sv, region.is_stack()));
+            TRY(region_object.add("shared"sv, region.is_shared()));
+            TRY(region_object.add("syscall"sv, region.is_syscall_region()));
+            TRY(region_object.add("purgeable"sv, region.vmobject().is_anonymous()));
+            if (region.vmobject().is_anonymous()) {
+                TRY(region_object.add("volatile"sv, static_cast<Memory::AnonymousVMObject const&>(region.vmobject()).is_volatile()));
+            }
+            TRY(region_object.add("cacheable"sv, region.is_cacheable()));
+            TRY(region_object.add("address"sv, region.vaddr().get()));
+            TRY(region_object.add("size"sv, region.size()));
+            TRY(region_object.add("amount_resident"sv, region.amount_resident()));
+            TRY(region_object.add("amount_dirty"sv, region.amount_dirty()));
+            TRY(region_object.add("cow_pages"sv, region.cow_pages()));
+            TRY(region_object.add("name"sv, region.name()));
+            TRY(region_object.add("vmobject"sv, region.vmobject().class_name()));
+
+            StringBuilder pagemap_builder;
+            for (size_t i = 0; i < region.page_count(); ++i) {
+                auto page = region.physical_page(i);
+                if (!page)
+                    pagemap_builder.append('N');
+                else if (page->is_shared_zero_page() || page->is_lazy_committed_page())
+                    pagemap_builder.append('Z');
+                else
+                    pagemap_builder.append('P');
+            }
+            TRY(region_object.add("pagemap"sv, pagemap_builder.string_view()));
+            TRY(region_object.finish());
+        }
+        return {};
     }));
     TRY(array.finish());
     return {};
diff --git a/Kernel/Syscalls/fork.cpp b/Kernel/Syscalls/fork.cpp
index 130a491294..d91df90824 100644
--- a/Kernel/Syscalls/fork.cpp
+++ b/Kernel/Syscalls/fork.cpp
@@ -123,23 +123,19 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 #endif
 
     TRY(address_space().with([&](auto& parent_space) {
-        return child->address_space().with([&](auto& child_space) {
+        return child->address_space().with([&](auto& child_space) -> ErrorOr<void> {
             child_space->set_enforces_syscall_regions(parent_space->enforces_syscall_regions());
-            return parent_space->region_tree().with([&](auto& parent_region_tree) -> ErrorOr<void> {
-                return child_space->region_tree().with([&](auto& child_region_tree) -> ErrorOr<void> {
-                    for (auto& region : parent_region_tree.regions()) {
-                        dbgln_if(FORK_DEBUG, "fork: cloning Region '{}' @ {}", region.name(), region.vaddr());
-                        auto region_clone = TRY(region.try_clone());
-                        TRY(region_clone->map(child_space->page_directory(), Memory::ShouldFlushTLB::No));
-                        TRY(child_region_tree.place_specifically(*region_clone, region.range()));
-                        auto* child_region = region_clone.leak_ptr();
+            for (auto& region : parent_space->region_tree().regions()) {
+                dbgln_if(FORK_DEBUG, "fork: cloning Region '{}' @ {}", region.name(), region.vaddr());
+                auto region_clone = TRY(region.try_clone());
+                TRY(region_clone->map(child_space->page_directory(), Memory::ShouldFlushTLB::No));
+                TRY(child_space->region_tree().place_specifically(*region_clone, region.range()));
+                auto* child_region = region_clone.leak_ptr();
 
-                        if (&region == m_master_tls_region.unsafe_ptr())
-                            child->m_master_tls_region = TRY(child_region->try_make_weak_ptr());
-                    }
-                    return {};
-                });
-            });
+                if (&region == m_master_tls_region.unsafe_ptr())
+                    child->m_master_tls_region = TRY(child_region->try_make_weak_ptr());
+            }
+            return {};
         });
     }));
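A detail worth noting in allocate_region_with_vmobject() above: with the inner with() callback gone, error cleanup rests entirely on AK::ArmedScopeGuard, which runs its callback on scope exit unless disarm() is called first. A minimal sketch of the idiom, with map_into_page_tables() as a hypothetical stand-in for the fallible mapping step:

    ErrorOr<void> place_and_map(RegionTree& tree, Region& region)
    {
        TRY(tree.place_specifically(region, region.range()));

        // From here on the tree points at the region, so every failure
        // path must unlink it again before the region is destroyed.
        ArmedScopeGuard remove_region_on_failure = [&] {
            tree.remove(region);
        };

        TRY(map_into_page_tables(region)); // hypothetical fallible step

        remove_region_on_failure.disarm(); // success: keep the placement
        return {};
    }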