Kernel: Remove double RedBlackTree lookup in VM/Space region removal

We should never request a region's removal that we don't currently
own. All callers currently assert this themselves before calling.

Instead, let's just push the assert down into the RedBlackTree removal
and assume that we will always successfully remove the region.
This commit is contained in:
Brian Gianforcaro 2021-07-17 06:07:02 -07:00 committed by Andreas Kling
parent d879709ec7
commit c0987453e6
4 changed files with 13 additions and 26 deletions

View file

@ -334,7 +334,6 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
// Remove the old region from our regions tree, since we're going to add another region
// with the exact same start address, but don't deallocate it yet
auto region = space().take_region(*old_region);
VERIFY(region);
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@ -398,7 +397,6 @@ KResultOr<FlatPtr> Process::sys$mprotect(Userspace<void*> addr, size_t size, int
// Remove the old region from our regions tree, since we're going to add another region
// with the exact same start address, but don't deallocate it yet
auto region = space().take_region(*old_region);
VERIFY(region);
// Unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@ -564,8 +562,7 @@ KResultOr<FlatPtr> Process::sys$mremap(Userspace<const Syscall::SC_mremap_params
// Unmap without deallocating the VM range since we're going to reuse it.
old_region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
bool success = space().deallocate_region(*old_region);
VERIFY(success);
space().deallocate_region(*old_region);
auto new_region_or_error = space().allocate_region_with_vmobject(range, new_vmobject.release_nonnull(), old_offset, old_name->view(), old_prot, false);
if (new_region_or_error.is_error())

View file

@ -404,9 +404,7 @@ void Thread::exit(void* exit_value)
[[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
if (m_thread_specific_range.has_value()) {
auto* region = process().space().find_region_from_range(m_thread_specific_range.value());
VERIFY(region);
if (!process().space().deallocate_region(*region))
dbgln("Failed to unmap TLS range, exiting thread anyway.");
process().space().deallocate_region(*region);
}
die_if_needed();
}

View file

@ -57,8 +57,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
PerformanceManager::add_unmap_perf_event(*Process::current(), whole_region->range());
bool success = deallocate_region(*whole_region);
VERIFY(success);
deallocate_region(*whole_region);
return KSuccess;
}
@ -69,7 +68,6 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
// Remove the old region from our regions tree, since we're going to add another region
// with the exact same start address, but don't deallocate it yet
auto region = take_region(*old_region);
VERIFY(region);
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@ -108,15 +106,13 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
for (auto* old_region : regions) {
// if it's a full match we can delete the complete old region
if (old_region->range().intersect(range_to_unmap).size() == old_region->size()) {
bool res = deallocate_region(*old_region);
VERIFY(res);
deallocate_region(*old_region);
continue;
}
// Remove the old region from our regions tree, since we're going to add another region
// with the exact same start address, but don't deallocate it yet
auto region = take_region(*old_region);
VERIFY(region);
// We manually unmap the old region here, specifying that we *don't* want the VM deallocated.
region->unmap(Region::ShouldDeallocateVirtualMemoryRange::No);
@ -219,25 +215,21 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(Range const& range, Nonn
return added_region;
}
bool Space::deallocate_region(Region& region)
void Space::deallocate_region(Region& region)
{
return take_region(region);
take_region(region);
}
OwnPtr<Region> Space::take_region(Region& region)
NonnullOwnPtr<Region> Space::take_region(Region& region)
{
ScopedSpinLock lock(m_lock);
if (m_region_lookup_cache.region.unsafe_ptr() == &region)
m_region_lookup_cache.region = nullptr;
// FIXME: currently we traverse the RBTree twice, once to check if the region in the tree starting at region.vaddr()
// is the same region and once to actually remove it, maybe we can add some kind of remove_if()?
auto found_region = m_regions.find(region.vaddr().get());
if (!found_region)
return {};
if (found_region->ptr() != &region)
return {};
return m_regions.unsafe_remove(region.vaddr().get());
auto found_region = m_regions.unsafe_remove(region.vaddr().get());
VERIFY(found_region.ptr() == &region);
return found_region;
}
Region* Space::find_region_from_range(const Range& range)

View file

@ -39,8 +39,8 @@ public:
KResultOr<Region*> allocate_region_with_vmobject(const Range&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
KResultOr<Region*> allocate_region(const Range&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
bool deallocate_region(Region& region);
OwnPtr<Region> take_region(Region& region);
void deallocate_region(Region& region);
NonnullOwnPtr<Region> take_region(Region& region);
KResultOr<Region*> try_allocate_split_region(Region const& source_region, Range const&, size_t offset_in_vmobject);
KResultOr<Vector<Region*, 2>> try_split_region_around_range(Region const& source_region, Range const&);