Kernel: Make VM allocation atomic for userspace regions

This patch moves AddressSpace (the per-process memory manager) to using
the new atomic "place" APIs in RegionTree as well, just like we did for
MemoryManager in the previous commit.

This required updating quite a few places where VM allocation and
actually committing a Region object to the AddressSpace were separated
by other code.

All you have to do now is call into AddressSpace once and it'll take
care of everything for you.
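
To illustrate the shape of the change, here is the Master TLS call site from sys$allocate_tls (taken from the diff below); the old two-step pattern of allocating a VirtualRange and then committing a Region becomes a single call into AddressSpace:

    // Before: VM allocation and Region creation were two separate, non-atomic steps.
    auto range = TRY(address_space().try_allocate_range({}, size));
    auto* region = TRY(address_space().allocate_region(range, "Master TLS"sv, PROT_READ | PROT_WRITE));

    // After: AddressSpace allocates, places, and maps the Region in one call.
    // A null requested address ({}) means "place anywhere"; PAGE_SIZE is the requested alignment.
    auto* region = TRY(address_space().allocate_region({}, size, PAGE_SIZE, "Master TLS"sv, PROT_READ | PROT_WRITE));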
Andreas Kling 2022-04-03 17:12:39 +02:00
parent e852a69a06
commit 07f3d09c55
13 changed files with 84 additions and 91 deletions

View file

@ -319,16 +319,6 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
return true;
}
UNMAP_AFTER_INIT static ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
{
auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
Memory::VirtualRange range { VirtualAddress { paddr.get() }, size };
TRY(MM.region_tree().place_specifically(*region, range));
TRY(region->map(MM.kernel_page_directory()));
return region;
}
UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
{
VERIFY(!m_ap_boot_environment);
@ -342,7 +332,7 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
constexpr u64 apic_startup_region_base = 0x8000;
auto apic_startup_region_size = Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(FlatPtr))).release_value_but_fixme_should_propagate_errors();
VERIFY(apic_startup_region_size < USER_RANGE_BASE);
auto apic_startup_region = MUST(create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
auto apic_startup_region = MUST(MM.region_tree().create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
u8* apic_startup_region_ptr = apic_startup_region->vaddr().as_ptr();
memcpy(apic_startup_region_ptr, reinterpret_cast<void const*>(apic_ap_start), apic_ap_start_size);

View file

@ -136,23 +136,14 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
return {};
}
ErrorOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
{
vaddr.mask(PAGE_MASK);
size = TRY(page_round_up(size));
if (vaddr.is_null())
return m_region_tree.try_allocate_anywhere(size, alignment);
return m_region_tree.try_allocate_specific(vaddr, size);
}
ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
{
OwnPtr<KString> region_name;
if (!source_region.name().is_null())
region_name = TRY(KString::try_create(source_region.name()));
auto new_region = TRY(Region::try_create_user_accessible(
range, source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
auto new_region = TRY(Region::create_unplaced(
source_region.vmobject(), offset_in_vmobject, move(region_name), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared()));
new_region->set_syscall_region(source_region.is_syscall_region());
new_region->set_mmap(source_region.is_mmap());
new_region->set_stack(source_region.is_stack());
@ -161,29 +152,46 @@ ErrorOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_re
if (source_region.should_cow(page_offset_in_source_region + i))
TRY(new_region->set_should_cow(i, true));
}
return add_region(move(new_region));
SpinlockLocker locker(m_lock);
TRY(m_region_tree.place_specifically(*new_region, range));
return new_region.leak_ptr();
}
ErrorOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
ErrorOr<Region*> AddressSpace::allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot, AllocationStrategy strategy)
{
VERIFY(range.is_valid());
if (!requested_address.is_page_aligned())
return EINVAL;
auto size = TRY(Memory::page_round_up(requested_size));
auto alignment = TRY(Memory::page_round_up(requested_alignment));
OwnPtr<KString> region_name;
if (!name.is_null())
region_name = TRY(KString::try_create(name));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(range.size(), strategy));
auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, false));
auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), prot_to_region_access_flags(prot)));
if (requested_address.is_null())
TRY(m_region_tree.place_anywhere(*region, size, alignment));
else
TRY(m_region_tree.place_specifically(*region, VirtualRange { requested_address, size }));
TRY(region->map(page_directory(), ShouldFlushTLB::No));
return add_region(move(region));
return region.leak_ptr();
}
ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
VERIFY(range.is_valid());
size_t end_in_vmobject = offset_in_vmobject + range.size();
if (end_in_vmobject <= offset_in_vmobject) {
dbgln("allocate_region_with_vmobject: Overflow (offset + size)");
return allocate_region_with_vmobject(requested_range.base(), requested_range.size(), PAGE_SIZE, move(vmobject), offset_in_vmobject, name, prot, shared);
}
ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
{
if (!requested_address.is_page_aligned())
return EINVAL;
}
auto size = TRY(page_round_up(requested_size));
auto alignment = TRY(page_round_up(requested_alignment));
if (Checked<size_t>::addition_would_overflow(offset_in_vmobject, requested_size))
return EOVERFLOW;
size_t end_in_vmobject = offset_in_vmobject + requested_size;
if (offset_in_vmobject >= vmobject->size()) {
dbgln("allocate_region_with_vmobject: Attempt to allocate a region with an offset past the end of its VMObject.");
return EINVAL;
@ -196,7 +204,16 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
OwnPtr<KString> region_name;
if (!name.is_null())
region_name = TRY(KString::try_create(name));
auto region = TRY(Region::try_create_user_accessible(range, move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
auto region = TRY(Region::create_unplaced(move(vmobject), offset_in_vmobject, move(region_name), prot_to_region_access_flags(prot), Region::Cacheable::Yes, shared));
SpinlockLocker locker(m_lock);
if (requested_address.is_null())
TRY(m_region_tree.place_anywhere(*region, size, alignment));
else
TRY(m_region_tree.place_specifically(*region, VirtualRange { VirtualAddress { requested_address }, size }));
if (prot == PROT_NONE) {
// For PROT_NONE mappings, we don't have to set up any page table mappings.
// We do still need to attach the region to the page_directory though.
@ -205,7 +222,7 @@ ErrorOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const&
} else {
TRY(region->map(page_directory(), ShouldFlushTLB::No));
}
return add_region(move(region));
return region.leak_ptr();
}
void AddressSpace::deallocate_region(Region& region)
@ -267,15 +284,6 @@ ErrorOr<Vector<Region*>> AddressSpace::find_regions_intersecting(VirtualRange co
return regions;
}
ErrorOr<Region*> AddressSpace::add_region(NonnullOwnPtr<Region> region)
{
SpinlockLocker lock(m_lock);
// NOTE: We leak the region into the IRBT here. It must be deleted or readopted when removed from the tree.
auto* ptr = region.leak_ptr();
m_region_tree.regions().insert(ptr->vaddr().get(), *ptr);
return ptr;
}
// Carve out a virtual address range from a region and return the two regions on either side
ErrorOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(Region const& source_region, VirtualRange const& desired_range)
{

View file

@ -26,8 +26,6 @@ public:
PageDirectory& page_directory() { return *m_page_directory; }
PageDirectory const& page_directory() const { return *m_page_directory; }
ErrorOr<Region*> add_region(NonnullOwnPtr<Region>);
size_t region_count() const { return m_region_tree.regions().size(); }
auto& regions() { return m_region_tree.regions(); }
@ -37,10 +35,9 @@ public:
ErrorOr<void> unmap_mmap_range(VirtualAddress, size_t);
ErrorOr<VirtualRange> try_allocate_range(VirtualAddress, size_t, size_t alignment = PAGE_SIZE);
ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
ErrorOr<Region*> allocate_region(VirtualRange const&, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
ErrorOr<Region*> allocate_region_with_vmobject(VirtualRange requested_range, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
ErrorOr<Region*> allocate_region_with_vmobject(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, StringView name, int prot, bool shared);
ErrorOr<Region*> allocate_region(VirtualAddress requested_address, size_t requested_size, size_t requested_alignment, StringView name, int prot = PROT_READ | PROT_WRITE, AllocationStrategy strategy = AllocationStrategy::Reserve);
void deallocate_region(Region& region);
NonnullOwnPtr<Region> take_region(Region& region);

View file

@ -77,7 +77,7 @@ bool MemoryManager::is_initialized()
static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
{
auto kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
size_t kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
}

View file

@ -84,9 +84,9 @@ ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
return adopt_nonnull_own_or_enomem(new (nothrow) Region);
}
ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
{
return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, shared));
}
ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()

View file

@ -57,7 +57,7 @@ public:
static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
static ErrorOr<NonnullOwnPtr<Region>> create_unbacked();
static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes, bool shared = false);
~Region();

View file

@ -5,6 +5,7 @@
*/
#include <AK/Format.h>
#include <Kernel/Memory/AnonymousVMObject.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Memory/RegionTree.h>
#include <Kernel/Random.h>
@ -167,4 +168,14 @@ ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const&
return {};
}
ErrorOr<NonnullOwnPtr<Memory::Region>> RegionTree::create_identity_mapped_region(PhysicalAddress paddr, size_t size)
{
auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
Memory::VirtualRange range { VirtualAddress { (FlatPtr)paddr.get() }, size };
region->m_range = range;
TRY(region->map(MM.kernel_page_directory()));
return region;
}
}

View file

@ -37,6 +37,8 @@ public:
ErrorOr<void> place_anywhere(Region&, size_t size, size_t alignment = PAGE_SIZE);
ErrorOr<void> place_specifically(Region&, VirtualRange const&);
ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress, size_t);
ErrorOr<VirtualRange> try_allocate_anywhere(size_t size, size_t alignment = PAGE_SIZE);
ErrorOr<VirtualRange> try_allocate_specific(VirtualAddress base, size_t size);
ErrorOr<VirtualRange> try_allocate_randomized(size_t size, size_t alignment = PAGE_SIZE);

View file

@ -17,8 +17,7 @@ ErrorOr<FlatPtr> Process::sys$map_time_page()
auto& vmobject = TimeManagement::the().time_page_vmobject();
auto range = TRY(address_space().region_tree().try_allocate_randomized(PAGE_SIZE, PAGE_SIZE));
auto* region = TRY(address_space().allocate_region_with_vmobject(range, vmobject, 0, "Kernel time page"sv, PROT_READ, true));
auto* region = TRY(address_space().allocate_region_with_vmobject({}, PAGE_SIZE, PAGE_SIZE, vmobject, 0, "Kernel time page"sv, PROT_READ, true));
return region->vaddr().get();
}

View file

@ -294,9 +294,8 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
return ENOEXEC;
}
auto range = TRY(new_space->try_allocate_range({}, program_header.size_in_memory()));
auto region_name = TRY(KString::formatted("{} (master-tls)", elf_name));
master_tls_region = TRY(new_space->allocate_region(range, region_name->view(), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
master_tls_region = TRY(new_space->allocate_region({}, program_header.size_in_memory(), PAGE_SIZE, region_name->view(), PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
master_tls_size = program_header.size_in_memory();
master_tls_alignment = program_header.alignment();
@ -324,8 +323,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
auto range_end = VirtualAddress { rounded_range_end };
auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get(), program_header.alignment()));
auto region = TRY(new_space->allocate_region(range, region_name->view(), prot, AllocationStrategy::Reserve));
auto region = TRY(new_space->allocate_region(range_base, range_end.get() - range_base.get(), PAGE_SIZE, region_name->view(), prot, AllocationStrategy::Reserve));
// It's not always the case with PIE executables (and very well shouldn't be) that the
// virtual address in the program header matches the one we end up giving the process.
@ -360,8 +358,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
auto range_end = VirtualAddress { rounded_range_end };
auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get(), program_header.alignment()));
auto region = TRY(new_space->allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name->view(), prot, true));
auto region = TRY(new_space->allocate_region_with_vmobject(range_base, range_end.get() - range_base.get(), program_header.alignment(), *vmobject, program_header.offset(), elf_name->view(), prot, true));
if (should_allow_syscalls == ShouldAllowSyscalls::Yes)
region->set_syscall_region(true);
@ -395,8 +392,7 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
return ENOEXEC;
}
auto stack_range = TRY(new_space->try_allocate_range({}, Thread::default_userspace_stack_size));
auto* stack_region = TRY(new_space->allocate_region(stack_range, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
auto* stack_region = TRY(new_space->allocate_region({}, Thread::default_userspace_stack_size, PAGE_SIZE, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve));
stack_region->set_stack(true);
return LoadResult {
@ -473,8 +469,7 @@ ErrorOr<void> Process::do_exec(NonnullRefPtr<OpenFileDescription> main_program_d
bool has_interpreter = interpreter_description;
interpreter_description = nullptr;
auto signal_trampoline_range = TRY(load_result.space->try_allocate_range({}, PAGE_SIZE));
auto* signal_trampoline_region = TRY(load_result.space->allocate_region_with_vmobject(signal_trampoline_range, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true));
auto* signal_trampoline_region = TRY(load_result.space->allocate_region_with_vmobject({}, PAGE_SIZE, PAGE_SIZE, g_signal_trampoline_region->vmobject(), 0, "Signal trampoline", PROT_READ | PROT_EXEC, true));
signal_trampoline_region->set_syscall_region(true);
// (For dynamically linked executable) Allocate an FD for passing the main executable to the dynamic loader.

View file

@ -112,7 +112,8 @@ ErrorOr<FlatPtr> Process::sys$fork(RegisterState& regs)
dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region.name(), region.vaddr());
auto region_clone = TRY(region.try_clone());
TRY(region_clone->map(child->address_space().page_directory(), Memory::ShouldFlushTLB::No));
auto* child_region = TRY(child->address_space().add_region(move(region_clone)));
TRY(child->address_space().region_tree().place_specifically(*region_clone, region.range()));
auto* child_region = region_clone.leak_ptr();
if (&region == m_master_tls_region.unsafe_ptr())
child->m_master_tls_region = TRY(child_region->try_make_weak_ptr());

View file

@ -191,23 +191,15 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
Memory::Region* region = nullptr;
auto range = TRY([&]() -> ErrorOr<Memory::VirtualRange> {
if (map_randomized)
return address_space().region_tree().try_allocate_randomized(rounded_size, alignment);
// If MAP_FIXED is specified, existing mappings that intersect the requested range are removed.
if (map_fixed)
TRY(address_space().unmap_mmap_range(VirtualAddress(addr), size));
// If MAP_FIXED is specified, existing mappings that intersect the requested range are removed.
if (map_fixed)
TRY(address_space().unmap_mmap_range(VirtualAddress(addr), size));
auto range = address_space().try_allocate_range(VirtualAddress(addr), size, alignment);
if (range.is_error()) {
if (addr && !(map_fixed || map_fixed_noreplace)) {
// If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
range = address_space().try_allocate_range({}, size, alignment);
}
}
return range;
}());
Memory::VirtualRange requested_range { VirtualAddress { addr }, size };
if (addr && !(map_fixed || map_fixed_noreplace)) {
// If there's an address but MAP_FIXED wasn't specified, the address is just a hint.
requested_range = { {}, 0 };
}
if (map_anonymous) {
auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
@ -218,7 +210,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(rounded_size, strategy));
}
region = TRY(address_space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), 0, {}, prot, map_shared));
region = TRY(address_space().allocate_region_with_vmobject(requested_range.base(), requested_range.size(), alignment, vmobject.release_nonnull(), 0, {}, prot, map_shared));
} else {
if (offset < 0)
return EINVAL;
@ -237,7 +229,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<Syscall::SC_mmap_params const*> use
if (description->inode())
TRY(validate_inode_mmap_prot(prot, *description->inode(), map_shared));
region = TRY(description->mmap(*this, range, static_cast<u64>(offset), prot, map_shared));
region = TRY(description->mmap(*this, requested_range, static_cast<u64>(offset), prot, map_shared));
}
if (!region)
@ -515,8 +507,7 @@ ErrorOr<FlatPtr> Process::sys$allocate_tls(Userspace<char const*> initial_data,
if (multiple_threads)
return EINVAL;
auto range = TRY(address_space().try_allocate_range({}, size));
auto* region = TRY(address_space().allocate_region(range, "Master TLS"sv, PROT_READ | PROT_WRITE));
auto* region = TRY(address_space().allocate_region({}, size, PAGE_SIZE, "Master TLS"sv, PROT_READ | PROT_WRITE));
m_master_tls_region = TRY(region->try_make_weak_ptr());
m_master_tls_size = size;

View file

@ -1419,10 +1419,9 @@ ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
if (!process().m_master_tls_region)
return {};
auto range = TRY(process().address_space().try_allocate_range({}, thread_specific_region_size()));
auto* region = TRY(process().address_space().allocate_region(range, "Thread-specific", PROT_READ | PROT_WRITE));
auto* region = TRY(process().address_space().allocate_region({}, thread_specific_region_size(), PAGE_SIZE, "Thread-specific", PROT_READ | PROT_WRITE));
m_thread_specific_range = range;
m_thread_specific_range = region->range();
SmapDisabler disabler;
auto* thread_specific_data = (ThreadSpecificData*)region->vaddr().offset(align_up_to(process().m_master_tls_size, thread_specific_region_alignment())).as_ptr();