Mirror of https://github.com/SerenityOS/serenity (synced 2024-07-23 02:55:15 +00:00)
Kernel: Make VM allocation atomic for kernel regions
Instead of first allocating the VM range and then inserting a region
with that range into the MM region tree, we now do both things in a
single atomic operation:

- RegionTree::place_anywhere(Region&, size, alignment)
- RegionTree::place_specifically(Region&, address, size)

To reduce the number of things we do while locking the region tree,
we also require callers to provide a constructed Region object.
parent cbf52d474c
commit e852a69a06
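In short: the old flow allocated a VirtualRange first and only later constructed and registered a Region for it; the new flow constructs an unplaced Region up front and hands it to the RegionTree, which finds and claims a range for it in one locked step. A condensed before/after sketch, distilled from the MemoryManager hunks below:

    // Before: two separate steps; the range is chosen before any
    // region occupying it exists in the tree.
    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);

    // After: construct first, then place atomically, then map.
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
    TRY(m_region_tree.place_anywhere(*region, size));
    TRY(region->map(kernel_page_directory()));
    return region;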
@@ -319,19 +319,14 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
     return true;
 }
 
-UNMAP_AFTER_INIT static NonnullOwnPtr<Memory::Region> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
+UNMAP_AFTER_INIT static ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress paddr, size_t size)
 {
-    auto maybe_vmobject = Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size);
-    // FIXME: Would be nice to be able to return a ErrorOr from here.
-    VERIFY(!maybe_vmobject.is_error());
-
-    auto region_or_error = MM.allocate_kernel_region_with_vmobject(
-        Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size },
-        maybe_vmobject.release_value(),
-        {},
-        Memory::Region::Access::ReadWriteExecute);
-    VERIFY(!region_or_error.is_error());
-    return region_or_error.release_value();
+    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
+    auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
+    Memory::VirtualRange range { VirtualAddress { paddr.get() }, size };
+    TRY(MM.region_tree().place_specifically(*region, range));
+    TRY(region->map(MM.kernel_page_directory()));
+    return region;
 }
 
 UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()

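Note the helper's return type changes from NonnullOwnPtr to ErrorOr<NonnullOwnPtr>, so failures now propagate via AK's TRY() instead of asserting with VERIFY(), resolving the old FIXME. A minimal sketch of the ErrorOr/TRY idiom, with hypothetical helper names not taken from this commit:

    ErrorOr<int> parse_setting(); // hypothetical fallible helper

    ErrorOr<void> use_setting()
    {
        auto value = TRY(parse_setting()); // on error, returns the Error to our caller
        dbgln("setting = {}", value);
        return {};
    }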
@@ -347,7 +342,7 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
     constexpr u64 apic_startup_region_base = 0x8000;
     auto apic_startup_region_size = Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(FlatPtr))).release_value_but_fixme_should_propagate_errors();
     VERIFY(apic_startup_region_size < USER_RANGE_BASE);
-    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size);
+    auto apic_startup_region = MUST(create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
     u8* apic_startup_region_ptr = apic_startup_region->vaddr().as_ptr();
     memcpy(apic_startup_region_ptr, reinterpret_cast<void const*>(apic_ap_start), apic_ap_start_size);

@@ -451,16 +451,13 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
         // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
         FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
         FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
-        auto reserved_range = MUST(m_region_tree.try_allocate_specific(VirtualAddress(start_of_range), end_of_range - start_of_range));
-        (void)MUST(Region::create_unbacked(reserved_range)).leak_ptr();
+        MUST(m_region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
     }
 
     // Allocate a virtual address range for our array
+    // This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
     auto range = MUST(m_region_tree.try_allocate_anywhere(physical_page_array_pages * PAGE_SIZE));
-
-    {
-        (void)MUST(Region::create_unbacked(range)).leak_ptr();
-    }
+    MUST(m_region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), range));
 
     // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
     // try to map the entire region into kernel space so we always have it

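The leak_ptr() calls above are deliberate: an unbacked placeholder Region is placed in the tree and then intentionally leaked, so the covered address range stays reserved for the lifetime of the kernel. A sketch of the idiom as it reads after this commit:

    // Permanently reserve `range`: place an unbacked Region and leak it,
    // so the range can never be freed or handed out again.
    auto placeholder = MUST(Region::create_unbacked());
    MUST(m_region_tree.place_specifically(*placeholder.leak_ptr(), range));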
@@ -770,10 +767,14 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    SpinlockLocker lock(kernel_page_directory().get_lock());
+    OwnPtr<KString> name_kstring;
+    if (!name.is_null())
+        name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
-    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
-    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
+    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
+    TRY(m_region_tree.place_anywhere(*region, size));
+    TRY(region->map(kernel_page_directory()));
+    return region;
 }
 
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)

@@ -809,27 +810,25 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
+    OwnPtr<KString> name_kstring;
+    if (!name.is_null())
+        name_kstring = TRY(KString::try_create(name));
     auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
-    SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
-    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
+    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
+    TRY(m_region_tree.place_anywhere(*region, size));
+    TRY(region->map(kernel_page_directory()));
+    return region;
 }
 
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
     auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
-    SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
-    return allocate_kernel_region_with_vmobject(range, move(vmobject), name, access, cacheable);
-}
-
-ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
-{
     OwnPtr<KString> name_kstring;
     if (!name.is_null())
         name_kstring = TRY(KString::try_create(name));
-    auto region = TRY(Region::try_create_kernel_only(range, vmobject, 0, move(name_kstring), access, cacheable));
+    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
+    TRY(m_region_tree.place_anywhere(*region, size, PAGE_SIZE));
     TRY(region->map(kernel_page_directory()));
     return region;
 }

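Callers still pass a StringView name; each allocator now converts it to a KString itself before constructing the unplaced Region, since Region construction happens ahead of placement. A hypothetical call site for the PhysicalAddress overload (address and name are illustrative, not from this commit):

    // Map one page of MMIO registers (e.g. the local APIC) into kernel space.
    auto mmio_region = TRY(MM.allocate_kernel_region(
        PhysicalAddress(0xfee00000), PAGE_SIZE, "Local APIC",
        Memory::Region::Access::ReadWrite));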
@@ -837,9 +836,15 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobje
 ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    SpinlockLocker lock(kernel_page_directory().get_lock());
-    auto range = TRY(m_region_tree.try_allocate_anywhere(size));
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, cacheable);
+
+    OwnPtr<KString> name_kstring;
+    if (!name.is_null())
+        name_kstring = TRY(KString::try_create(name));
+
+    auto region = TRY(Region::create_unplaced(vmobject, 0, move(name_kstring), access, cacheable));
+    TRY(m_region_tree.place_anywhere(*region, size));
+    TRY(region->map(kernel_page_directory()));
+    return region;
 }
 
 ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)

@@ -1157,13 +1162,6 @@ bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vadd
     return validate_user_stack_no_lock(space, vaddr);
 }
 
-void MemoryManager::register_kernel_region(Region& region)
-{
-    VERIFY(region.is_kernel());
-    SpinlockLocker lock(s_mm_lock);
-    m_region_tree.regions().insert(region.vaddr().get(), region);
-}
-
 void MemoryManager::unregister_kernel_region(Region& region)
 {
     VERIFY(region.is_kernel());

@@ -135,6 +135,7 @@ class MemoryManager {
     friend class PageDirectory;
     friend class AnonymousVMObject;
     friend class Region;
+    friend class RegionTree;
     friend class VMObject;
     friend struct ::KmallocGlobalData;

@@ -187,7 +188,6 @@ public:
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VirtualRange const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 
     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };

@@ -255,7 +255,6 @@ private:
     void initialize_physical_pages();
     void register_reserved_ranges();
 
-    void register_kernel_region(Region&);
     void unregister_kernel_region(Region&);
 
     void protect_kernel_image();

@@ -22,11 +22,21 @@
 
 namespace Kernel::Memory {
 
-Region::Region(VirtualRange const& range)
-    : m_range(range)
+Region::Region()
+    : m_range(VirtualRange({}, 0))
 {
-    if (is_kernel())
-        MM.register_kernel_region(*this);
+}
+
+Region::Region(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
+    : m_range(VirtualRange({}, 0))
+    , m_offset_in_vmobject(offset_in_vmobject)
+    , m_vmobject(move(vmobject))
+    , m_name(move(name))
+    , m_access(access | ((access & 0x7) << 4))
+    , m_shared(shared)
+    , m_cacheable(cacheable == Cacheable::Yes)
+{
+    m_vmobject->add_region(*this);
 }
 
 Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)

@@ -43,9 +53,6 @@ Region::Region(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size
     VERIFY((m_range.size() % PAGE_SIZE) == 0);
 
     m_vmobject->add_region(*this);
-
-    if (is_kernel())
-        MM.register_kernel_region(*this);
 }
 
 Region::~Region()

@@ -72,9 +79,14 @@ Region::~Region()
     }
 }
 
-ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked(VirtualRange const& range)
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unbacked()
 {
-    return adopt_nonnull_own_or_enomem(new (nothrow) Region(range));
+    return adopt_nonnull_own_or_enomem(new (nothrow) Region);
 }
 
+ErrorOr<NonnullOwnPtr<Region>> Region::create_unplaced(NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable)
+{
+    return adopt_nonnull_own_or_enomem(new (nothrow) Region(move(vmobject), offset_in_vmobject, move(name), access, cacheable, false));
+}
+
 ErrorOr<NonnullOwnPtr<Region>> Region::try_clone()

@@ -56,7 +56,8 @@ public:
 
     static ErrorOr<NonnullOwnPtr<Region>> try_create_user_accessible(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable, bool shared);
     static ErrorOr<NonnullOwnPtr<Region>> try_create_kernel_only(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
-    static ErrorOr<NonnullOwnPtr<Region>> create_unbacked(VirtualRange const&);
+    static ErrorOr<NonnullOwnPtr<Region>> create_unbacked();
+    static ErrorOr<NonnullOwnPtr<Region>> create_unplaced(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable = Cacheable::Yes);
 
     ~Region();

@@ -199,7 +200,8 @@ public:
     void set_syscall_region(bool b) { m_syscall_region = b; }
 
 private:
-    explicit Region(VirtualRange const&);
+    Region();
+    Region(NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
     Region(VirtualRange const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
 
     [[nodiscard]] bool remap_vmobject_page(size_t page_index, bool with_flush = true);

@@ -5,6 +5,7 @@
  */
 
 #include <AK/Format.h>
+#include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/RegionTree.h>
 #include <Kernel/Random.h>
 

@@ -142,10 +143,28 @@ ErrorOr<VirtualRange> RegionTree::try_allocate_randomized(size_t size, size_t al
 }
 
 ErrorOr<NonnullOwnPtr<Region>> RegionTree::allocate_unbacked_anywhere(size_t size, size_t alignment)
 {
+    auto region = TRY(Region::create_unbacked());
+    TRY(place_anywhere(*region, size, alignment));
+    return region;
+}
+
+ErrorOr<void> RegionTree::place_anywhere(Region& region, size_t size, size_t alignment)
+{
+    SpinlockLocker locker(m_lock);
     auto range = TRY(try_allocate_anywhere(size, alignment));
-    return Region::create_unbacked(range);
+    region.m_range = range;
+    m_regions.insert(region.vaddr().get(), region);
+    return {};
+}
+
+ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const& range)
+{
+    SpinlockLocker locker(m_lock);
+    auto allocated_range = TRY(try_allocate_specific(range.base(), range.size()));
+    region.m_range = allocated_range;
+    m_regions.insert(region.vaddr().get(), region);
+    return {};
 }
 
 }

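Both helpers hold m_lock across the range search and the tree insertion, which is the "single atomic operation" the commit message promises; previously the search (try_allocate_anywhere/try_allocate_specific) and the insertion were separate region-tree operations. For reference, place_anywhere() again with explanatory comments (comments added here, not part of the commit):

    ErrorOr<void> RegionTree::place_anywhere(Region& region, size_t size, size_t alignment)
    {
        SpinlockLocker locker(m_lock);                            // one critical section
        auto range = TRY(try_allocate_anywhere(size, alignment)); // find a free range...
        region.m_range = range;                                   // ...assign it to the region...
        m_regions.insert(region.vaddr().get(), region);           // ...and claim it before unlocking
        return {};
    }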
@@ -34,6 +34,9 @@ public:
 
     ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_anywhere(size_t size, size_t alignment = PAGE_SIZE);
 
+    ErrorOr<void> place_anywhere(Region&, size_t size, size_t alignment = PAGE_SIZE);
+    ErrorOr<void> place_specifically(Region&, VirtualRange const&);
+
     ErrorOr<VirtualRange> try_allocate_anywhere(size_t size, size_t alignment = PAGE_SIZE);
     ErrorOr<VirtualRange> try_allocate_specific(VirtualAddress base, size_t size);
     ErrorOr<VirtualRange> try_allocate_randomized(size_t size, size_t alignment = PAGE_SIZE);

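A hypothetical caller that needs stronger alignment can pass it through place_anywhere's third parameter (sizes here are illustrative; KiB is AK's size constant):

    // Reserve 16 pages at a 64 KiB-aligned address chosen by the tree.
    auto region = TRY(Region::create_unbacked());
    TRY(MM.region_tree().place_anywhere(*region, 16 * PAGE_SIZE, 64 * KiB));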