Mirror of https://github.com/SerenityOS/serenity, synced 2024-10-07 00:19:27 +00:00.
Kernel: Move create_identity_mapped_region() to MemoryManager
This had no business being in RegionTree, since RegionTree doesn't track identity-mapped regions anyway. (We allow *any* address to be identity mapped, not just the ones that are part of the RegionTree's range.)
This commit is contained in:
parent: 3dbb4bc3a6
commit: e0da8da657
|
@ -332,7 +332,7 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
|
|||
constexpr u64 apic_startup_region_base = 0x8000;
|
||||
auto apic_startup_region_size = Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(FlatPtr))).release_value_but_fixme_should_propagate_errors();
|
||||
VERIFY(apic_startup_region_size < USER_RANGE_BASE);
|
||||
auto apic_startup_region = MUST(MM.region_tree().create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
|
||||
auto apic_startup_region = MUST(MM.create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), apic_startup_region_size));
|
||||
u8* apic_startup_region_ptr = apic_startup_region->vaddr().as_ptr();
|
||||
memcpy(apic_startup_region_ptr, reinterpret_cast<void const*>(apic_ap_start), apic_ap_start_size);
|
||||
|
||||
|
|
|
@ -1232,4 +1232,14 @@ void MemoryManager::copy_physical_page(PhysicalPage& physical_page, u8 page_buff
|
|||
unquickmap_page();
|
||||
}
|
||||
|
||||
// Maps the physical range [address, address + size) at the *identical*
// virtual address in the kernel page directory, i.e. vaddr == paddr inside
// the region. (Presumably used for code that must survive a paging-mode
// transition, e.g. the AP bootstrap trampoline — see the APIC caller.)
// Note: the region is deliberately NOT placed via the RegionTree allocator.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_region(PhysicalAddress address, size_t size)
{
    // Back the region with a VMObject covering exactly the requested
    // physical range; no anonymous pages are allocated for it.
    auto backing_object = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(address, size));

    // Create the region unplaced, then pin it to the virtual address that
    // equals the physical address, bypassing normal virtual-range allocation.
    auto identity_region = TRY(Memory::Region::create_unplaced(move(backing_object), 0, {}, Memory::Region::Access::ReadWriteExecute));
    identity_region->m_range = Memory::VirtualRange { VirtualAddress { static_cast<FlatPtr>(address.get()) }, size };

    // Materialize the mapping in the kernel page directory so the range is
    // immediately accessible.
    TRY(identity_region->map(MM.kernel_page_directory()));
    return identity_region;
}
|
||||
|
||||
}
|
||||
|
|
|
@ -188,6 +188,7 @@ public:
|
|||
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
|
||||
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
|
||||
ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
|
||||
ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);
|
||||
|
||||
struct SystemMemoryInfo {
|
||||
PhysicalSize user_physical_pages { 0 };
|
||||
|
|
|
@ -154,16 +154,6 @@ ErrorOr<void> RegionTree::place_specifically(Region& region, VirtualRange const&
|
|||
return {};
|
||||
}
|
||||
|
||||
// Identity-maps the physical range [paddr, paddr + size): the resulting
// region's virtual base equals its physical base. The placement is forced
// by writing m_range directly rather than asking the tree's allocator for
// a virtual range.
ErrorOr<NonnullOwnPtr<Memory::Region>> RegionTree::create_identity_mapped_region(PhysicalAddress paddr, size_t size)
{
    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(paddr, size));
    auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));

    // Pin the region at vaddr == paddr, then map it into the kernel's page
    // directory so it becomes immediately usable.
    Memory::VirtualRange identity_range { VirtualAddress { static_cast<FlatPtr>(paddr.get()) }, size };
    region->m_range = identity_range;
    TRY(region->map(MM.kernel_page_directory()));

    return region;
}
|
||||
|
||||
bool RegionTree::remove(Region& region)
|
||||
{
|
||||
SpinlockLocker locker(m_lock);
|
||||
|
|
|
@ -45,8 +45,6 @@ public:
|
|||
ErrorOr<void> place_anywhere(Region&, RandomizeVirtualAddress, size_t size, size_t alignment = PAGE_SIZE);
|
||||
ErrorOr<void> place_specifically(Region&, VirtualRange const&);
|
||||
|
||||
ErrorOr<NonnullOwnPtr<Memory::Region>> create_identity_mapped_region(PhysicalAddress, size_t);
|
||||
|
||||
void delete_all_regions_assuming_they_are_unmapped();
|
||||
|
||||
// FIXME: Access the region tree through a SpinlockProtected or similar.
|
||||
|
|
Loading…
Reference in a new issue