
Kernel: Stop allocating physical pages for mapped MMIO regions

As MMIO is placed at fixed physical addresses and does not need to be
backed by real RAM physical pages, there's no need to use PhysicalPage
instances to track its pages.
This results in slightly fewer allocations, but more importantly it
makes MMIO addresses that end up above the normal RAM ranges work,
as 64-bit PCI BARs usually do.
Idan Horowitz 2024-05-10 23:16:01 +03:00 committed by Andrew Kaster
parent d0555f3176
commit 827322c139
21 changed files with 136 additions and 31 deletions
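
For drivers, usage of the new API is mechanical: hand the fixed physical
address to allocate_mmio_kernel_region() and it is mapped directly, with no
PhysicalPage instances involved. A minimal sketch (the BAR address and size
below are hypothetical; the call matches the signature added in this commit):

    auto bar_paddr = PhysicalAddress(0x6'0000'0000); // hypothetical 64-bit BAR placed above all RAM
    auto bar_size = TRY(Memory::page_round_up(0x2000)); // hypothetical BAR space size
    auto region = TRY(MM.allocate_mmio_kernel_region(bar_paddr.page_base(), bar_size, "Example BAR"sv, Memory::Region::Access::ReadWrite));
    auto* registers = region->vaddr().offset(bar_paddr.offset_in_page()).as_ptr();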

View File

@@ -37,7 +37,7 @@ ErrorOr<Optional<PhysicalAddress>> find_rsdp_in_platform_specific_memory_locatio
 auto region_size_or_error = Memory::page_round_up(memory_range.length);
 if (region_size_or_error.is_error())
 return IterationDecision::Continue;
-auto region_or_error = MM.allocate_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read);
+auto region_or_error = MM.allocate_mmio_kernel_region(memory_range.start, region_size_or_error.value(), {}, Memory::Region::Access::Read);
 if (region_or_error.is_error())
 return IterationDecision::Continue;
 mapping.region = region_or_error.release_value();

View File

@@ -17,7 +17,7 @@ ErrorOr<Memory::MappedROM> map_bios()
 mapping.size = 128 * KiB;
 mapping.paddr = PhysicalAddress(0xe0000);
 auto region_size = TRY(Memory::page_round_up(mapping.size));
-mapping.region = TRY(MM.allocate_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read));
+mapping.region = TRY(MM.allocate_mmio_kernel_region(mapping.paddr, region_size, {}, Memory::Region::Access::Read));
 return mapping;
 }
@@ -31,7 +31,7 @@ ErrorOr<Memory::MappedROM> map_ebda()
 Memory::MappedROM mapping;
 auto region_size = TRY(Memory::page_round_up(ebda_size));
-mapping.region = TRY(MM.allocate_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read));
+mapping.region = TRY(MM.allocate_mmio_kernel_region(ebda_paddr.page_base(), region_size, {}, Memory::Region::Access::Read));
 mapping.offset = ebda_paddr.offset_in_page();
 mapping.size = ebda_size;
 mapping.paddr = ebda_paddr;

View File

@@ -254,7 +254,7 @@ UNMAP_AFTER_INIT bool APIC::init_bsp()
 set_base(apic_base);
 if (!m_is_x2.was_set()) {
-auto region_or_error = MM.allocate_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
+auto region_or_error = MM.allocate_mmio_kernel_region(apic_base.page_base(), PAGE_SIZE, {}, Memory::Region::Access::ReadWrite);
 if (region_or_error.is_error()) {
 dbgln("APIC: Failed to allocate memory for APIC base");
 return false;

View File

@@ -417,7 +417,7 @@ u64 HPET::ns_to_raw_counter_ticks(u64 ns) const
 UNMAP_AFTER_INIT HPET::HPET(PhysicalAddress acpi_hpet)
 : m_physical_acpi_hpet_table(acpi_hpet)
 , m_physical_acpi_hpet_registers(find_acpi_hpet_registers_block())
-, m_hpet_mmio_region(MM.allocate_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value())
+, m_hpet_mmio_region(MM.allocate_mmio_kernel_region(m_physical_acpi_hpet_registers.page_base(), PAGE_SIZE, "HPET MMIO"sv, Memory::Region::Access::ReadWrite).release_value())
 {
 s_hpet = this; // Make available as soon as possible so that IRQs can use it

View File

@@ -79,7 +79,7 @@ UNMAP_AFTER_INIT bool Access::find_and_register_pci_host_bridges_from_acpi_mcfg_
 dbgln("Failed to round up length of {} to pages", length);
 return false;
 }
-auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite);
+auto mcfg_region_or_error = MM.allocate_mmio_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG"sv, Memory::Region::Access::ReadWrite);
 if (mcfg_region_or_error.is_error())
 return false;
 auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region_or_error.value()->vaddr().offset(mcfg_table.offset_in_page()).as_ptr();

View File

@@ -69,7 +69,7 @@ void MemoryBackedHostBridge::map_bus_region(BusNumber bus)
 if (m_mapped_bus == bus && m_mapped_bus_region)
 return;
 auto bus_base_address = determine_memory_mapped_bus_base_address(bus);
-auto region_or_error = MM.allocate_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite);
+auto region_or_error = MM.allocate_mmio_kernel_region(bus_base_address, memory_range_per_bus, "PCI ECAM"sv, Memory::Region::Access::ReadWrite);
 // FIXME: Find a way to propagate error from here.
 if (region_or_error.is_error())
 VERIFY_NOT_REACHED();

View File

@@ -18,7 +18,7 @@ ErrorOr<NonnullLockRefPtr<EHCIController>> EHCIController::try_to_initialize(con
 auto pci_bar_space_size = PCI::get_BAR_space_size(pci_device_identifier, SpaceBaseAddressRegister);
 auto register_region_size = TRY(Memory::page_round_up(pci_bar_address.offset_in_page() + pci_bar_space_size));
-auto register_region = TRY(MM.allocate_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite));
+auto register_region = TRY(MM.allocate_mmio_kernel_region(pci_bar_address.page_base(), register_region_size, {}, Memory::Region::Access::ReadWrite));
 VirtualAddress register_base_address = register_region->vaddr().offset(pci_bar_address.offset_in_page());

View File

@@ -237,6 +237,7 @@ set(KERNEL_SOURCES
 Memory/AnonymousVMObject.cpp
 Memory/InodeVMObject.cpp
 Memory/MemoryManager.cpp
+Memory/MMIOVMObject.cpp
 Memory/PhysicalPage.cpp
 Memory/PhysicalRegion.cpp
 Memory/PhysicalZone.cpp

View File

@@ -15,7 +15,7 @@ BootFramebufferConsole::BootFramebufferConsole(PhysicalAddress framebuffer_addr,
 {
 // NOTE: We're very early in the boot process, memory allocations shouldn't really fail
 auto framebuffer_end = Memory::page_round_up(framebuffer_addr.offset(height * pitch).get()).release_value();
-m_framebuffer = MM.allocate_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value();
+m_framebuffer = MM.allocate_mmio_kernel_region(framebuffer_addr.page_base(), framebuffer_end - framebuffer_addr.page_base().get(), "Boot Framebuffer"sv, Memory::Region::Access::ReadWrite).release_value();
 [[maybe_unused]] auto result = m_framebuffer->set_write_combine(true);
 m_framebuffer_data = m_framebuffer->vaddr().offset(framebuffer_addr.offset_in_page()).as_ptr();

View File

@@ -29,7 +29,7 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s
 size_t size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors();
 dbgln("Framebuffer Console: taking {} bytes", size);
-auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
+auto region_or_error = MM.allocate_mmio_kernel_region(m_framebuffer_address, size, "Framebuffer Console"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
 VERIFY(!region_or_error.is_error());
 m_framebuffer_region = region_or_error.release_value();

View File

@@ -13,7 +13,7 @@ namespace Kernel::Graphics {
 NonnullLockRefPtr<VGATextModeConsole> VGATextModeConsole::initialize()
 {
 auto vga_window_size = MUST(Memory::page_round_up(0xc0000 - 0xa0000));
-auto vga_window_region = MUST(MM.allocate_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite));
+auto vga_window_region = MUST(MM.allocate_mmio_kernel_region(PhysicalAddress(0xa0000), vga_window_size, "VGA Display"sv, Memory::Region::Access::ReadWrite));
 return adopt_lock_ref(*new (nothrow) VGATextModeConsole(move(vga_window_region)));
 }

View File

@@ -80,7 +80,7 @@ ErrorOr<void> DisplayConnector::allocate_framebuffer_resources(size_t rounded_si
 if (!m_framebuffer_at_arbitrary_physical_range) {
 VERIFY(m_framebuffer_address.value().page_base() == m_framebuffer_address.value());
 m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_for_physical_range(m_framebuffer_address.value(), rounded_size));
-m_framebuffer_region = TRY(MM.allocate_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
+m_framebuffer_region = TRY(MM.allocate_mmio_kernel_region(m_framebuffer_address.value().page_base(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));
 } else {
 m_shared_framebuffer_vmobject = TRY(Memory::SharedFramebufferVMObject::try_create_at_arbitrary_physical_range(rounded_size));
 m_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(m_shared_framebuffer_vmobject->real_writes_framebuffer_vmobject(), rounded_size, "Framebuffer"sv, Memory::Region::Access::ReadWrite));

View File

@@ -20,7 +20,7 @@ namespace Kernel {
 ErrorOr<NonnullLockRefPtr<IntelDisplayConnectorGroup>> IntelDisplayConnectorGroup::try_create(Badge<IntelNativeGraphicsAdapter>, IntelGraphics::Generation generation, MMIORegion const& first_region, MMIORegion const& second_region)
 {
-auto registers_region = TRY(MM.allocate_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite));
+auto registers_region = TRY(MM.allocate_mmio_kernel_region(first_region.pci_bar_paddr, first_region.pci_bar_space_length, "Intel Native Graphics Registers"sv, Memory::Region::Access::ReadWrite));
 // NOTE: 0x5100 is the offset of the start of the GMBus registers
 auto gmbus_connector = TRY(GMBusConnector::create_with_physical_address(first_region.pci_bar_paddr.offset(0x5100)));
 auto connector_group = TRY(adopt_nonnull_lock_ref_or_enomem(new (nothrow) IntelDisplayConnectorGroup(generation, move(gmbus_connector), move(registers_region), first_region, second_region)));

View File

@@ -507,7 +507,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
 dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);
-auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
 auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
 dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
@@ -591,7 +591,7 @@ bool AHCIPort::identify_device()
 // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
 command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;
-auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()]->paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
+auto command_table_region = MM.allocate_kernel_region_with_physical_pages({ &m_command_table_pages[unused_command_header.value()], 1 }, "AHCI Command Table"sv, Memory::Region::Access::ReadWrite).release_value();
 auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
 memset(const_cast<u8*>(command_table.command_fis), 0, 64);
 command_table.descriptors[0].base_high = 0;

View File

@@ -0,0 +1,31 @@
+/*
+* Copyright (c) 2024, Idan Horowitz <idan.horowitz@serenityos.org>
+*
+* SPDX-License-Identifier: BSD-2-Clause
+*/
+#include <Kernel/Memory/MMIOVMObject.h>
+namespace Kernel::Memory {
+ErrorOr<NonnullLockRefPtr<MMIOVMObject>> MMIOVMObject::try_create_for_physical_range(PhysicalAddress paddr, size_t size)
+{
+if (paddr.offset(size) < paddr) {
+dbgln("Shenanigans! MMIOVMObject::try_create_for_physical_range({}, {}) would wrap around", paddr, size);
+// Since we can't wrap around yet, let's pretend to OOM.
+return ENOMEM;
+}
+// FIXME: We have to make this allocation because VMObject determines the size of the VMObject based on the physical pages array
+auto new_physical_pages = TRY(VMObject::try_create_physical_pages(size));
+return adopt_nonnull_lock_ref_or_enomem(new (nothrow) MMIOVMObject(paddr, move(new_physical_pages)));
+}
+MMIOVMObject::MMIOVMObject(PhysicalAddress paddr, FixedArray<RefPtr<PhysicalPage>>&& new_physical_pages)
+: VMObject(move(new_physical_pages))
+{
+VERIFY(paddr.page_base() == paddr);
+}
+}
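
Note the FIXME above: the FixedArray of (null) RefPtr<PhysicalPage> entries is
still allocated, because the VMObject base class derives an object's size in
pages from its physical-pages array; no PhysicalPage instances are ever created
for the MMIO range itself. The wrap-around guard rejects ranges whose end would
overflow the physical address space, pretending to OOM instead.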

View File

@@ -0,0 +1,26 @@
+/*
+* Copyright (c) 2024, Idan Horowitz <idan.horowitz@serenityos.org>
+*
+* SPDX-License-Identifier: BSD-2-Clause
+*/
+#pragma once
+#include <Kernel/Memory/PhysicalAddress.h>
+#include <Kernel/Memory/VMObject.h>
+namespace Kernel::Memory {
+class MMIOVMObject final : public VMObject {
+public:
+static ErrorOr<NonnullLockRefPtr<MMIOVMObject>> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
+virtual ErrorOr<NonnullLockRefPtr<VMObject>> try_clone() override { return ENOTSUP; }
+private:
+MMIOVMObject(PhysicalAddress, FixedArray<RefPtr<PhysicalPage>>&&);
+virtual StringView class_name() const override { return "MMIOVMObject"sv; }
+};
+}

View File

@@ -21,6 +21,7 @@
 #include <Kernel/Library/Panic.h>
 #include <Kernel/Library/StdLib.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
+#include <Kernel/Memory/MMIOVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/SharedInodeVMObject.h>
@@ -1066,9 +1067,10 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalPage>& dma_buffer_page)
 {
-dma_buffer_page = TRY(allocate_physical_page());
+auto page = TRY(allocate_physical_page());
+dma_buffer_page = page;
 // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default)
-return allocate_kernel_region(dma_buffer_page->paddr(), PAGE_SIZE, name, access, Region::Cacheable::No);
+return allocate_kernel_region_with_physical_pages({ &page, 1 }, name, access, Region::Cacheable::No);
 }
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
@@ -1083,7 +1085,7 @@ ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(
 VERIFY(!(size % PAGE_SIZE));
 dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
 // Do not enable Cache for this region as physical memory transfers are performed (Most architectures have this behavior by default)
-return allocate_kernel_region(dma_buffer_pages.first()->paddr(), size, name, access, Region::Cacheable::No);
+return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, Region::Cacheable::No);
 }
 ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
@@ -1107,16 +1109,28 @@ ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size
 return region;
 }
-ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> pages, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
+auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages));
+OwnPtr<KString> name_kstring;
+if (!name.is_null())
+name_kstring = TRY(KString::try_create(name));
+auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
+TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, pages.size() * PAGE_SIZE, PAGE_SIZE); }));
+TRY(region->map(kernel_page_directory()));
+return region;
+}
+ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
+{
 VERIFY(!(size % PAGE_SIZE));
-auto vmobject = TRY(AnonymousVMObject::try_create_for_physical_range(paddr, size));
+auto vmobject = TRY(MMIOVMObject::try_create_for_physical_range(paddr, size));
 OwnPtr<KString> name_kstring;
 if (!name.is_null())
 name_kstring = TRY(KString::try_create(name));
 auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(name_kstring), access, cacheable));
 TRY(m_global_data.with([&](auto& global_data) { return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE); }));
-TRY(region->map(kernel_page_directory()));
+TRY(region->map(kernel_page_directory(), paddr));
 return region;
 }
@@ -1326,7 +1340,7 @@ ErrorOr<Vector<NonnullRefPtr<PhysicalPage>>> MemoryManager::allocate_contiguous_
 }));
 {
-auto cleanup_region = TRY(MM.allocate_kernel_region(physical_pages[0]->paddr(), PAGE_SIZE * page_count, {}, Region::Access::Read | Region::Access::Write));
+auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write));
 memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
 }
 return physical_pages;
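
Taken together, the two allocators now split cleanly by backing type; a rough
sketch of both call sites (values are illustrative, error handling elided):

    // RAM-backed: pages come from the physical allocator and stay refcounted via PhysicalPage.
    RefPtr<Memory::PhysicalPage> dma_page;
    auto dma_region = TRY(MM.allocate_dma_buffer_page("Example DMA"sv, Memory::Region::Access::ReadWrite, dma_page));

    // MMIO: no PhysicalPage bookkeeping; the fixed physical address is mapped as-is.
    auto mmio_region = TRY(MM.allocate_mmio_kernel_region(PhysicalAddress(0xfed0'0000), PAGE_SIZE, "Example MMIO"sv, Memory::Region::Access::ReadWrite));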

View File

@@ -174,7 +174,8 @@ public:
 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalPage>>& dma_buffer_pages);
 ErrorOr<NonnullOwnPtr<Memory::Region>> allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access);
 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(size_t, StringView name, Region::Access access, AllocationStrategy strategy = AllocationStrategy::Reserve, Region::Cacheable = Region::Cacheable::Yes);
-ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+ErrorOr<NonnullOwnPtr<Region>> allocate_mmio_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 ErrorOr<NonnullOwnPtr<Region>> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
 ErrorOr<NonnullOwnPtr<Region>> allocate_unbacked_region_anywhere(size_t size, size_t alignment);
 ErrorOr<NonnullOwnPtr<Region>> create_identity_mapped_region(PhysicalAddress, size_t);

View File

@@ -211,6 +211,18 @@ ErrorOr<void> Region::set_should_cow(size_t page_index, bool cow)
 }
 bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> page)
 {
+if (!page)
+return map_individual_page_impl(page_index, {}, false, false);
+return map_individual_page_impl(page_index, page->paddr(), is_readable(), is_writable() && !page->is_shared_zero_page() && !page->is_lazy_committed_page());
+}
+bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr)
+{
+return map_individual_page_impl(page_index, paddr, is_readable(), is_writable());
+}
+bool Region::map_individual_page_impl(size_t page_index, PhysicalAddress paddr, bool readable, bool writeable)
+{
 VERIFY(m_page_directory->get_lock().is_locked_by_current_processor());
@@ -225,18 +237,15 @@ bool Region::map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage> pa
 if (!pte)
 return false;
-if (!page || (!is_readable() && !is_writable())) {
+if (!readable && !writeable) {
 pte->clear();
 return true;
 }
 pte->set_cache_disabled(!m_cacheable);
-pte->set_physical_page_base(page->paddr().get());
+pte->set_physical_page_base(paddr.get());
 pte->set_present(true);
-if (page->is_shared_zero_page() || page->is_lazy_committed_page() || should_cow(page_index))
-pte->set_writable(false);
-else
-pte->set_writable(is_writable());
+pte->set_writable(writeable && !should_cow(page_index));
 if (Processor::current().has_nx())
 pte->set_execute_disabled(!is_executable());
 if (Processor::current().has_pat())
@@ -323,6 +332,26 @@ ErrorOr<void> Region::map(PageDirectory& page_directory, ShouldFlushTLB should_f
 return ENOMEM;
 }
+ErrorOr<void> Region::map(PageDirectory& page_directory, PhysicalAddress paddr, ShouldFlushTLB should_flush_tlb)
+{
+SpinlockLocker page_lock(page_directory.get_lock());
+set_page_directory(page_directory);
+size_t page_index = 0;
+while (page_index < page_count()) {
+if (!map_individual_page_impl(page_index, paddr))
+break;
+++page_index;
+paddr = paddr.offset(PAGE_SIZE);
+}
+if (page_index > 0) {
+if (should_flush_tlb == ShouldFlushTLB::Yes)
+MemoryManager::flush_tlb(m_page_directory, vaddr(), page_index);
+if (page_index == page_count())
+return {};
+}
+return ENOMEM;
+}
 void Region::remap()
 {
 VERIFY(m_page_directory);
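
The new map() overload relies on MMIO ranges being physically contiguous: it
simply advances paddr by PAGE_SIZE for each page index. The four-argument
map_individual_page_impl() also drops the shared-zero-page and
lazy-committed-page tests, which only make sense for PhysicalPage-backed
memory (the CoW check stays in the shared implementation). On partial failure,
the pages that were mapped still get their TLB flush before ENOMEM is returned.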

View File

@@ -208,6 +208,7 @@ public:
 void set_page_directory(PageDirectory&);
 ErrorOr<void> map(PageDirectory&, ShouldFlushTLB = ShouldFlushTLB::Yes);
+ErrorOr<void> map(PageDirectory&, PhysicalAddress, ShouldFlushTLB = ShouldFlushTLB::Yes);
 void unmap(ShouldFlushTLB = ShouldFlushTLB::Yes);
 void unmap_with_locks_held(ShouldFlushTLB, SpinlockLocker<RecursiveSpinlock<LockRank::None>>& pd_locker);
@@ -247,6 +248,8 @@ private:
 [[nodiscard]] bool map_individual_page_impl(size_t page_index);
 [[nodiscard]] bool map_individual_page_impl(size_t page_index, RefPtr<PhysicalPage>);
+[[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress);
+[[nodiscard]] bool map_individual_page_impl(size_t page_index, PhysicalAddress, bool readable, bool writeable);
 LockRefPtr<PageDirectory> m_page_directory;
 VirtualRange m_range;
View File

@@ -32,7 +32,7 @@ template<typename T>
 static ErrorOr<NonnullOwnPtr<TypedMapping<T>>> adopt_new_nonnull_own_typed_mapping(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read)
 {
 auto mapping_length = TRY(page_round_up(paddr.offset_in_page() + length));
-auto region = TRY(MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access));
+auto region = TRY(MM.allocate_mmio_kernel_region(paddr.page_base(), mapping_length, {}, access));
 auto table = TRY(adopt_nonnull_own_or_enomem(new (nothrow) Memory::TypedMapping<T>()));
 table->region = move(region);
 table->offset = paddr.offset_in_page();
@@ -46,7 +46,7 @@ static ErrorOr<TypedMapping<T>> map_typed(PhysicalAddress paddr, size_t length,
 {
 TypedMapping<T> table;
 auto mapping_length = TRY(page_round_up(paddr.offset_in_page() + length));
-table.region = TRY(MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access));
+table.region = TRY(MM.allocate_mmio_kernel_region(paddr.page_base(), mapping_length, {}, access));
 table.offset = paddr.offset_in_page();
 table.paddr = paddr;
 table.length = length;
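
Call sites of the typed-mapping helpers are unchanged; a typical use still
looks roughly like this (the table type and variable name are illustrative):

    // Maps the table's physical range via allocate_mmio_kernel_region() under the hood.
    auto hpet = TRY(Memory::map_typed<ACPI::Structures::HPET>(hpet_paddr, sizeof(ACPI::Structures::HPET)));
    // hpet-> now dereferences into the MMIO mapping; no PhysicalPage refs are held.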