Kernel/riscv64: Take the memory map from the FDT and dump it

For this, the BootInfo struct was made architecture-specific.
Hendiadyoin1 2024-01-26 14:37:36 +01:00 committed by Andrew Kaster
parent 21a21c6a11
commit d3f6b03733
10 changed files with 368 additions and 119 deletions

View file

@@ -5,6 +5,7 @@
*/
#include <AK/Types.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/InterruptManagement.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Boot/BootInfo.h>
@@ -217,29 +218,7 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init([[maybe_unused]] BootInfo con
// FIXME: Read the /chosen/bootargs property.
kernel_cmdline = RPi::Mailbox::the().query_kernel_command_line(s_command_line_buffer);
#elif ARCH(RISCV64)
// FIXME: Get the actual memory map from the device tree.
static multiboot_memory_map_t mmap[] = {
{
// We currently can't get the actual size of firmware-reserved memory, so mark the first 0x20'0000 bytes as reserved.
// This reserved memory region should be large enough for now.
sizeof(multiboot_mmap_entry) - sizeof(u32),
0x8000'0000,
0x20'0000,
MULTIBOOT_MEMORY_RESERVED,
},
{
sizeof(multiboot_mmap_entry) - sizeof(u32),
0x8020'0000,
1 * GiB - 0x20'0000,
MULTIBOOT_MEMORY_AVAILABLE,
},
};
multiboot_memory_map = mmap;
multiboot_memory_map_count = array_size(mmap);
multiboot_modules = nullptr;
multiboot_modules_count = 0;
// FIXME: Take this from the flattened device tree (/chosen/bootargs)
kernel_cmdline = "serial_debug"sv;
#endif
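
A note on the sizeof(multiboot_mmap_entry) - sizeof(u32) initializer in the removed block: Multiboot mmap entries are self-describing, and the size field counts the bytes of the entry excluding the size field itself. A minimal sketch of the layout those hard-coded entries assumed (field names follow the Multiboot 1 spec; this is illustrative, not the kernel's actual header):

#include <AK/Types.h>

struct [[gnu::packed]] multiboot_mmap_entry_sketch {
    u32 size; // bytes in this entry *after* this field, hence "- sizeof(u32)"
    u64 addr; // physical start address of the range
    u64 len;  // length of the range in bytes
    u32 type; // MULTIBOOT_MEMORY_AVAILABLE, MULTIBOOT_MEMORY_RESERVED, ...
};
static_assert(sizeof(multiboot_mmap_entry_sketch) - sizeof(u32) == 20);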
@@ -308,6 +287,11 @@ extern "C" [[noreturn]] UNMAP_AFTER_INIT void init([[maybe_unused]] BootInfo con
InterruptManagement::initialize();
ACPI::initialize();
#if ARCH(RISCV64)
// FIXME: Unflatten the device tree and use it for device discovery
dump_fdt();
#endif
// Initialize TimeManagement before using randomness!
TimeManagement::initialize(0);

View file

@@ -0,0 +1,25 @@
/*
* Copyright (c) 2024, Leon Albrecht <leon.a@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/riscv64/CPU.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Userland/Libraries/LibDeviceTree/FlattenedDeviceTree.h>
#include <Userland/Libraries/LibDeviceTree/Validation.h>
namespace Kernel {
BootInfo s_boot_info;
alignas(PAGE_SIZE) __attribute__((section(".bss.fdt"))) u8 s_fdt_storage[fdt_storage_size];
void dump_fdt()
{
auto& header = *bit_cast<DeviceTree::FlattenedDeviceTreeHeader*>(&s_fdt_storage[0]);
auto fdt = ReadonlyBytes(s_fdt_storage, header.totalsize);
MUST(DeviceTree::dump(header, fdt));
}
}
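
dump_fdt() trusts header.totalsize, read from the blob that pre_init copied into s_fdt_storage. The devicetree specification fixes the header as ten big-endian u32 fields starting with the 0xd00dfeed magic; assuming LibDeviceTree declares them as BigEndian<u32> (which AK byte-swaps transparently on little-endian RISC-V), the plain field read above yields the right value. A sketch of that layout:

#include <AK/Endian.h>
#include <AK/Types.h>

// FDT header per the devicetree spec; LibDeviceTree's FlattenedDeviceTreeHeader
// is assumed to model each field as BigEndian<u32>.
struct FdtHeaderSketch {
    BigEndian<u32> magic;             // 0xd00dfeed
    BigEndian<u32> totalsize;         // size of the whole blob, header included
    BigEndian<u32> off_dt_struct;     // offset of the structure block
    BigEndian<u32> off_dt_strings;    // offset of the strings block
    BigEndian<u32> off_mem_rsvmap;    // offset of the memory reservation block
    BigEndian<u32> version;
    BigEndian<u32> last_comp_version;
    BigEndian<u32> boot_cpuid_phys;
    BigEndian<u32> size_dt_strings;
    BigEndian<u32> size_dt_struct;
};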

View file

@@ -6,5 +6,18 @@
#pragma once
#include <Kernel/Memory/PhysicalAddress.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <AK/Platform.h>
VALIDATE_IS_RISCV64()
namespace Kernel {
constexpr size_t fdt_storage_size = 2 * MiB;
extern u8 s_fdt_storage[fdt_storage_size];
extern BootInfo s_boot_info;
void dump_fdt();
}

View file

@@ -6,12 +6,14 @@
#include <AK/Types.h>
#include <Kernel/Arch/riscv64/CPU.h>
#include <Kernel/Arch/riscv64/MMU.h>
#include <Kernel/Arch/riscv64/PageDirectory.h>
#include <Kernel/Arch/riscv64/SBI.h>
#include <Kernel/Arch/riscv64/pre_init.h>
#include <Kernel/Memory/MemoryManager.h>
#include <Kernel/Sections.h>
#include <LibDeviceTree/FlattenedDeviceTree.h>
// These come from the linker script
extern u8 page_tables_phys_start[];
@@ -173,10 +175,11 @@ static UNMAP_AFTER_INIT void setup_kernel_page_directory(u64* root_table)
}
// This function has to fit into one page as it will be identity mapped.
[[gnu::aligned(PAGE_SIZE)]] [[noreturn]] UNMAP_AFTER_INIT static void enable_paging(FlatPtr satp, u64* enable_paging_pte)
[[gnu::aligned(PAGE_SIZE)]] [[noreturn]] UNMAP_AFTER_INIT static void enable_paging(BootInfo const& info, FlatPtr satp, u64* enable_paging_pte)
{
// Switch the current root page table to the provided satp value. This takes effect immediately, but we won't crash, as this function is identity mapped.
// Also, set up a temporary trap handler to catch any traps while switching page tables.
register FlatPtr a0 asm("a0") = bit_cast<FlatPtr>(&info);
asm volatile(
" lla t0, 1f \n"
" csrw stvec, t0 \n"
@@ -210,21 +213,33 @@ static UNMAP_AFTER_INIT void setup_kernel_page_directory(u64* root_table)
" wfi \n"
" j 1b \n"
:
: [satp] "r"(satp), [offset] "r"(calculate_physical_to_link_time_address_offset()), [enable_paging_pte] "r"(enable_paging_pte)
: "r"(a0), [satp] "r"(satp), [offset] "r"(calculate_physical_to_link_time_address_offset()), [enable_paging_pte] "r"(enable_paging_pte)
: "t0");
VERIFY_NOT_REACHED();
}
[[noreturn]] UNMAP_AFTER_INIT void init_page_tables_and_jump_to_init()
[[noreturn]] UNMAP_AFTER_INIT void init_page_tables_and_jump_to_init(FlatPtr mhartid, PhysicalPtr fdt_phys_addr)
{
if (RISCV64::CSR::SATP::read().MODE != RISCV64::CSR::SATP::Mode::Bare)
panic_without_mmu("Kernel booted with MMU enabled"sv);
// Copy the FDT to a known location
DeviceTree::FlattenedDeviceTreeHeader* fdt_header = bit_cast<DeviceTree::FlattenedDeviceTreeHeader*>(fdt_phys_addr);
u8* fdt_storage = bit_cast<u8*>(fdt_phys_addr);
if (fdt_header->totalsize > fdt_storage_size)
panic_without_mmu("Passed FDT is bigger than the internal storage"sv);
for (size_t o = 0; o < fdt_header->totalsize; o += 1) {
// FIXME: Maybe increase the IO size here
adjust_by_mapping_base(s_fdt_storage)[o] = fdt_storage[o];
}
*adjust_by_mapping_base(&physical_to_virtual_offset) = calculate_physical_to_link_time_address_offset();
*adjust_by_mapping_base(&kernel_mapping_base) = KERNEL_MAPPING_BASE;
*adjust_by_mapping_base(&kernel_load_base) = KERNEL_MAPPING_BASE;
*adjust_by_mapping_base(&s_boot_info) = { .mhartid = mhartid, .fdt_phys_addr = fdt_phys_addr };
PageBumpAllocator allocator(adjust_by_mapping_base(reinterpret_cast<u64*>(page_tables_phys_start)), adjust_by_mapping_base(reinterpret_cast<u64*>(page_tables_phys_end)));
auto* root_table = allocator.take_page();
build_mappings(allocator, root_table);
@@ -247,7 +262,7 @@ static UNMAP_AFTER_INIT void setup_kernel_page_directory(u64* root_table)
.MODE = RISCV64::CSR::SATP::Mode::Sv39,
};
enable_paging(bit_cast<FlatPtr>(satp), &enable_paging_level0_table[enable_paging_vpn_0]);
enable_paging(s_boot_info, bit_cast<FlatPtr>(satp), &enable_paging_level0_table[enable_paging_vpn_0]);
}
}
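
Before enable_paging runs, the CPU still executes at the kernel's physical load address while every global symbol carries its link-time (virtual) address; that is why the FDT copy loop and the s_boot_info assignment above go through adjust_by_mapping_base. The BootInfo pointer is pinned into a0 so that, per the RV64 calling convention, it arrives as the first argument of init() after the jump into the mapped kernel. A minimal sketch of the rebasing helper this relies on, assuming it subtracts the same offset used elsewhere in this file:

// Rebase a link-time address to where the symbol physically sits before
// the MMU is enabled (sketch; the real helper lives in the pre-MMU code).
template<typename T>
static T* adjust_by_mapping_base_sketch(T* link_time_address)
{
    return bit_cast<T*>(bit_cast<FlatPtr>(link_time_address)
        - calculate_physical_to_link_time_address_offset());
}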

View file

@@ -7,12 +7,13 @@
#pragma once
#include <AK/Forward.h>
#include <Kernel/Prekernel/Prekernel.h>
#include <AK/Platform.h>
VALIDATE_IS_RISCV64()
namespace Kernel::Memory {
[[noreturn]] void init_page_tables_and_jump_to_init();
[[noreturn]] void init_page_tables_and_jump_to_init(FlatPtr mhartid, PhysicalPtr fdt_phys_addr);
}

View file

@@ -45,13 +45,11 @@ UNMAP_AFTER_INIT void dbgln_without_mmu(StringView message)
extern "C" [[noreturn]] UNMAP_AFTER_INIT void pre_init(FlatPtr mhartid, PhysicalPtr fdt_phys_addr)
{
(void)mhartid;
(void)fdt_phys_addr;
// Catch traps in pre_init
RISCV64::CSR::write(RISCV64::CSR::Address::STVEC, bit_cast<FlatPtr>(&early_trap_handler));
Memory::init_page_tables_and_jump_to_init();
Memory::init_page_tables_and_jump_to_init(mhartid, fdt_phys_addr);
}
}
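
For context, pre_init's two parameters follow the de-facto RISC-V supervisor boot convention used by OpenSBI and Linux: firmware enters the kernel with the current hart ID in a0 and the physical address of the FDT in a1. Since a0 and a1 are also the first two integer argument registers of the RV64 C ABI, the assembly entry point (in boot.S, not part of this diff) can hand them to pre_init unchanged. A hedged sketch of that contract:

// Boot register contract assumed here (RISC-V SBI convention):
//   a0 = mhartid        -- ID of the hart the firmware booted us on
//   a1 = fdt_phys_addr  -- physical address of the flattened device tree
// These are also the first two C argument registers, so the boot stub only
// needs to set up a stack before calling:
extern "C" [[noreturn]] void pre_init(FlatPtr mhartid, PhysicalPtr fdt_phys_addr);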

View file

@@ -529,6 +529,7 @@ elseif("${SERENITY_ARCH}" STREQUAL "riscv64")
Arch/riscv64/Firmware/ACPI/StaticParsing.cpp
Arch/riscv64/boot.S
Arch/riscv64/CPU.cpp
Arch/riscv64/CurrentTime.cpp
Arch/riscv64/DebugOutput.cpp
Arch/riscv64/Delay.cpp
@@ -575,6 +576,12 @@ set(AK_SOURCES
../AK/UUID.cpp
)
set(DT_SOURCES
../Userland/Libraries/LibDeviceTree/DeviceTree.cpp
../Userland/Libraries/LibDeviceTree/FlattenedDeviceTree.cpp
../Userland/Libraries/LibDeviceTree/Validation.cpp
)
set(EDID_SOURCES
../Userland/Libraries/LibEDID/DMT.cpp
../Userland/Libraries/LibEDID/EDID.cpp
@@ -624,6 +631,7 @@ set(SOURCES
${KERNEL_SOURCES}
${GENERATED_SOURCES}
${AK_SOURCES}
${DT_SOURCES}
${EDID_SOURCES}
${ELF_SOURCES}
${VT_SOURCES}

View file

@@ -5,6 +5,8 @@
*/
#include <AK/Assertions.h>
#include <AK/MemoryStream.h>
#include <AK/QuickSort.h>
#include <AK/StringView.h>
#include <Kernel/Arch/CPU.h>
#include <Kernel/Arch/PageDirectory.h>
@@ -26,6 +28,7 @@
#include <Kernel/Sections.h>
#include <Kernel/Security/AddressSanitizer.h>
#include <Kernel/Tasks/Process.h>
#include <Userland/Libraries/LibDeviceTree/FlattenedDeviceTree.h>
extern u8 start_of_kernel_image[];
extern u8 end_of_kernel_image[];
@@ -249,7 +252,7 @@ bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAdd
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
// Register used memory regions that we know of.
m_global_data.with([&](auto& global_data) {
m_global_data.with([this](auto& global_data) {
global_data.used_memory_ranges.ensure_capacity(4);
#if ARCH(X86_64)
// NOTE: We don't touch the first 1 MiB of RAM on x86-64 even if it's usable as indicated
@@ -278,88 +281,40 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
#endif
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
if (multiboot_flags & 0x4) {
auto* bootmods_start = multiboot_copy_boot_modules_array;
auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
}
}
auto* mmap_begin = multiboot_memory_map;
auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
#if ARCH(RISCV64)
// FIXME: AARCH64 might be able to make use of this code path
// Some x86 platforms also provide flattened device trees
parse_memory_map_fdt(global_data, s_fdt_storage);
#else
parse_memory_map_multiboot(global_data);
#endif
// Now we need to setup the physical regions we will use later
struct ContiguousPhysicalVirtualRange {
PhysicalAddress lower;
PhysicalAddress upper;
};
Optional<ContiguousPhysicalVirtualRange> last_contiguous_physical_range;
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
// We have to copy these onto the stack, because we take a reference to these when printing them out,
// and doing so on a packed struct field is UB.
auto address = mmap->addr;
auto length = mmap->len;
ArmedScopeGuard write_back_guard = [&]() {
mmap->addr = address;
mmap->len = length;
};
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);
auto start_address = PhysicalAddress(address);
switch (mmap->type) {
case (MULTIBOOT_MEMORY_AVAILABLE):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
break;
case (MULTIBOOT_MEMORY_RESERVED):
#if ARCH(X86_64)
// Workaround for https://gitlab.com/qemu-project/qemu/-/commit/8504f129450b909c88e199ca44facd35d38ba4de
// That commit added a reserved 12GiB entry for the benefit of virtual firmware.
// We can safely ignore this block as it isn't actually reserved on any real hardware.
// From: https://lore.kernel.org/all/20220701161014.3850-1-joao.m.martins@oracle.com/
// "Always add the HyperTransport range into e820 even when the relocation isn't
// done *and* there's >= 40 phys bit that would put max phyusical boundary to 1T
// This should allow virtual firmware to avoid the reserved range at the
// 1T boundary on VFs with big bars."
if (address != 0x000000fd00000000 || length != (0x000000ffffffffff - 0x000000fd00000000) + 1)
#endif
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
break;
case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
break;
case (MULTIBOOT_MEMORY_NVS):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
break;
case (MULTIBOOT_MEMORY_BADRAM):
dmesgln("MM: Warning, detected bad memory range!");
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
break;
default:
dbgln("MM: Unknown range!");
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
break;
}
if (mmap->type != MULTIBOOT_MEMORY_AVAILABLE)
for (auto range : global_data.physical_memory_ranges) {
if (range.type != PhysicalMemoryRangeType::Usable)
continue;
auto address = range.start.get();
auto length = range.length;
// Fix up unaligned memory regions.
auto diff = (FlatPtr)address % PAGE_SIZE;
if (diff != 0) {
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
dmesgln("MM: Got an unaligned usable physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
diff = PAGE_SIZE - diff;
address += diff;
length -= diff;
}
if ((length % PAGE_SIZE) != 0) {
dmesgln("MM: Got an unaligned physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
dmesgln("MM: Got an unaligned usable physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
length -= length % PAGE_SIZE;
}
if (length < PAGE_SIZE) {
dmesgln("MM: Memory physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
dmesgln("MM: Memory usable physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
continue;
}
@@ -391,13 +346,12 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
last_contiguous_physical_range->upper = addr;
}
}
}
// FIXME: If this is ever false, there's a good chance that all physical memory is already spent
if (last_contiguous_physical_range.has_value()) {
auto range = last_contiguous_physical_range.release_value();
// FIXME: OOM?
global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
// FIXME: If this is ever false, there's a good chance that all physical memory is already spent
if (last_contiguous_physical_range.has_value()) {
auto range = last_contiguous_physical_range.release_value();
// FIXME: OOM?
global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
}
}
for (auto& region : global_data.physical_regions)
@@ -426,6 +380,247 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
});
}
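To make the alignment fixup above concrete, here is a worked example with the arithmetic spelled out (hypothetical input values, assuming PAGE_SIZE == 0x1000):

static void alignment_fixup_example()
{
    FlatPtr address = 0x8000'0800; // bootloader-provided, starts mid-page
    size_t length = 0x20'0000;
    auto diff = address % PAGE_SIZE; // 0x800: distance past the page boundary
    if (diff != 0) {
        diff = PAGE_SIZE - diff; // 0x800 bytes up to the next boundary
        address += diff;         // -> 0x8000'1000
        length -= diff;          // -> 0x1f'f800
    }
    length -= length % PAGE_SIZE; // -> 0x1f'f000, a whole number of pages
    // The usable region now spans exactly 0x1ff pages starting at 0x8000'1000.
}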
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map_fdt(MemoryManager::GlobalData& global_data, u8 const* fdt_addr)
{
auto const& fdt_header = *reinterpret_cast<DeviceTree::FlattenedDeviceTreeHeader const*>(fdt_addr);
auto fdt_buffer = ReadonlyBytes(fdt_addr, fdt_header.totalsize);
// FIXME: Parse the MemoryReservationBlock
// Schema:
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/root-node.yaml
// -> /#address-cells ∈ [1,2], /#size-cells ∈ [1,2]
// Reserved Memory:
// https://android.googlesource.com/kernel/msm/+/android-7.1.0_r0.2/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
// -> #address-cells === /#address-cells, #size-cells === /#size-cells
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/reserved-memory/reserved-memory.yaml
// Memory:
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/memory.yaml
// -> #address-cells: /#address-cells, #size-cells: /#size-cells
// FIXME: When booting from UEFI, the /memory node may not be relied upon
enum class State {
Root,
InReservedMemory,
InReservedMemoryChild,
InMemory
};
struct {
u32 depth = 0;
State state = State::Root;
Optional<u64> start {};
Optional<u64> size {};
u32 address_cells = 0;
u32 size_cells = 0;
} state;
MUST(DeviceTree::walk_device_tree(
fdt_header, fdt_buffer,
DeviceTree::DeviceTreeCallbacks {
.on_node_begin = [&state](StringView node_name) -> ErrorOr<IterationDecision> {
switch (state.state) {
case State::Root:
if (state.depth != 1)
break;
if (node_name == "reserved-memory")
state.state = State::InReservedMemory;
else if (node_name.starts_with("memory"sv))
state.state = State::InMemory;
break;
case State::InReservedMemory:
// FIXME: The node names may hint to the purpose
state.state = State::InReservedMemoryChild;
state.start = {};
state.size = {};
break;
case State::InReservedMemoryChild:
case State::InMemory:
// We should never be here
VERIFY_NOT_REACHED();
}
state.depth++;
return IterationDecision::Continue;
},
.on_node_end = [&global_data, &state](StringView node_name) -> ErrorOr<IterationDecision> {
switch (state.state) {
case State::Root:
break;
case State::InReservedMemory:
state.state = State::Root;
break;
case State::InMemory:
VERIFY(state.start.has_value() && state.size.has_value());
dbgln("MM: Memory Range {}: address: {} size {:#x}", node_name, PhysicalAddress { state.start.value() }, state.size.value());
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, PhysicalAddress { state.start.value() }, state.size.value() });
state.state = State::Root;
break;
case State::InReservedMemoryChild:
// FIXME: Handle non-static allocations
VERIFY(state.start.has_value() && state.size.has_value());
dbgln("MM: Reserved Range {}: address: {} size {:#x}", node_name, PhysicalAddress { state.start.value() }, state.size.value());
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, PhysicalAddress { state.start.value() }, state.size.value() });
// FIXME: Not all of these are "used"; only those in "memory" are actually "used"
// There might be, for example, debug DMA control registers, which are marked as reserved
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress { state.start.value() }, PhysicalAddress { state.start.value() + state.size.value() } });
state.state = State::InReservedMemory;
break;
}
state.depth--;
return IterationDecision::Continue;
},
.on_property = [&state](StringView property_name, ReadonlyBytes data) -> ErrorOr<IterationDecision> {
switch (state.state) {
case State::Root:
if (state.depth != 1)
break;
if (property_name == "#address-cells"sv) {
BigEndian<u32> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
state.address_cells = data_as_int;
VERIFY(state.address_cells != 0);
VERIFY(state.address_cells <= 2);
} else if (property_name == "#size-cells"sv) {
BigEndian<u32> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
state.size_cells = data_as_int;
VERIFY(state.size_cells != 0);
VERIFY(state.size_cells <= 2);
}
break;
case State::InReservedMemory:
// FIXME: We could check and verify that the address and size cells
// are the same as in the root node
// FIXME: Handle the ranges attribute if not empty
if (property_name == "ranges"sv && data.size() != 0)
TODO();
break;
case State::InReservedMemoryChild:
case State::InMemory:
if (property_name == "reg"sv) {
VERIFY(state.address_cells);
VERIFY(state.size_cells);
// FIXME: We may get more than one range here
if (data.size() > (state.address_cells + state.size_cells) * sizeof(u32))
TODO();
if (state.address_cells == 1) {
BigEndian<u32> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
state.start = data_as_int;
data = data.slice(sizeof(u32));
} else {
BigEndian<u64> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u64));
state.start = data_as_int;
data = data.slice(sizeof(u64));
}
if (state.size_cells == 1) {
BigEndian<u32> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
state.size = data_as_int;
data = data.slice(sizeof(u32));
} else {
BigEndian<u64> data_as_int;
__builtin_memcpy(&data_as_int, data.data(), sizeof(u64));
state.size = data_as_int;
data = data.slice(sizeof(u64));
}
} else {
// Reserved Memory:
// FIXME: Handle `compatible: "framebuffer";`
// FIXME: Handle `compatible: "shared-dma-pool";`, `compatible: "restricted-dma-pool";`
// FIXME: Handle "iommu-addresses" property
// FIXME: Support "size" and "align" property
// Also "alloc-ranges"
// FIXME: Support no-map
// FIXME: Support no-map-fixup
// FIXME: Support reusable
}
break;
}
return IterationDecision::Continue;
},
.on_noop = []() -> ErrorOr<IterationDecision> { return IterationDecision::Continue; },
.on_end = []() -> ErrorOr<void> { return {}; },
}));
// FDTs do not seem to guarantee fully sorted memory ranges, especially as we get them from at least two structures
quick_sort(global_data.physical_memory_ranges, [](auto& a, auto& b) -> bool { return a.start < b.start; });
}
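To illustrate the reg decode in on_property above: with the common RV64 values of #address-cells == 2 and #size-cells == 2, a /memory node carrying reg = <0x0 0x80000000 0x1 0x00000000> arrives as sixteen big-endian bytes and decodes to start 0x8000'0000 with a size of 4 GiB. A self-contained sketch using the same __builtin_memcpy approach as the handler (hypothetical values):

#include <AK/Endian.h>
#include <AK/Types.h>

static void reg_decode_example()
{
    u8 const reg[] = {
        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, // two address cells
        0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, // two size cells
    };
    BigEndian<u64> start, size;
    __builtin_memcpy(&start, &reg[0], sizeof(u64));
    __builtin_memcpy(&size, &reg[8], sizeof(u64));
    // start == 0x8000'0000 and size == 0x1'0000'0000 after byte-swapping.
}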
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map_multiboot(MemoryManager::GlobalData& global_data)
{
// Register used memory regions that we know of.
if (multiboot_flags & 0x4) {
auto* bootmods_start = multiboot_copy_boot_modules_array;
auto* bootmods_end = bootmods_start + multiboot_copy_boot_modules_count;
for (auto* bootmod = bootmods_start; bootmod < bootmods_end; bootmod++) {
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress(bootmod->start), PhysicalAddress(bootmod->end) });
}
}
auto* mmap_begin = multiboot_memory_map;
auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;
struct ContiguousPhysicalVirtualRange {
PhysicalAddress lower;
PhysicalAddress upper;
};
Optional<ContiguousPhysicalVirtualRange> last_contiguous_physical_range;
for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
// We have to copy these onto the stack, because we take a reference to these when printing them out,
// and doing so on a packed struct field is UB.
auto address = mmap->addr;
auto length = mmap->len;
ArmedScopeGuard write_back_guard = [&]() {
mmap->addr = address;
mmap->len = length;
};
dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);
auto start_address = PhysicalAddress(address);
switch (mmap->type) {
case (MULTIBOOT_MEMORY_AVAILABLE):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
break;
case (MULTIBOOT_MEMORY_RESERVED):
#if ARCH(X86_64)
// Workaround for https://gitlab.com/qemu-project/qemu/-/commit/8504f129450b909c88e199ca44facd35d38ba4de
// That commit added a reserved 12GiB entry for the benefit of virtual firmware.
// We can safely ignore this block as it isn't actually reserved on any real hardware.
// From: https://lore.kernel.org/all/20220701161014.3850-1-joao.m.martins@oracle.com/
// "Always add the HyperTransport range into e820 even when the relocation isn't
// done *and* there's >= 40 phys bit that would put max phyusical boundary to 1T
// This should allow virtual firmware to avoid the reserved range at the
// 1T boundary on VFs with big bars."
if (address != 0x000000fd00000000 || length != (0x000000ffffffffff - 0x000000fd00000000) + 1)
#endif
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
break;
case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
break;
case (MULTIBOOT_MEMORY_NVS):
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
break;
case (MULTIBOOT_MEMORY_BADRAM):
dmesgln("MM: Warning, detected bad memory range!");
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
break;
default:
dbgln("MM: Unknown range!");
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
break;
}
}
}
UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
m_global_data.with([&](auto& global_data) {

View file

@@ -240,6 +240,21 @@ private:
MemoryManager();
~MemoryManager();
struct GlobalData {
GlobalData();
SystemMemoryInfo system_memory_info;
Vector<NonnullOwnPtr<PhysicalRegion>> physical_regions;
OwnPtr<PhysicalRegion> physical_pages_region;
RegionTree region_tree;
Vector<UsedMemoryRange> used_memory_ranges;
Vector<PhysicalMemoryRange> physical_memory_ranges;
Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
};
void initialize_physical_pages();
void register_reserved_ranges();
@@ -251,6 +266,8 @@ private:
void protect_kernel_image();
void parse_memory_map();
void parse_memory_map_fdt(GlobalData&, u8 const* fdt_addr);
void parse_memory_map_multiboot(GlobalData&);
static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);
@@ -286,21 +303,6 @@ private:
PhysicalPageEntry* m_physical_page_entries { nullptr };
size_t m_physical_page_entries_count { 0 };
struct GlobalData {
GlobalData();
SystemMemoryInfo system_memory_info;
Vector<NonnullOwnPtr<PhysicalRegion>> physical_regions;
OwnPtr<PhysicalRegion> physical_pages_region;
RegionTree region_tree;
Vector<UsedMemoryRange> used_memory_ranges;
Vector<PhysicalMemoryRange> physical_memory_ranges;
Vector<ContiguousReservedMemoryRange> reserved_memory_ranges;
};
SpinlockProtected<GlobalData, LockRank::None> m_global_data;
};

View file

@@ -22,17 +22,16 @@
#ifdef __cplusplus
namespace Kernel {
# if ARCH(X86_64)
struct [[gnu::packed]] BootInfo {
u32 start_of_prekernel_image;
u32 end_of_prekernel_image;
u64 physical_to_virtual_offset;
u64 kernel_mapping_base;
u64 kernel_load_base;
# if ARCH(X86_64)
u32 gdt64ptr;
u16 code64_sel;
u32 boot_pml4t;
# endif
u32 boot_pdpt;
u32 boot_pd0;
u32 boot_pd_kernel;
@@ -50,5 +49,14 @@ struct [[gnu::packed]] BootInfo {
u8 multiboot_framebuffer_bpp;
u8 multiboot_framebuffer_type;
};
# elif ARCH(AARCH64)
struct BootInfo { };
# elif ARCH(RISCV64)
struct BootInfo {
FlatPtr mhartid;
PhysicalPtr fdt_phys_addr;
};
# endif
}
#endif
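
With BootInfo now defined per architecture, only the x86-64 variant keeps [[gnu::packed]]: it crosses the Prekernel/kernel boundary with a fixed byte layout, while the riscv64 variant is produced and consumed by the same kernel binary, so natural alignment suffices. A small sketch of the invariants the riscv64 struct can then rely on (assuming FlatPtr and PhysicalPtr are both 64-bit here):

#if ARCH(RISCV64)
// No foreign producer depends on the byte layout, so no packing is needed.
static_assert(sizeof(Kernel::BootInfo) == 2 * sizeof(FlatPtr));
static_assert(alignof(Kernel::BootInfo) == alignof(FlatPtr));
#endif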