2020-01-18 08:38:21 +00:00
|
|
|
/*
|
2022-01-16 12:10:05 +00:00
|
|
|
* Copyright (c) 2018-2022, Andreas Kling <kling@serenityos.org>
|
2020-01-18 08:38:21 +00:00
|
|
|
*
|
2021-04-22 08:24:48 +00:00
|
|
|
* SPDX-License-Identifier: BSD-2-Clause
|
2020-01-18 08:38:21 +00:00
|
|
|
*/
|
|
|
|
|
2018-10-17 21:13:55 +00:00
|
|
|
#include <AK/Assertions.h>
|
2024-01-26 13:37:36 +00:00
|
|
|
#include <AK/MemoryStream.h>
|
|
|
|
#include <AK/QuickSort.h>
|
2020-03-23 12:45:10 +00:00
|
|
|
#include <AK/StringView.h>
|
2022-04-02 22:55:20 +00:00
|
|
|
#include <Kernel/Arch/CPU.h>
|
2022-04-02 22:56:20 +00:00
|
|
|
#include <Kernel/Arch/PageDirectory.h>
|
2022-04-02 22:48:04 +00:00
|
|
|
#include <Kernel/Arch/PageFault.h>
|
2022-04-02 22:56:20 +00:00
|
|
|
#include <Kernel/Arch/RegisterState.h>
|
2023-02-24 18:21:53 +00:00
|
|
|
#include <Kernel/Boot/BootInfo.h>
|
|
|
|
#include <Kernel/Boot/Multiboot.h>
|
2019-06-07 09:43:58 +00:00
|
|
|
#include <Kernel/FileSystem/Inode.h>
|
2020-08-24 22:38:20 +00:00
|
|
|
#include <Kernel/Heap/kmalloc.h>
|
2023-02-24 18:33:43 +00:00
|
|
|
#include <Kernel/Interrupts/InterruptDisabler.h>
|
2022-01-28 14:04:34 +00:00
|
|
|
#include <Kernel/KSyms.h>
|
2023-02-24 18:10:59 +00:00
|
|
|
#include <Kernel/Library/Panic.h>
|
|
|
|
#include <Kernel/Library/StdLib.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/AnonymousVMObject.h>
|
2024-05-10 20:16:01 +00:00
|
|
|
#include <Kernel/Memory/MMIOVMObject.h>
|
2021-08-06 08:45:34 +00:00
|
|
|
#include <Kernel/Memory/MemoryManager.h>
|
|
|
|
#include <Kernel/Memory/PhysicalRegion.h>
|
|
|
|
#include <Kernel/Memory/SharedInodeVMObject.h>
|
2022-04-03 11:28:16 +00:00
|
|
|
#include <Kernel/Prekernel/Prekernel.h>
|
2021-06-22 15:40:16 +00:00
|
|
|
#include <Kernel/Sections.h>
|
2023-12-29 00:36:39 +00:00
|
|
|
#include <Kernel/Security/AddressSanitizer.h>
|
2023-02-24 17:45:37 +00:00
|
|
|
#include <Kernel/Tasks/Process.h>
|
2024-01-26 13:37:36 +00:00
|
|
|
#include <Userland/Libraries/LibDeviceTree/FlattenedDeviceTree.h>
|
2018-10-17 21:13:55 +00:00
|
|
|
|
2021-07-22 20:11:17 +00:00
|
|
|
extern u8 start_of_kernel_image[];
|
|
|
|
extern u8 end_of_kernel_image[];
|
|
|
|
extern u8 start_of_kernel_text[];
|
|
|
|
extern u8 start_of_kernel_data[];
|
|
|
|
extern u8 end_of_kernel_bss[];
|
|
|
|
extern u8 start_of_ro_after_init[];
|
|
|
|
extern u8 end_of_ro_after_init[];
|
|
|
|
extern u8 start_of_unmap_after_init[];
|
|
|
|
extern u8 end_of_unmap_after_init[];
|
|
|
|
extern u8 start_of_kernel_ksyms[];
|
|
|
|
extern u8 end_of_kernel_ksyms[];
|
2020-02-16 00:27:42 +00:00
|
|
|
|
2023-04-28 15:27:24 +00:00
|
|
|
extern multiboot_module_entry_t multiboot_copy_boot_modules_array[16];
|
|
|
|
extern size_t multiboot_copy_boot_modules_count;
|
|
|
|
|
2021-08-06 11:49:36 +00:00
|
|
|
namespace Kernel::Memory {
|
2020-02-16 00:27:42 +00:00
|
|
|
|
2021-12-24 14:22:11 +00:00
|
|
|
// Rounds x up to the next multiple of PAGE_SIZE.
// Fails with EINVAL when x lies in the last (partial) page of the address
// space, i.e. when rounding up would wrap around zero.
ErrorOr<FlatPtr> page_round_up(FlatPtr x)
{
    FlatPtr const highest_safe_input = explode_byte(0xFF) & ~0xFFF;
    if (x > highest_safe_input)
        return Error::from_errno(EINVAL);
    FlatPtr const page_mask = ~(PAGE_SIZE - 1);
    return (x + PAGE_SIZE - 1) & page_mask;
}
|
|
|
|
|
2021-08-07 19:34:11 +00:00
|
|
|
// NOTE: We can NOT use Singleton for this class, because
|
2020-08-25 01:35:19 +00:00
|
|
|
// MemoryManager::initialize is called *before* global constructors are
|
2021-08-07 19:34:11 +00:00
|
|
|
// run. If we do, then Singleton would get re-initialized, causing
|
2020-08-25 01:35:19 +00:00
|
|
|
// the memory manager to be initialized twice!
|
2020-08-22 15:53:34 +00:00
|
|
|
static MemoryManager* s_the;
|
2022-08-18 15:36:54 +00:00
|
|
|
|
2021-08-06 11:49:36 +00:00
|
|
|
// Returns the one global MemoryManager instance.
// NOTE: s_the is assigned at the very start of the MemoryManager
// constructor, so this must not be called before the MM has been
// constructed (use is_initialized() to check).
MemoryManager& MemoryManager::the()
{
    return *s_the;
}
|
|
|
|
|
2020-08-29 22:41:30 +00:00
|
|
|
// Returns true once the global MemoryManager has been constructed
// (the constructor publishes s_the as its first action).
bool MemoryManager::is_initialized()
{
    return s_the != nullptr;
}
|
|
|
|
|
2022-04-03 11:28:16 +00:00
|
|
|
// Computes the architecture-specific virtual address range that kernel
// regions may be allocated from (used to seed GlobalData::region_tree).
static UNMAP_AFTER_INIT VirtualRange kernel_virtual_range()
{
#if ARCH(X86_64)
    size_t kernel_range_start = kernel_mapping_base + 2 * MiB; // The first 2 MiB are used for mapping the pre-kernel
    return VirtualRange { VirtualAddress(kernel_range_start), KERNEL_PD_END - kernel_range_start };
#elif ARCH(AARCH64) || ARCH(RISCV64)
    // NOTE: This is not the same as x86_64, because the aarch64 and riscv64 kernels currently don't use the pre-kernel.
    return VirtualRange { VirtualAddress(kernel_mapping_base), KERNEL_PD_END - kernel_mapping_base };
#else
#    error Unknown architecture
#endif
}
|
|
|
|
|
2022-08-25 14:46:13 +00:00
|
|
|
// Seeds the kernel's region tree with the architecture-specific virtual
// range that kernel regions may be carved out of.
MemoryManager::GlobalData::GlobalData()
    : region_tree(kernel_virtual_range())
{
}
|
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
UNMAP_AFTER_INIT MemoryManager::MemoryManager()
{
    // Publish the singleton pointer first, so that code running below
    // (and MemoryManager::the()) can already observe the instance.
    s_the = this;

    // Discover physical memory, switch to the kernel page directory, and
    // lock down the kernel image — strictly in this order.
    parse_memory_map();
    activate_kernel_page_directory(kernel_page_directory());
    protect_kernel_image();

    // We're temporarily "committing" to two pages that we need to allocate below
    auto committed_pages = commit_physical_pages(2).release_value();

    // Single page of zeroes shared (CoW-style, presumably — confirm at use
    // sites) wherever a zero-filled page is needed.
    m_shared_zero_page = committed_pages.take_one();

    // We're wasting a page here, we just need a special tag (physical
    // address) so that we know when we need to lazily allocate a page
    // that we should be drawing this page from the committed pool rather
    // than potentially failing if no pages are available anymore.
    // By using a tag we don't have to query the VMObject for every page
    // whether it was committed or not
    m_lazy_committed_page = committed_pages.take_one();

#ifdef HAS_ADDRESS_SANITIZER
    initialize_kasan_shadow_memory();
#endif
}
|
2020-01-17 21:07:20 +00:00
|
|
|
|
2022-03-16 19:15:15 +00:00
|
|
|
// Defaulted out-of-line destructor; marked UNMAP_AFTER_INIT, so its code is
// only reachable during early boot.
UNMAP_AFTER_INIT MemoryManager::~MemoryManager() = default;
|
2020-01-17 20:03:52 +00:00
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Locks down the kernel image: text/rodata become read-only, and (when the
// CPU supports NX) data/bss/heap become non-executable.
UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());

    // Disable writing to the kernel text and rodata segments.
    for (auto const* page = start_of_kernel_text; page < start_of_kernel_data; page += PAGE_SIZE)
        ensure_pte(kernel_page_directory(), VirtualAddress(page))->set_writable(false);

    if (!Processor::current().has_nx())
        return;

    // Disable execution of the kernel data, bss and heap segments.
    for (auto const* page = start_of_kernel_data; page < end_of_kernel_image; page += PAGE_SIZE)
        ensure_pte(kernel_page_directory(), VirtualAddress(page))->set_execute_disabled(true);
}
|
|
|
|
|
2021-12-19 00:15:12 +00:00
|
|
|
// Tears down the mappings covering the pre-kernel image once it is no
// longer needed.
UNMAP_AFTER_INIT void MemoryManager::unmap_prekernel()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());

    auto start = start_of_prekernel_image.page_base().get();
    auto end = end_of_prekernel_image.page_base().get();

    // NOTE: The loop is inclusive of `end` (it is a page base, not one-past).
    // The final release is flagged IsLastPTERelease::Yes — presumably so
    // release_pte() can finish per-page-table bookkeeping; confirm against
    // its definition.
    for (auto i = start; i <= end; i += PAGE_SIZE)
        release_pte(kernel_page_directory(), VirtualAddress(i), i == end ? IsLastPTERelease::Yes : IsLastPTERelease::No);

    flush_tlb(&kernel_page_directory(), VirtualAddress(start), (end - start) / PAGE_SIZE);
}
|
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Write-protects the .ro_after_init section now that init-time writes to it
// are finished, flushing the TLB entry for each page as it goes.
UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());

    // Disable writing to the .ro_after_init section
    auto current = (FlatPtr)&start_of_ro_after_init;
    auto const section_end = (FlatPtr)&end_of_ro_after_init;
    while (current < section_end) {
        auto virtual_address = VirtualAddress(current);
        ensure_pte(kernel_page_directory(), virtual_address)->set_writable(false);
        flush_tlb(&kernel_page_directory(), virtual_address);
        current += PAGE_SIZE;
    }
}
|
|
|
|
|
2021-07-16 07:50:34 +00:00
|
|
|
void MemoryManager::unmap_text_after_init()
|
2021-02-19 17:21:54 +00:00
|
|
|
{
|
2021-08-21 23:49:22 +00:00
|
|
|
SpinlockLocker page_lock(kernel_page_directory().get_lock());
|
2021-02-19 17:21:54 +00:00
|
|
|
|
|
|
|
auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
|
2021-12-24 14:22:11 +00:00
|
|
|
auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors();
|
2021-02-19 17:21:54 +00:00
|
|
|
|
|
|
|
// Unmap the entire .unmap_after_init section
|
|
|
|
for (auto i = start; i < end; i += PAGE_SIZE) {
|
|
|
|
auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
|
|
|
|
pte.clear();
|
|
|
|
flush_tlb(&kernel_page_directory(), VirtualAddress(i));
|
|
|
|
}
|
|
|
|
|
|
|
|
dmesgln("Unmapped {} KiB of kernel text after init! :^)", (end - start) / KiB);
|
2021-07-16 07:50:34 +00:00
|
|
|
}
|
|
|
|
|
2021-12-24 13:21:08 +00:00
|
|
|
// Write-protects the kernel symbol table pages once KSyms loading is done.
UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
{
    SpinlockLocker page_lock(kernel_page_directory().get_lock());

    auto const first_page = page_round_down((FlatPtr)start_of_kernel_ksyms);
    auto const pages_end = page_round_up((FlatPtr)end_of_kernel_ksyms).release_value_but_fixme_should_propagate_errors();

    for (auto address = first_page; address < pages_end; address += PAGE_SIZE) {
        auto virtual_address = VirtualAddress(address);
        ensure_pte(kernel_page_directory(), virtual_address)->set_writable(false);
        flush_tlb(&kernel_page_directory(), virtual_address);
    }

    dmesgln("Write-protected kernel symbols after init.");
}
|
|
|
|
|
2022-01-02 23:27:21 +00:00
|
|
|
// Invokes `callback` for every known physical memory range, stopping early
// if the callback returns anything other than IterationDecision::Continue.
// Returns the decision that ended the iteration.
IterationDecision MemoryManager::for_each_physical_memory_range(Function<IterationDecision(PhysicalMemoryRange const&)> callback)
{
    return m_global_data.with([&](auto& global_data) {
        VERIFY(!global_data.physical_memory_ranges.is_empty());
        for (auto& range : global_data.physical_memory_ranges) {
            if (auto decision = callback(range); decision != IterationDecision::Continue)
                return decision;
        }
        return IterationDecision::Continue;
    });
}
|
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Coalesces runs of adjacent PhysicalMemoryRangeType::Reserved entries in
// physical_memory_ranges into single spans and records them in
// reserved_memory_ranges. Assumes physical_memory_ranges is sorted by
// start address — TODO confirm with the code that populates it.
UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
{
    m_global_data.with([&](auto& global_data) {
        VERIFY(!global_data.physical_memory_ranges.is_empty());
        // `range.start` doubles as state: null means "no reserved span open".
        ContiguousReservedMemoryRange range;
        for (auto& current_range : global_data.physical_memory_ranges) {
            if (current_range.type != PhysicalMemoryRangeType::Reserved) {
                // A non-reserved entry closes any open reserved span.
                if (range.start.is_null())
                    continue;
                global_data.reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, current_range.start.get() - range.start.get() });
                range.start.set((FlatPtr) nullptr);
                continue;
            }
            // Reserved entry while a span is already open: keep extending it.
            if (!range.start.is_null()) {
                continue;
            }
            // Reserved entry with no open span: start a new one here.
            range.start = current_range.start;
        }
        // If the table ends on a reserved entry with a span still open,
        // close it at the end of the very last physical range.
        if (global_data.physical_memory_ranges.last().type != PhysicalMemoryRangeType::Reserved)
            return;
        if (range.start.is_null())
            return;
        global_data.reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, global_data.physical_memory_ranges.last().start.get() + global_data.physical_memory_ranges.last().length - range.start.get() });
    });
}
|
|
|
|
|
2021-12-23 19:49:31 +00:00
|
|
|
// Returns true only when [start_address, start_address + read_length) lies
// entirely inside a single contiguous reserved memory range; userspace may
// then read that physical memory (e.g. via /dev/mem).
bool MemoryManager::is_allowed_to_read_physical_memory_for_userspace(PhysicalAddress start_address, size_t read_length) const
{
    // Note: Guard against overflow in case someone tries to mmap on the edge of
    // the RAM
    if (start_address.offset_addition_would_overflow(read_length))
        return false;
    auto end_address = start_address.offset(read_length);

    return m_global_data.with([&](auto& global_data) {
        for (auto const& reserved_range : global_data.reserved_memory_ranges) {
            bool begins_inside = !(reserved_range.start > start_address);
            bool ends_inside = !(reserved_range.start.offset(reserved_range.length) < end_address);
            if (begins_inside && ends_inside)
                return true;
        }
        return false;
    });
}
|
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Builds the kernel's picture of physical memory: records known-used ranges,
// ingests the bootloader-provided memory map (FDT or multiboot), carves the
// usable memory into page-aligned contiguous PhysicalRegions, and initializes
// the physical page bookkeeping.
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
{
    // Register used memory regions that we know of.
    m_global_data.with([this](auto& global_data) {
        global_data.used_memory_ranges.ensure_capacity(4);
#if ARCH(X86_64)
        // NOTE: We don't touch the first 1 MiB of RAM on x86-64 even if it's usable as indicated
        // by a certain memory map. There are 2 reasons for this:
        //
        // The first reason is specified for Linux doing the same thing in
        // https://cateee.net/lkddb/web-lkddb/X86_RESERVE_LOW.html -
        // "By default we reserve the first 64K of physical RAM, as a number of BIOSes are known
        // to corrupt that memory range during events such as suspend/resume or monitor cable insertion,
        // so it must not be used by the kernel."
        //
        // Linux also allows configuring this knob in compiletime for this reserved range length, that might
        // also include the EBDA and other potential ranges in the first 1 MiB that could be corrupted by the BIOS:
        // "You can set this to 4 if you are absolutely sure that you trust the BIOS to get all its memory
        // reservations and usages right. If you know your BIOS have problems beyond the default 64K area,
        // you can set this to 640 to avoid using the entire low memory range."
        //
        // The second reason is that the first 1 MiB memory range should also include the actual BIOS blob
        // together with possible execution blob code for various option ROMs, which should not be touched
        // by our kernel.
        //
        // **To be completely on the safe side** and never worry about where the EBDA is located, how BIOS might
        // corrupt the low memory range during power state changing, other bad behavior of some BIOS might change
        // a value in the very first 64k bytes of RAM, etc - we should just ignore this range completely.
        global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
#endif
        // The kernel image itself is always a used range.
        global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });

#if ARCH(RISCV64)
        // FIXME: AARCH64 might be able to make use of this code path
        // Some x86 platforms also provide flattened device trees
        parse_memory_map_fdt(global_data, s_fdt_storage);
#else
        parse_memory_map_multiboot(global_data);
#endif

        // Now we need to setup the physical regions we will use later
        struct ContiguousPhysicalVirtualRange {
            PhysicalAddress lower;
            PhysicalAddress upper;
        };
        Optional<ContiguousPhysicalVirtualRange> last_contiguous_physical_range;
        for (auto range : global_data.physical_memory_ranges) {
            if (range.type != PhysicalMemoryRangeType::Usable)
                continue;
            auto address = range.start.get();
            auto length = range.length;

            // Fix up unaligned memory regions.
            auto diff = (FlatPtr)address % PAGE_SIZE;
            if (diff != 0) {
                dmesgln("MM: Got an unaligned usable physical_region from the bootloader; correcting {:p} by {} bytes", address, diff);
                diff = PAGE_SIZE - diff;
                address += diff;
                length -= diff;
            }
            if ((length % PAGE_SIZE) != 0) {
                dmesgln("MM: Got an unaligned usable physical_region from the bootloader; correcting length {} by {} bytes", length, length % PAGE_SIZE);
                length -= length % PAGE_SIZE;
            }
            if (length < PAGE_SIZE) {
                dmesgln("MM: Memory usable physical_region from bootloader is too small; we want >= {} bytes, but got {} bytes", PAGE_SIZE, length);
                continue;
            }

            // Walk the usable range page by page, splitting it into maximal
            // contiguous runs that avoid all used_memory_ranges.
            // FIXME: This might have a nicer solution than slicing the ranges apart,
            //        to just put them back together when we dont find a used range in them
            for (PhysicalSize page_base = address; page_base <= (address + length); page_base += PAGE_SIZE) {
                auto addr = PhysicalAddress(page_base);

                // Skip used memory ranges.
                bool should_skip = false;
                for (auto& used_range : global_data.used_memory_ranges) {
                    if (addr.get() >= used_range.start.get() && addr.get() <= used_range.end.get()) {
                        should_skip = true;
                        // Jump ahead to the end of the used range.
                        page_base = used_range.end.get();
                        break;
                    }
                }
                if (should_skip)
                    continue;

                if (!last_contiguous_physical_range.has_value() || last_contiguous_physical_range->upper.offset(PAGE_SIZE) != addr) {
                    // This page is not adjacent to the open run: flush the
                    // open run (if any) as a PhysicalRegion and start a new one.
                    if (last_contiguous_physical_range.has_value()) {
                        auto range = last_contiguous_physical_range.release_value();
                        // FIXME: OOM?
                        global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
                    }
                    last_contiguous_physical_range = ContiguousPhysicalVirtualRange { .lower = addr, .upper = addr };
                } else {
                    last_contiguous_physical_range->upper = addr;
                }
            }
            // FIXME: If this is ever false, theres a good chance that all physical memory is already spent
            if (last_contiguous_physical_range.has_value()) {
                auto range = last_contiguous_physical_range.release_value();
                // FIXME: OOM?
                global_data.physical_regions.append(PhysicalRegion::try_create(range.lower, range.upper).release_nonnull());
            }
        }

        for (auto& region : global_data.physical_regions)
            global_data.system_memory_info.physical_pages += region->size();

        register_reserved_ranges();
        for (auto& range : global_data.reserved_memory_ranges) {
            dmesgln("MM: Contiguous reserved range from {}, length is {}", range.start, range.length);
        }

        initialize_physical_pages();

        VERIFY(global_data.system_memory_info.physical_pages > 0);

        // We start out with no committed pages
        global_data.system_memory_info.physical_pages_uncommitted = global_data.system_memory_info.physical_pages;

        for (auto& used_range : global_data.used_memory_ranges) {
            dmesgln("MM: {} range @ {} - {} (size {:#x})", UserMemoryRangeTypeNames[to_underlying(used_range.type)], used_range.start, used_range.end.offset(-1), used_range.end.as_ptr() - used_range.start.as_ptr());
        }

        for (auto& region : global_data.physical_regions) {
            dmesgln("MM: User physical region: {} - {} (size {:#x})", region->lower(), region->upper().offset(-1), PAGE_SIZE * region->size());
            region->initialize_zones();
        }
    });
}
|
|
|
|
|
2024-01-26 13:37:36 +00:00
|
|
|
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map_fdt(MemoryManager::GlobalData& global_data, u8 const* fdt_addr)
|
|
|
|
{
|
|
|
|
auto const& fdt_header = *reinterpret_cast<DeviceTree::FlattenedDeviceTreeHeader const*>(fdt_addr);
|
|
|
|
auto fdt_buffer = ReadonlyBytes(fdt_addr, fdt_header.totalsize);
|
|
|
|
|
2024-03-03 12:52:35 +00:00
|
|
|
auto const* mem_reserve_block = reinterpret_cast<DeviceTree::FlattenedDeviceTreeReserveEntry const*>(&fdt_buffer[fdt_header.off_mem_rsvmap]);
|
|
|
|
|
|
|
|
u64 next_block_offset = fdt_header.off_mem_rsvmap + sizeof(DeviceTree::FlattenedDeviceTreeReserveEntry);
|
|
|
|
while ((next_block_offset < fdt_header.off_dt_struct) && (*mem_reserve_block != DeviceTree::FlattenedDeviceTreeReserveEntry {})) {
|
|
|
|
dbgln("MM: Reserved Range /memreserve/: address: {} size {:#x}", PhysicalAddress { mem_reserve_block->address }, mem_reserve_block->size);
|
|
|
|
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, PhysicalAddress { mem_reserve_block->address }, mem_reserve_block->size });
|
|
|
|
// FIXME: Not all of these are "used", only those in "memory" are actually "used"
|
|
|
|
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress { mem_reserve_block->address }, PhysicalAddress { mem_reserve_block->address + mem_reserve_block->size } });
|
|
|
|
++mem_reserve_block;
|
|
|
|
next_block_offset += sizeof(DeviceTree::FlattenedDeviceTreeReserveEntry);
|
|
|
|
}
|
2024-01-26 13:37:36 +00:00
|
|
|
|
|
|
|
// Schema:
|
|
|
|
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/root-node.yaml
|
|
|
|
// -> /#address-cells ∈ [1,2], /#size-cells ∈ [1,2]
|
|
|
|
// Reserved Memory:
|
|
|
|
// https://android.googlesource.com/kernel/msm/+/android-7.1.0_r0.2/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
|
|
|
|
// -> #address-cells === /#address-cells, #size-cells === /#size-cells
|
|
|
|
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/reserved-memory/reserved-memory.yaml
|
|
|
|
// Memory:
|
|
|
|
// https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/memory.yaml
|
2024-03-03 12:52:35 +00:00
|
|
|
// -> #address-cells: /#address-cells , #size-cells: /#size-cells
|
2024-01-26 13:37:36 +00:00
|
|
|
|
|
|
|
// FIXME: When booting from UEFI, the /memory node may not be relied upon
|
|
|
|
enum class State {
|
|
|
|
Root,
|
|
|
|
InReservedMemory,
|
|
|
|
InReservedMemoryChild,
|
|
|
|
|
|
|
|
InMemory
|
|
|
|
};
|
|
|
|
|
|
|
|
struct {
|
|
|
|
u32 depth = 0;
|
|
|
|
State state = State::Root;
|
|
|
|
Optional<u64> start {};
|
|
|
|
Optional<u64> size {};
|
|
|
|
u32 address_cells = 0;
|
|
|
|
u32 size_cells = 0;
|
|
|
|
} state;
|
|
|
|
|
|
|
|
MUST(DeviceTree::walk_device_tree(
|
|
|
|
fdt_header, fdt_buffer,
|
|
|
|
DeviceTree::DeviceTreeCallbacks {
|
|
|
|
.on_node_begin = [&state](StringView node_name) -> ErrorOr<IterationDecision> {
|
|
|
|
switch (state.state) {
|
|
|
|
case State::Root:
|
|
|
|
if (state.depth != 1)
|
|
|
|
break;
|
|
|
|
if (node_name == "reserved-memory")
|
|
|
|
state.state = State::InReservedMemory;
|
|
|
|
else if (node_name.starts_with("memory"sv))
|
|
|
|
state.state = State::InMemory;
|
|
|
|
break;
|
|
|
|
case State::InReservedMemory:
|
|
|
|
// FIXME: The node names may hint to the purpose
|
|
|
|
state.state = State::InReservedMemoryChild;
|
|
|
|
state.start = {};
|
|
|
|
state.size = {};
|
|
|
|
break;
|
|
|
|
case State::InReservedMemoryChild:
|
|
|
|
case State::InMemory:
|
|
|
|
// We should never be here
|
|
|
|
VERIFY_NOT_REACHED();
|
|
|
|
}
|
|
|
|
state.depth++;
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
},
|
|
|
|
.on_node_end = [&global_data, &state](StringView node_name) -> ErrorOr<IterationDecision> {
|
|
|
|
switch (state.state) {
|
|
|
|
case State::Root:
|
|
|
|
break;
|
|
|
|
case State::InReservedMemory:
|
|
|
|
state.state = State::Root;
|
|
|
|
break;
|
|
|
|
case State::InMemory:
|
|
|
|
VERIFY(state.start.has_value() && state.size.has_value());
|
|
|
|
dbgln("MM: Memory Range {}: address: {} size {:#x}", node_name, PhysicalAddress { state.start.value() }, state.size.value());
|
|
|
|
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, PhysicalAddress { state.start.value() }, state.size.value() });
|
|
|
|
state.state = State::Root;
|
|
|
|
break;
|
|
|
|
case State::InReservedMemoryChild:
|
|
|
|
// FIXME: Handle non static allocations,
|
2024-03-03 12:55:52 +00:00
|
|
|
if (!state.start.has_value()) {
|
|
|
|
VERIFY(state.size.has_value());
|
|
|
|
dbgln("MM: Non static reserved memory range {} of size {:#x}, skipping for now", node_name, state.size.value());
|
|
|
|
state.state = State::InReservedMemory;
|
|
|
|
break;
|
|
|
|
}
|
2024-01-26 13:37:36 +00:00
|
|
|
VERIFY(state.start.has_value() && state.size.has_value());
|
|
|
|
dbgln("MM: Reserved Range {}: address: {} size {:#x}", node_name, PhysicalAddress { state.start.value() }, state.size.value());
|
|
|
|
global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, PhysicalAddress { state.start.value() }, state.size.value() });
|
|
|
|
// FIXME: Not all of these are "used", only those in "memory" are actually "used"
|
|
|
|
// There might be for example debug DMA control registers, which are marked as reserved
|
|
|
|
global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, PhysicalAddress { state.start.value() }, PhysicalAddress { state.start.value() + state.size.value() } });
|
|
|
|
state.state = State::InReservedMemory;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
state.depth--;
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
},
|
|
|
|
.on_property = [&state](StringView property_name, ReadonlyBytes data) -> ErrorOr<IterationDecision> {
|
|
|
|
switch (state.state) {
|
|
|
|
case State::Root:
|
|
|
|
if (state.depth != 1)
|
|
|
|
break;
|
|
|
|
if (property_name == "#address-cells"sv) {
|
|
|
|
BigEndian<u32> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
|
|
|
|
state.address_cells = data_as_int;
|
|
|
|
VERIFY(state.address_cells != 0);
|
|
|
|
VERIFY(state.address_cells <= 2);
|
|
|
|
} else if (property_name == "#size-cells"sv) {
|
|
|
|
BigEndian<u32> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
|
|
|
|
state.size_cells = data_as_int;
|
|
|
|
VERIFY(state.size_cells != 0);
|
|
|
|
VERIFY(state.size_cells <= 2);
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case State::InReservedMemory:
|
|
|
|
// FIXME: We could check and verify that the address and size cells
|
|
|
|
// are the same as in the root node
|
|
|
|
// FIXME: Handle the ranges attribute if not empty
|
|
|
|
if (property_name == "ranges"sv && data.size() != 0)
|
|
|
|
TODO();
|
|
|
|
break;
|
|
|
|
case State::InReservedMemoryChild:
|
|
|
|
case State::InMemory:
|
|
|
|
if (property_name == "reg"sv) {
|
|
|
|
VERIFY(state.address_cells);
|
|
|
|
VERIFY(state.size_cells);
|
|
|
|
// FIXME: We may get more than one range here
|
|
|
|
if (data.size() > (state.address_cells + state.size_cells) * sizeof(u32))
|
|
|
|
TODO();
|
|
|
|
if (state.address_cells == 1) {
|
|
|
|
BigEndian<u32> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
|
|
|
|
state.start = data_as_int;
|
|
|
|
data = data.slice(sizeof(u32));
|
|
|
|
} else {
|
|
|
|
BigEndian<u64> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u64));
|
|
|
|
state.start = data_as_int;
|
|
|
|
data = data.slice(sizeof(u64));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (state.size_cells == 1) {
|
|
|
|
BigEndian<u32> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u32));
|
|
|
|
state.size = data_as_int;
|
|
|
|
data = data.slice(sizeof(u32));
|
|
|
|
} else {
|
|
|
|
BigEndian<u64> data_as_int;
|
|
|
|
__builtin_memcpy(&data_as_int, data.data(), sizeof(u64));
|
|
|
|
state.size = data_as_int;
|
|
|
|
data = data.slice(sizeof(u64));
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Reserved Memory:
|
|
|
|
// FIXME: Handle `compatible: "framebuffer";`
|
|
|
|
// FIMXE: Handle `compatible: "shared-dma-pool";`, `compatible: "restricted-dma-pool";`
|
|
|
|
// FIXME: Handle "iommu-addresses" property
|
|
|
|
// FIXME: Support "size" and "align" property
|
|
|
|
// Also "alloc-ranges"
|
|
|
|
// FIXME: Support no-map
|
|
|
|
// FIXME: Support no-map-fixup
|
|
|
|
// FIXME: Support reusable
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return IterationDecision::Continue;
|
|
|
|
},
|
|
|
|
.on_noop = []() -> ErrorOr<IterationDecision> { return IterationDecision::Continue; },
|
|
|
|
.on_end = []() -> ErrorOr<void> { return {}; },
|
|
|
|
}));
|
|
|
|
|
|
|
|
// FDTs do not seem to be fully sort memory ranges, especially as we get them from at least two structures
|
|
|
|
quick_sort(global_data.physical_memory_ranges, [](auto& a, auto& b) -> bool { return a.start > b.start; });
|
|
|
|
}
|
|
|
|
|
|
|
|
// Populate global_data.physical_memory_ranges (and used_memory_ranges) from the
// bootloader-provided Multiboot memory map and module information.
// NOTE(review): reads the early-boot globals multiboot_flags, multiboot_memory_map,
// multiboot_memory_map_count and the multiboot module pointer/length — assumes the
// prekernel has populated them before this runs.
UNMAP_AFTER_INIT void MemoryManager::parse_memory_map_multiboot(MemoryManager::GlobalData& global_data)
{
    // Register used memory regions that we know of.
    // Multiboot info flag bit 2 (0x4) signals that the module fields are valid;
    // only then is multiboot_module_physical_ptr meaningful.
    if (multiboot_flags & 0x4 && !multiboot_module_physical_ptr.is_null()) {
        dmesgln("MM: Multiboot module @ {}, length={}", multiboot_module_physical_ptr, multiboot_module_length);
        VERIFY(multiboot_module_length != 0);
        // Reserve the module's physical span so the page allocator never hands it out.
        global_data.used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::BootModule, multiboot_module_physical_ptr, multiboot_module_physical_ptr.offset(multiboot_module_length) });
    }

    auto* mmap_begin = multiboot_memory_map;
    auto* mmap_end = multiboot_memory_map + multiboot_memory_map_count;

    struct ContiguousPhysicalVirtualRange {
        PhysicalAddress lower;
        PhysicalAddress upper;
    };

    // NOTE(review): last_contiguous_physical_range is never read or written in this
    // function; it looks like a leftover from when contiguous-region detection lived
    // here — candidate for removal after confirming no #if'd-out code uses it.
    Optional<ContiguousPhysicalVirtualRange> last_contiguous_physical_range;
    for (auto* mmap = mmap_begin; mmap < mmap_end; mmap++) {
        // We have to copy these onto the stack, because we take a reference to these when printing them out,
        // and doing so on a packed struct field is UB.
        auto address = mmap->addr;
        auto length = mmap->len;
        // Copy the (possibly adjusted) local values back into the packed entry when
        // this loop iteration ends, on every exit path.
        ArmedScopeGuard write_back_guard = [&]() {
            mmap->addr = address;
            mmap->len = length;
        };

        dmesgln("MM: Multiboot mmap: address={:p}, length={}, type={}", address, length, mmap->type);

        auto start_address = PhysicalAddress(address);
        // Translate the Multiboot (E820-style) type codes into our PhysicalMemoryRangeType.
        switch (mmap->type) {
        case (MULTIBOOT_MEMORY_AVAILABLE):
            global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Usable, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_RESERVED):
#if ARCH(X86_64)
            // Workaround for https://gitlab.com/qemu-project/qemu/-/commit/8504f129450b909c88e199ca44facd35d38ba4de
            // That commit added a reserved 12GiB entry for the benefit of virtual firmware.
            // We can safely ignore this block as it isn't actually reserved on any real hardware.
            // From: https://lore.kernel.org/all/20220701161014.3850-1-joao.m.martins@oracle.com/
            // "Always add the HyperTransport range into e820 even when the relocation isn't
            // done *and* there's >= 40 phys bit that would put max phyusical boundary to 1T
            // This should allow virtual firmware to avoid the reserved range at the
            // 1T boundary on VFs with big bars."
            // NOTE: the unbraced `if` below guards ONLY the append on x86-64; on other
            // architectures the append is unconditional.
            if (address != 0x000000fd00000000 || length != (0x000000ffffffffff - 0x000000fd00000000) + 1)
#endif
                global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Reserved, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_ACPI_RECLAIMABLE):
            global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_Reclaimable, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_NVS):
            global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::ACPI_NVS, start_address, length });
            break;
        case (MULTIBOOT_MEMORY_BADRAM):
            dmesgln("MM: Warning, detected bad memory range!");
            global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::BadMemory, start_address, length });
            break;
        default:
            dbgln("MM: Unknown range!");
            global_data.physical_memory_ranges.append(PhysicalMemoryRange { PhysicalMemoryRangeType::Unknown, start_address, length });
            break;
        }
    }
}
|
|
|
|
|
2021-07-08 01:50:05 +00:00
|
|
|
// Build the global PhysicalPageEntry array that tracks every physical page frame
// in the system, carve physical memory out for it (plus the page tables needed to
// map it), map it into kernel virtual memory by hand, and then retroactively
// create PhysicalRAMPage objects for the page tables themselves.
// Must run once during early boot, before the physical allocator is usable.
UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
{
    m_global_data.with([&](auto& global_data) {
        // We assume that the physical page range is contiguous and doesn't contain huge gaps!
        PhysicalAddress highest_physical_address;
#if ARCH(X86_64)
        // On x86 LAPIC is at 0xfee00000 or a similar address. Round up to 0x100000000LL to cover variations.
        highest_physical_address = PhysicalAddress { 0x100000000LL };
#endif
        // The array must cover every page anything refers to, so scan both the
        // used ranges and the bootloader-reported physical ranges for the top address.
        for (auto& range : global_data.used_memory_ranges) {
            if (range.end.get() > highest_physical_address.get())
                highest_physical_address = range.end;
        }
        for (auto& region : global_data.physical_memory_ranges) {
            auto range_end = PhysicalAddress(region.start).offset(region.length);
            if (range_end.get() > highest_physical_address.get())
                highest_physical_address = range_end;
        }

#if ARCH(X86_64)
        // Map multiboot framebuffer
        if ((multiboot_flags & MULTIBOOT_INFO_FRAMEBUFFER_INFO) && !multiboot_framebuffer_addr.is_null() && multiboot_framebuffer_type == MULTIBOOT_FRAMEBUFFER_TYPE_RGB) {
            PhysicalAddress multiboot_framebuffer_addr_end = multiboot_framebuffer_addr.offset(multiboot_framebuffer_height * multiboot_framebuffer_pitch);
            if (multiboot_framebuffer_addr_end > highest_physical_address)
                highest_physical_address = multiboot_framebuffer_addr_end;
        }
#endif

        // Calculate how many total physical pages the array will have
        m_physical_page_entries_count = PhysicalAddress::physical_page_index(highest_physical_address.get()) + 1;
        VERIFY(m_physical_page_entries_count != 0);
        VERIFY(!Checked<decltype(m_physical_page_entries_count)>::multiplication_would_overflow(m_physical_page_entries_count, sizeof(PhysicalPageEntry)));

        // Calculate how many bytes the array will consume
        auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
        auto physical_page_array_pages = page_round_up(physical_page_array_size).release_value_but_fixme_should_propagate_errors() / PAGE_SIZE;
        VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);

        // Calculate how many page tables we will need to be able to map them all
        // (each page table maps 512 pages).
        auto needed_page_table_count = (physical_page_array_pages + 512 - 1) / 512;

        auto physical_page_array_pages_and_page_tables_count = physical_page_array_pages + needed_page_table_count;

        // Now that we know how much memory we need for a contiguous array of PhysicalPage instances, find a memory region that can fit it
        PhysicalRegion* found_region { nullptr };
        Optional<size_t> found_region_index;
        for (size_t i = 0; i < global_data.physical_regions.size(); ++i) {
            auto& region = global_data.physical_regions[i];
            if (region->size() >= physical_page_array_pages_and_page_tables_count) {
                found_region = region;
                found_region_index = i;
                break;
            }
        }

        if (!found_region) {
            dmesgln("MM: Need {} bytes for physical page management, but no memory region is large enough!", physical_page_array_pages_and_page_tables_count);
            VERIFY_NOT_REACHED();
        }

        // Account for the pages we are about to steal from the general allocator.
        VERIFY(global_data.system_memory_info.physical_pages >= physical_page_array_pages_and_page_tables_count);
        global_data.system_memory_info.physical_pages -= physical_page_array_pages_and_page_tables_count;

        if (found_region->size() == physical_page_array_pages_and_page_tables_count) {
            // We're stealing the entire region
            global_data.physical_pages_region = global_data.physical_regions.take(*found_region_index);
        } else {
            global_data.physical_pages_region = found_region->try_take_pages_from_beginning(physical_page_array_pages_and_page_tables_count);
        }
        global_data.used_memory_ranges.append({ UsedMemoryRangeType::PhysicalPages, global_data.physical_pages_region->lower(), global_data.physical_pages_region->upper() });

        // Create the bare page directory. This is not a fully constructed page directory and merely contains the allocators!
        m_kernel_page_directory = PageDirectory::must_create_kernel_page_directory();

        {
            // Carve out the whole page directory covering the kernel image to make MemoryManager::initialize_physical_pages() happy
            FlatPtr start_of_range = ((FlatPtr)start_of_kernel_image & ~(FlatPtr)0x1fffff);
            FlatPtr end_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
            MUST(global_data.region_tree.place_specifically(*MUST(Region::create_unbacked()).leak_ptr(), VirtualRange { VirtualAddress(start_of_range), end_of_range - start_of_range }));
        }

        // Allocate a virtual address range for our array
        // This looks awkward, but it basically creates a dummy region to occupy the address range permanently.
        auto& region = *MUST(Region::create_unbacked()).leak_ptr();
        MUST(global_data.region_tree.place_anywhere(region, RandomizeVirtualAddress::No, physical_page_array_pages * PAGE_SIZE));
        auto range = region.range();

        // Now that we have our special m_physical_pages_region region with enough pages to hold the entire array
        // try to map the entire region into kernel space so we always have it
        // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
        // mapped yet so we can't create them

        // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
        auto page_tables_base = global_data.physical_pages_region->lower();
        auto physical_page_array_base = page_tables_base.offset(needed_page_table_count * PAGE_SIZE);
        auto physical_page_array_current_page = physical_page_array_base.get();
        auto virtual_page_array_base = range.base().get();
        auto virtual_page_array_current_page = virtual_page_array_base;
        for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
            auto virtual_page_base_for_this_pt = virtual_page_array_current_page;
            auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
            auto* pt = reinterpret_cast<PageTableEntry*>(quickmap_page(pt_paddr));
            __builtin_memset(pt, 0, PAGE_SIZE);
            for (size_t pte_index = 0; pte_index < PAGE_SIZE / sizeof(PageTableEntry); pte_index++) {
                auto& pte = pt[pte_index];
                pte.set_physical_page_base(physical_page_array_current_page);
                pte.set_user_allowed(false);
                pte.set_writable(true);
                // The physical page array is data, never code: when NX is available,
                // forbid execution so this writable mapping honors W^X.
                // FIX: this previously passed `false`, leaving the mapping executable.
                if (Processor::current().has_nx())
                    pte.set_execute_disabled(true);
                pte.set_global(true);
                pte.set_present(true);

                physical_page_array_current_page += PAGE_SIZE;
                virtual_page_array_current_page += PAGE_SIZE;
            }
            unquickmap_page();

            // Hook the page table into the kernel page directory
            u32 page_directory_index = (virtual_page_base_for_this_pt >> 21) & 0x1ff;
            auto* pd = reinterpret_cast<PageDirectoryEntry*>(quickmap_page(boot_pd_kernel));
            PageDirectoryEntry& pde = pd[page_directory_index];

            VERIFY(!pde.is_present()); // Nothing should be using this PD yet

            // We can't use ensure_pte quite yet!
            pde.set_page_table_base(pt_paddr.get());
            pde.set_user_allowed(false);
            pde.set_present(true);
            pde.set_writable(true);
            pde.set_global(true);

            unquickmap_page();

            flush_tlb_local(VirtualAddress(virtual_page_base_for_this_pt));
        }

        // We now have the entire PhysicalPageEntry array mapped!
        m_physical_page_entries = (PhysicalPageEntry*)range.base().get();
        // FIX: construct PhysicalPageEntry objects in the PhysicalPageEntry array;
        // the previous code placement-new'd PageTableEntry here, initializing the
        // wrong (and differently-sized) type in these slots.
        for (size_t i = 0; i < m_physical_page_entries_count; i++)
            new (&m_physical_page_entries[i]) PhysicalPageEntry();

        // Now we should be able to allocate PhysicalPage instances,
        // so finish setting up the kernel page directory
        m_kernel_page_directory->allocate_kernel_directory();

        // Now create legit PhysicalPage objects for the page tables we created.
        virtual_page_array_current_page = virtual_page_array_base;
        for (size_t pt_index = 0; pt_index < needed_page_table_count; pt_index++) {
            VERIFY(virtual_page_array_current_page <= range.end().get());
            auto pt_paddr = page_tables_base.offset(pt_index * PAGE_SIZE);
            auto physical_page_index = PhysicalAddress::physical_page_index(pt_paddr.get());
            auto& physical_page_entry = m_physical_page_entries[physical_page_index];
            auto physical_page = adopt_lock_ref(*new (&physical_page_entry.allocated.physical_page) PhysicalRAMPage(MayReturnToFreeList::No));

            // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
            (void)physical_page.leak_ref();

            // Each page table maps (PAGE_SIZE / sizeof(PageTableEntry)) pages of the array.
            virtual_page_array_current_page += (PAGE_SIZE / sizeof(PageTableEntry)) * PAGE_SIZE;
        }

        dmesgln("MM: Physical page entries: {}", range);
    });
}
|
|
|
|
|
2023-12-29 00:36:39 +00:00
|
|
|
#ifdef HAS_ADDRESS_SANITIZER
|
|
|
|
void MemoryManager::initialize_kasan_shadow_memory()
|
|
|
|
{
|
|
|
|
m_global_data.with([&](auto& global_data) {
|
|
|
|
// We map every 8 bytes of normal memory to 1 byte of shadow memory, so we need a 1/9 of total memory for the shadow memory.
|
|
|
|
auto virtual_range = global_data.region_tree.total_range();
|
|
|
|
auto shadow_range_size = MUST(page_round_up(ceil_div(virtual_range.size(), 9ul)));
|
|
|
|
dbgln("MM: Reserving {} bytes for KASAN shadow memory", shadow_range_size);
|
|
|
|
|
|
|
|
auto vmobject = MUST(AnonymousVMObject::try_create_with_size(shadow_range_size, AllocationStrategy::AllocateNow));
|
|
|
|
auto* shadow_region = MUST(Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWrite)).leak_ptr();
|
|
|
|
auto shadow_range = VirtualRange { virtual_range.base().offset(virtual_range.size() - shadow_range_size), shadow_range_size };
|
|
|
|
MUST(global_data.region_tree.place_specifically(*shadow_region, shadow_range));
|
|
|
|
MUST(shadow_region->map(kernel_page_directory()));
|
|
|
|
|
|
|
|
AddressSanitizer::init(shadow_region->vaddr().get());
|
|
|
|
});
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2021-07-08 01:50:05 +00:00
|
|
|
// Return the PhysicalPageEntry slot that tracks the page frame containing the
// given physical address. Asserts the address lies within the tracked range.
PhysicalPageEntry& MemoryManager::get_physical_page_entry(PhysicalAddress physical_address)
{
    // Every page frame owns exactly one slot in m_physical_page_entries,
    // indexed by its frame number.
    auto const entry_index = PhysicalAddress::physical_page_index(physical_address.get());
    VERIFY(entry_index < m_physical_page_entries_count);
    return m_physical_page_entries[entry_index];
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Reverse-map a PhysicalRAMPage object back to the physical address of the page
// frame it describes. This is the inverse of get_physical_page_entry(): the
// PhysicalRAMPage lives inside its PhysicalPageEntry slot in the global array,
// so pointer arithmetic recovers the array index, which *is* the frame number.
PhysicalAddress MemoryManager::get_physical_address(PhysicalRAMPage const& physical_page)
{
    // Step back from the embedded PhysicalRAMPage to its enclosing
    // PhysicalPageEntry (container-of via offsetof on the union member).
    PhysicalPageEntry const& physical_page_entry = *reinterpret_cast<PhysicalPageEntry const*>((u8 const*)&physical_page - __builtin_offsetof(PhysicalPageEntry, allocated.physical_page));
    // Index into the contiguous entry array == physical page frame number.
    size_t physical_page_entry_index = &physical_page_entry - m_physical_page_entries;
    VERIFY(physical_page_entry_index < m_physical_page_entries_count);
    return PhysicalAddress((PhysicalPtr)physical_page_entry_index * PAGE_SIZE);
}
|
|
|
|
|
2020-10-31 23:19:18 +00:00
|
|
|
// Look up the page table entry for `vaddr` in `page_directory` without creating
// anything. Returns nullptr if no page table covers the address.
// Preconditions: interrupts disabled and the directory's lock held by this
// processor — the quickmap slots used below are per-processor and must not be
// re-entered.
// NOTE: the returned pointer is only valid while the quickmap PT mapping lasts
// (until the next quickmap on this processor).
PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    // Split the virtual address into the three 9-bit table indices used below.
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

    // Temporarily map the page directory so we can inspect its entry.
    auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
    PageDirectoryEntry const& pde = pd[page_directory_index];
    if (!pde.is_present())
        return nullptr;

    // Map the referenced page table and return a pointer to the specific PTE.
    return &quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()))[page_table_index];
}
|
|
|
|
|
2020-09-01 22:10:54 +00:00
|
|
|
// Look up the page table entry for `vaddr`, allocating and installing a fresh
// page table if none covers the address yet. Returns nullptr only if a new page
// table was needed and physical allocation failed.
// Preconditions: interrupts disabled and the directory's lock held by this
// processor (the quickmap slots used here are per-processor).
// The freshly allocated page table's ref is deliberately leaked; see the NOTE below.
PageTableEntry* MemoryManager::ensure_pte(PageDirectory& page_directory, VirtualAddress vaddr)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    // Split the virtual address into the three 9-bit table indices.
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    auto& pde = pd[page_directory_index];
    // Fast path: a page table already exists for this 2MiB slot.
    if (pde.is_present())
        return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];

    // Slow path: allocate a zero-filled page to serve as the new page table.
    // Allocation may purge volatile memory, which can recurse into ensure_pte
    // and clobber our quickmap — `did_purge` tells us if that happened.
    bool did_purge = false;
    auto page_table_or_error = allocate_physical_page(ShouldZeroFill::Yes, &did_purge);
    if (page_table_or_error.is_error()) {
        dbgln("MM: Unable to allocate page table to map {}", vaddr);
        return nullptr;
    }
    auto page_table = page_table_or_error.release_value();
    if (did_purge) {
        // If any memory had to be purged, ensure_pte may have been called as part
        // of the purging process. So we need to re-map the pd in this case to ensure
        // we're writing to the correct underlying physical page
        pd = quickmap_pd(page_directory, page_directory_table_index);
        VERIFY(&pde == &pd[page_directory_index]); // Sanity check

        VERIFY(!pde.is_present()); // Should have not changed
    }
    // Install the new page table. User access is allowed at the PDE level; the
    // individual PTEs still gate user access per page.
    pde.set_page_table_base(page_table->paddr().get());
    pde.set_user_allowed(true);
    pde.set_present(true);
    pde.set_writable(true);
    // Kernel directory mappings are global (not flushed on CR3 switch).
    pde.set_global(&page_directory == m_kernel_page_directory.ptr());

    // NOTE: This leaked ref is matched by the unref in MemoryManager::release_pte()
    (void)page_table.leak_ref();

    return &quickmap_pt(PhysicalAddress(pde.page_table_base()))[page_table_index];
}
|
|
|
|
|
Kernel: Remove redundant hash map of page tables in PageDirectory
The purpose of the PageDirectory::m_page_tables map was really just
to act as ref-counting storage for PhysicalPage objects that were
being used for the directory's page tables.
However, this was basically redundant, since we can find the physical
address of each page table from the page directory, and we can find the
PhysicalPage object from MemoryManager::get_physical_page_entry().
So if we just manually ref() and unref() the pages when they go in and
out of the directory, we no longer need PageDirectory::m_page_tables!
Not only does this remove a bunch of kmalloc() traffic, it also solves
a race condition that would occur when lazily adding a new page table
to a directory:
Previously, when MemoryManager::ensure_pte() would call HashMap::set()
to insert the new page table into m_page_tables, if the HashMap had to
grow its internal storage, it would call kmalloc(). If that kmalloc()
would need to perform heap expansion, it would end up calling
ensure_pte() again, which would clobber the page directory mapping used
by the outer invocation of ensure_pte().
The net result of the above bug would be that any invocation of
MemoryManager::ensure_pte() could erroneously return a pointer into
a kernel page table instead of the correct one!
This whole problem goes away when we remove the HashMap, as ensure_pte()
no longer does anything that allocates from the heap.
2022-01-10 15:00:46 +00:00
|
|
|
// Tear down the page table entry mapping `vaddr` in `page_directory`, and
// opportunistically free the backing page table once it holds no mappings.
//
// Caller contract (verified below): interrupts are disabled and the
// directory's lock is held by this processor, since quickmap_pd()/quickmap_pt()
// use a per-CPU quickmap slot that must not be preempted or re-entered.
void MemoryManager::release_pte(PageDirectory& page_directory, VirtualAddress vaddr, IsLastPTERelease is_last_pte_release)
{
    VERIFY_INTERRUPTS_DISABLED();
    VERIFY(page_directory.get_lock().is_locked_by_current_processor());
    // Split the virtual address into per-level table indices; each level has
    // 512 entries, hence the 9-bit (0x1ff) masks.
    u32 page_directory_table_index = (vaddr.get() >> 30) & 0x1ff;
    u32 page_directory_index = (vaddr.get() >> 21) & 0x1ff;
    u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

    auto* pd = quickmap_pd(page_directory, page_directory_table_index);
    PageDirectoryEntry& pde = pd[page_directory_index];
    if (pde.is_present()) {
        // Temporarily map the page table so we can clear the target PTE.
        auto* page_table = quickmap_pt(PhysicalAddress((FlatPtr)pde.page_table_base()));
        auto& pte = page_table[page_table_index];
        pte.clear();

        if (is_last_pte_release == IsLastPTERelease::Yes || page_table_index == 0x1ff) {
            // If this is the last PTE in a region or the last PTE in a page table then
            // check if we can also release the page table
            bool all_clear = true;
            for (u32 i = 0; i <= 0x1ff; i++) {
                if (!page_table[i].is_null()) {
                    all_clear = false;
                    break;
                }
            }
            if (all_clear) {
                // Drop the reference that was deliberately leaked when the
                // page table was installed (the matching leak_ref() lives in
                // MemoryManager::ensure_pte()).
                get_physical_page_entry(PhysicalAddress { pde.page_table_base() }).allocated.physical_page.unref();
                pde.clear();
            }
        }
    }
}
|
|
|
|
|
2021-02-19 17:41:50 +00:00
|
|
|
// Per-CPU MMU bring-up. Every CPU sets up its processor-local
// MemoryManagerData; the bootstrap processor (cpu 0) additionally constructs
// the global MemoryManager singleton and unlocks kmalloc heap expansion.
UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
{
    dmesgln("Initialize MMU");
    ProcessorSpecific<MemoryManagerData>::initialize();

    if (cpu != 0)
        return;

    // Only the bootstrap processor creates the singleton.
    new MemoryManager;
    // Heap expansion needs a working MemoryManager, so enable it only now.
    kmalloc_enable_expand();
}
|
|
|
|
|
2021-08-06 11:57:39 +00:00
|
|
|
// Look up the userspace Region containing `vaddr` in the given address space.
// Returns nullptr when no region maps that address. A 1-byte range is enough
// for a containment query.
Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
{
    return space.find_region_containing({ vaddr, 1 });
}
|
|
|
|
|
2022-08-23 15:58:05 +00:00
|
|
|
// Sanity-check a syscall entry before dispatching it: the userspace stack
// pointer must be valid, and the calling instruction must come from a known,
// non-writable (and, if enforced, syscall-designated) region.
//
// Any violation is only *recorded* while the address-space lock is held; the
// actual crash is performed after the with() block has released the lock.
void MemoryManager::validate_syscall_preconditions(Process& process, RegisterState const& regs)
{
    bool should_crash = false;
    char const* crash_description = nullptr;
    int crash_signal = 0;

    // NOTE: despite its name, this lambda only records the crash parameters;
    // handle_crash() runs later, outside the address-space lock.
    auto unlock_and_handle_crash = [&](char const* description, int signal) {
        should_crash = true;
        crash_description = description;
        crash_signal = signal;
    };

    process.address_space().with([&](auto& space) -> void {
        // 1. The userspace stack pointer must point into a valid stack region.
        VirtualAddress userspace_sp = VirtualAddress { regs.userspace_sp() };
        if (!MM.validate_user_stack(*space, userspace_sp)) {
            dbgln("Invalid stack pointer: {}", userspace_sp);
            return unlock_and_handle_crash("Bad stack on syscall entry", SIGSEGV);
        }

        // 2. The instruction pointer must lie inside a mapped user region.
        VirtualAddress ip = VirtualAddress { regs.ip() };
        auto* calling_region = MM.find_user_region_from_vaddr(*space, ip);
        if (!calling_region) {
            dbgln("Syscall from {:p} which has no associated region", ip);
            return unlock_and_handle_crash("Syscall from unknown region", SIGSEGV);
        }

        // 3. Syscalls from writable memory are rejected (W^X-style hardening).
        if (calling_region->is_writable()) {
            dbgln("Syscall from writable memory at {:p}", ip);
            return unlock_and_handle_crash("Syscall from writable memory", SIGSEGV);
        }

        // 4. If the process opted into syscall-region enforcement, the call
        //    must originate from a region marked as a syscall region.
        if (space->enforces_syscall_regions() && !calling_region->is_syscall_region()) {
            dbgln("Syscall from non-syscall region");
            return unlock_and_handle_crash("Syscall from non-syscall region", SIGSEGV);
        }
    });

    if (should_crash) {
        handle_crash(regs, crash_description, crash_signal);
    }
}
|
|
|
|
|
2021-07-14 11:31:21 +00:00
|
|
|
// Central page-fault dispatcher: reject faults on protected kernel sections
// and faults taken inside IRQ handlers, then locate the faulting Region
// (userspace address space or the kernel's global region tree), pin it, and
// let the region resolve the fault.
PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
{
    // Helper: did the fault land inside the [start, end) span of a
    // linker-defined kernel section?
    auto faulted_in_range = [&fault](auto const* start, auto const* end) {
        return fault.vaddr() >= VirtualAddress { start } && fault.vaddr() < VirtualAddress { end };
    };

    if (faulted_in_range(&start_of_ro_after_init, &end_of_ro_after_init)) {
        dbgln("Attempt to write into READONLY_AFTER_INIT section");
        return PageFaultResponse::ShouldCrash;
    }

    if (faulted_in_range(&start_of_unmap_after_init, &end_of_unmap_after_init)) {
        auto const* kernel_symbol = symbolicate_kernel_address(fault.vaddr().get());
        dbgln("Attempt to access UNMAP_AFTER_INIT section ({}: {})", fault.vaddr(), kernel_symbol ? kernel_symbol->name : "(Unknown)");
        return PageFaultResponse::ShouldCrash;
    }

    if (faulted_in_range(&start_of_kernel_ksyms, &end_of_kernel_ksyms)) {
        dbgln("Attempt to access KSYMS section");
        return PageFaultResponse::ShouldCrash;
    }

    // Faulting while servicing an IRQ is a kernel bug; dump state and crash.
    if (Processor::current_in_irq()) {
        dbgln("CPU[{}] BUG! Page fault while handling IRQ! code={}, vaddr={}, irq level: {}",
            Processor::current_id(), fault.code(), fault.vaddr(), Processor::current_in_irq());
        dump_kernel_regions();
        return PageFaultResponse::ShouldCrash;
    }
    dbgln_if(PAGE_FAULT_DEBUG, "MM: CPU[{}] handle_page_fault({:#04x}) at {}", Processor::current_id(), fault.code(), fault.vaddr());

    // The faulting region may be unmapped concurrently to handling this page fault, and since
    // regions are singly-owned it would usually result in the region being immediately
    // de-allocated. To ensure the region is not de-allocated while we're still handling the
    // fault we increase a page fault counter on the region, and the region will refrain from
    // de-allocating itself until the counter reaches zero. (Since unmapping the region also
    // includes removing it from the region tree while holding the address space spinlock, and
    // because we increment the counter while still holding the spinlock it is guaranteed that
    // we always increment the counter before it gets a chance to be deleted)
    Region* region = nullptr;
    if (is_user_address(fault.vaddr())) {
        // Userspace fault: resolve the region via the current page directory's
        // owning process and its address space.
        auto page_directory = PageDirectory::find_current();
        if (!page_directory)
            return PageFaultResponse::ShouldCrash;
        auto* process = page_directory->process();
        VERIFY(process);
        region = process->address_space().with([&](auto& space) -> Region* {
            auto* region = find_user_region_from_vaddr(*space, fault.vaddr());
            if (!region)
                return nullptr;
            // Pin the region (see comment above) while still under the lock.
            region->start_handling_page_fault({});
            return region;
        });
    } else {
        // Kernel fault: resolve the region in MM's global region tree.
        region = MM.m_global_data.with([&](auto& global_data) -> Region* {
            auto* region = global_data.region_tree.find_region_containing(fault.vaddr());
            if (!region)
                return nullptr;
            region->start_handling_page_fault({});
            return region;
        });
    }
    if (!region)
        return PageFaultResponse::ShouldCrash;

    auto response = region->handle_fault(fault);
    // Release the pin taken by start_handling_page_fault() above.
    region->finish_handling_page_fault({});
    return response;
}
|
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
// Allocate a kernel region whose backing physical pages are contiguous.
// `size` must be page-aligned; the region is placed anywhere in kernel space
// (no address randomization) and mapped into the kernel page directory.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(size % PAGE_SIZE == 0);
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));
    auto vmobject = TRY(AnonymousVMObject::try_create_physically_contiguous_with_size(size));
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), access, cacheable));
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size);
    }));
    TRY(region->map(kernel_page_directory()));
    return region;
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Allocate a single-page DMA buffer region and report the backing physical
// page through `dma_buffer_page` so the caller can hand its address to a
// device.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access, RefPtr<Memory::PhysicalRAMPage>& dma_buffer_page)
{
    auto physical_page = TRY(allocate_physical_page());
    dma_buffer_page = physical_page;
    // Keep the mapping uncached: the device writes/reads physical memory
    // directly, bypassing the CPU caches.
    return allocate_kernel_region_with_physical_pages({ &physical_page, 1 }, name, access, Region::Cacheable::No);
}
|
|
|
|
|
2022-01-08 16:34:09 +00:00
|
|
|
// Convenience overload for callers that do not need the backing physical
// page: forwards to the three-argument variant with a throwaway out-param.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_page(StringView name, Memory::Region::Access access)
{
    RefPtr<Memory::PhysicalRAMPage> unused_page;
    return allocate_dma_buffer_page(name, access, unused_page);
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Allocate a multi-page, physically contiguous DMA buffer region. The
// backing pages are reported through `dma_buffer_pages` for device
// programming. `size` must be page-aligned.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access, Vector<NonnullRefPtr<Memory::PhysicalRAMPage>>& dma_buffer_pages)
{
    VERIFY(size % PAGE_SIZE == 0);
    dma_buffer_pages = TRY(allocate_contiguous_physical_pages(size));
    // Keep the mapping uncached: the device accesses physical memory
    // directly, bypassing the CPU caches.
    return allocate_kernel_region_with_physical_pages(dma_buffer_pages, name, access, Region::Cacheable::No);
}
|
|
|
|
|
2022-01-08 16:34:09 +00:00
|
|
|
// Convenience overload for callers that do not need the backing pages:
// forwards to the four-argument variant with a throwaway out-param.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::allocate_dma_buffer_pages(size_t size, StringView name, Memory::Region::Access access)
{
    VERIFY(size % PAGE_SIZE == 0);
    Vector<NonnullRefPtr<Memory::PhysicalRAMPage>> pages;
    return allocate_dma_buffer_pages(size, name, access, pages);
}
|
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
// Allocate an anonymous kernel region of page-aligned `size`, using the given
// physical-page allocation strategy, place it anywhere in kernel space
// (no address randomization), and map it into the kernel page directory.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region(size_t size, StringView name, Region::Access access, AllocationStrategy strategy, Region::Cacheable cacheable)
{
    VERIFY(size % PAGE_SIZE == 0);
    auto vmobject = TRY(AnonymousVMObject::try_create_with_size(size, strategy));
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), access, cacheable));
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size);
    }));
    TRY(region->map(kernel_page_directory()));
    return region;
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Build a kernel region on top of an explicit set of already-allocated
// physical pages and map it into the kernel page directory.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_physical_pages(Span<NonnullRefPtr<PhysicalRAMPage>> pages, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    auto vmobject = TRY(AnonymousVMObject::try_create_with_physical_pages(pages));
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), access, cacheable));
    auto region_size = pages.size() * PAGE_SIZE;
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, region_size, PAGE_SIZE);
    }));
    TRY(region->map(kernel_page_directory()));
    return region;
}
|
|
|
|
|
|
|
|
// Map a physical MMIO range (device registers, framebuffers, ...) into a new
// kernel region. `size` must be page-aligned; the mapping is anchored at
// physical address `paddr`.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_mmio_kernel_region(PhysicalAddress paddr, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(size % PAGE_SIZE == 0);
    auto vmobject = TRY(MMIOVMObject::try_create_for_physical_range(paddr, size));
    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));
    auto region = TRY(Region::create_unplaced(move(vmobject), 0, move(region_name), access, cacheable));
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, PAGE_SIZE);
    }));
    // Map with an explicit physical base so the virtual range mirrors the
    // device's physical range.
    TRY(region->map(kernel_page_directory(), paddr));
    return region;
}
|
|
|
|
|
2021-11-07 23:51:39 +00:00
|
|
|
// Build a kernel region backed by a caller-supplied VMObject and map it into
// the kernel page directory. `size` must be page-aligned.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
{
    VERIFY(size % PAGE_SIZE == 0);

    OwnPtr<KString> region_name;
    if (!name.is_null())
        region_name = TRY(KString::try_create(name));

    auto region = TRY(Region::create_unplaced(vmobject, 0, move(region_name), access, cacheable));
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size);
    }));
    TRY(region->map(kernel_page_directory()));
    return region;
}
|
|
|
|
|
2022-07-14 12:27:22 +00:00
|
|
|
// Reserve `page_count` physical pages from the uncommitted pool, returning a
// CommittedPhysicalPageSet token that guarantees the pages can later be
// allocated. On failure (not enough uncommitted pages), dumps per-process
// memory usage to the debug log to aid OOM diagnosis, then returns ENOMEM.
ErrorOr<CommittedPhysicalPageSet> MemoryManager::commit_physical_pages(size_t page_count)
{
    VERIFY(page_count > 0);
    auto result = m_global_data.with([&](auto& global_data) -> ErrorOr<CommittedPhysicalPageSet> {
        if (global_data.system_memory_info.physical_pages_uncommitted < page_count) {
            dbgln("MM: Unable to commit {} pages, have only {}", page_count, global_data.system_memory_info.physical_pages_uncommitted);
            return ENOMEM;
        }

        // Move the pages from the uncommitted pool to the committed pool.
        global_data.system_memory_info.physical_pages_uncommitted -= page_count;
        global_data.system_memory_info.physical_pages_committed += page_count;
        return CommittedPhysicalPageSet { {}, page_count };
    });
    if (result.is_error()) {
        // Commit failed: log every process's memory footprint (in pages)
        // so the culprit of the memory exhaustion can be identified.
        Process::for_each_ignoring_jails([&](Process const& process) {
            size_t amount_resident = 0;
            size_t amount_shared = 0;
            size_t amount_virtual = 0;
            process.address_space().with([&](auto& space) {
                amount_resident = space->amount_resident();
                amount_shared = space->amount_shared();
                amount_virtual = space->amount_virtual();
            });
            process.name().with([&](auto& process_name) {
                dbgln("{}({}) resident:{}, shared:{}, virtual:{}",
                    process_name.representable_view(),
                    process.pid(),
                    amount_resident / PAGE_SIZE,
                    amount_shared / PAGE_SIZE,
                    amount_virtual / PAGE_SIZE);
            });
            return IterationDecision::Continue;
        });
    }
    return result;
}
|
|
|
|
|
2022-07-14 12:27:22 +00:00
|
|
|
// Return `page_count` previously committed (but unused) pages back to the
// uncommitted pool. Only CommittedPhysicalPageSet may call this (Badge).
void MemoryManager::uncommit_physical_pages(Badge<CommittedPhysicalPageSet>, size_t page_count)
{
    VERIFY(page_count > 0);

    m_global_data.with([&](auto& global_data) {
        auto& info = global_data.system_memory_info;
        // Callers can never return more pages than are currently committed.
        VERIFY(info.physical_pages_committed >= page_count);
        info.physical_pages_uncommitted += page_count;
        info.physical_pages_committed -= page_count;
    });
}
|
|
|
|
|
2021-07-11 21:12:32 +00:00
|
|
|
// Hand a physical page back to the PhysicalRegion that owns it and update the
// global accounting. Panics if no known region contains `paddr`.
void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
{
    return m_global_data.with([&](auto& global_data) {
        for (auto& physical_region : global_data.physical_regions) {
            if (!physical_region->contains(paddr))
                continue;

            physical_region->return_page(paddr);
            --global_data.system_memory_info.physical_pages_used;

            // Freed pages always go back to the uncommitted pool: committed
            // pages are only released on explicit request, and once returned
            // there is no guarantee of getting them back.
            ++global_data.system_memory_info.physical_pages_uncommitted;
            return;
        }
        PANIC("MM: deallocate_physical_page couldn't figure out region for page @ {}", paddr);
    });
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Take one free physical page from the first PhysicalRegion that has one.
// With `committed == true` the page is drawn from the committed pool (which
// must be non-empty); otherwise from the uncommitted pool, returning null if
// that pool is exhausted. Returns null (after logging) when no region can
// supply a page.
RefPtr<PhysicalRAMPage> MemoryManager::find_free_physical_page(bool committed)
{
    RefPtr<PhysicalRAMPage> page;
    m_global_data.with([&](auto& global_data) {
        if (committed) {
            // Draw from the committed pages pool. We should always have these pages available
            VERIFY(global_data.system_memory_info.physical_pages_committed > 0);
            global_data.system_memory_info.physical_pages_committed--;
        } else {
            // We need to make sure we don't touch pages that we have committed to
            if (global_data.system_memory_info.physical_pages_uncommitted == 0)
                return;
            global_data.system_memory_info.physical_pages_uncommitted--;
        }
        // NOTE(review): the pool counter is decremented before the region scan
        // below; if no region yields a page the decrement is not rolled back.
        // Presumably the counters guarantee a page exists — confirm.
        for (auto& region : global_data.physical_regions) {
            page = region->take_free_page();
            if (!page.is_null()) {
                ++global_data.system_memory_info.physical_pages_used;
                break;
            }
        }
    });

    if (page.is_null())
        dbgln("MM: couldn't find free physical page. Continuing...");

    return page;
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Take one page from the committed pool — this cannot fail, because the
// commitment (made via commit_physical_pages()) guarantees availability.
// Optionally zero-fills the page through the per-CPU quickmap slot.
NonnullRefPtr<PhysicalRAMPage> MemoryManager::allocate_committed_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
{
    auto page = find_free_physical_page(true);
    VERIFY(page);
    if (should_zero_fill == ShouldZeroFill::Yes) {
        // quickmap_page() requires interrupts to be off for the duration of
        // the temporary mapping.
        InterruptDisabler disabler;
        auto* mapped = quickmap_page(*page);
        memset(mapped, 0, PAGE_SIZE);
        unquickmap_page();
    }
    return page.release_nonnull();
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Allocate one uncommitted physical page. If none is free, try to reclaim
// memory in two stages — purge volatile purgeable anonymous VMObjects, then
// release clean pages from inode-backed VMObjects — before giving up with
// ENOMEM. Optionally zero-fills the page and reports (via `did_purge`)
// whether purging was needed.
ErrorOr<NonnullRefPtr<PhysicalRAMPage>> MemoryManager::allocate_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
{
    return m_global_data.with([&](auto&) -> ErrorOr<NonnullRefPtr<PhysicalRAMPage>> {
        auto page = find_free_physical_page(false);
        bool purged_pages = false;

        if (!page) {
            // We didn't have a single free physical page. Let's try to free something up!
            // First, we look for a purgeable VMObject in the volatile state.
            for_each_vmobject([&](auto& vmobject) {
                if (!vmobject.is_anonymous())
                    return IterationDecision::Continue;
                auto& anonymous_vmobject = static_cast<AnonymousVMObject&>(vmobject);
                if (!anonymous_vmobject.is_purgeable() || !anonymous_vmobject.is_volatile())
                    return IterationDecision::Continue;
                if (auto purged_page_count = anonymous_vmobject.purge()) {
                    dbgln("MM: Purge saved the day! Purged {} pages from AnonymousVMObject", purged_page_count);
                    // At least one page was purged, so retrying must succeed.
                    page = find_free_physical_page(false);
                    purged_pages = true;
                    VERIFY(page);
                    return IterationDecision::Break;
                }
                return IterationDecision::Continue;
            });
        }
        if (!page) {
            // Second, we look for a file-backed VMObject with clean pages.
            for_each_vmobject([&](auto& vmobject) {
                if (!vmobject.is_inode())
                    return IterationDecision::Continue;
                auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject);
                if (auto released_page_count = inode_vmobject.try_release_clean_pages(1)) {
                    dbgln("MM: Clean inode release saved the day! Released {} pages from InodeVMObject", released_page_count);
                    page = find_free_physical_page(false);
                    VERIFY(page);
                    return IterationDecision::Break;
                }
                return IterationDecision::Continue;
            });
        }
        if (!page) {
            // Both reclaim strategies failed; the system is truly out of pages.
            dmesgln("MM: no physical pages available");
            return ENOMEM;
        }

        if (should_zero_fill == ShouldZeroFill::Yes) {
            auto* ptr = quickmap_page(*page);
            memset(ptr, 0, PAGE_SIZE);
            unquickmap_page();
        }

        if (did_purge)
            *did_purge = purged_pages;
        return page.release_nonnull();
    });
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Allocate `size / PAGE_SIZE` physically contiguous pages from the first
// PhysicalRegion able to supply them, update the accounting, and zero the
// pages through a temporary kernel mapping before returning them.
// `size` must be page-aligned. Fails with ENOMEM if the uncommitted pool is
// too small or no region has a large enough contiguous run.
ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> MemoryManager::allocate_contiguous_physical_pages(size_t size)
{
    VERIFY(!(size % PAGE_SIZE));
    size_t page_count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));

    auto physical_pages = TRY(m_global_data.with([&](auto& global_data) -> ErrorOr<Vector<NonnullRefPtr<PhysicalRAMPage>>> {
        // We need to make sure we don't touch pages that we have committed to
        if (global_data.system_memory_info.physical_pages_uncommitted < page_count)
            return ENOMEM;

        for (auto& physical_region : global_data.physical_regions) {
            auto physical_pages = physical_region->take_contiguous_free_pages(page_count);
            if (!physical_pages.is_empty()) {
                global_data.system_memory_info.physical_pages_uncommitted -= page_count;
                global_data.system_memory_info.physical_pages_used += page_count;
                return physical_pages;
            }
        }
        dmesgln("MM: no contiguous physical pages available");
        return ENOMEM;
    }));

    {
        // Zero the freshly allocated pages via a short-lived kernel mapping;
        // the scope ensures the mapping is torn down immediately afterwards.
        auto cleanup_region = TRY(MM.allocate_kernel_region_with_physical_pages(physical_pages, {}, Region::Access::Read | Region::Access::Write));
        memset(cleanup_region->vaddr().as_ptr(), 0, PAGE_SIZE * page_count);
    }
    return physical_pages;
}
|
|
|
|
|
2021-09-06 15:11:33 +00:00
|
|
|
// Switch the current processor into the given process's address space.
void MemoryManager::enter_process_address_space(Process& process)
{
    process.address_space().with([](auto& space) { enter_address_space(*space); });
}
|
|
|
|
|
2021-09-06 15:11:33 +00:00
|
|
|
// Activate the page directory of `space` for the current thread, switching
// the MMU to that address space.
void MemoryManager::enter_address_space(AddressSpace& space)
{
    auto* thread = Thread::current();
    VERIFY(thread != nullptr);
    activate_page_directory(space.page_directory(), thread);
}
|
|
|
|
|
2020-07-06 13:27:22 +00:00
|
|
|
// Thin forwarder: invalidate the TLB entries for `page_count` pages starting
// at `vaddr` on the current processor only.
void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
{
    Processor::flush_tlb_local(vaddr, page_count);
}
|
|
|
|
|
2021-07-14 11:31:21 +00:00
|
|
|
// Thin forwarder: invalidate the TLB entries for `page_count` pages starting
// at `vaddr` for the given page directory. Presumably this also notifies
// other processors (TLB shootdown) — see Processor::flush_tlb for the
// architecture-specific behavior.
void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
{
    Processor::flush_tlb(page_directory, vaddr, page_count);
}
|
|
|
|
|
2020-01-17 18:59:20 +00:00
|
|
|
// Temporarily map the page directory at `directory.m_directory_pages[pdpt_index]`
// into this CPU's dedicated quickmap-PD virtual slot and return a pointer to
// it. Interrupts must be disabled so the per-CPU slot cannot be clobbered by
// a nested user. The mapping is cached: the PTE (and local TLB) are only
// rewritten when the slot currently points at a different physical page.
PageDirectoryEntry* MemoryManager::quickmap_pd(PageDirectory& directory, size_t pdpt_index)
{
    VERIFY_INTERRUPTS_DISABLED();

    // Each CPU owns one page-sized slot in the quickmap-PD window.
    VirtualAddress vaddr(KERNEL_QUICKMAP_PD_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
    size_t pte_index = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;

    auto& pte = boot_pd_kernel_pt1023[pte_index];
    auto pd_paddr = directory.m_directory_pages[pdpt_index]->paddr();
    if (pte.physical_page_base() != pd_paddr.get()) {
        pte.set_physical_page_base(pd_paddr.get());
        pte.set_present(true);
        pte.set_writable(true);
        pte.set_user_allowed(false);
        // Only the local TLB needs flushing; the slot is per-CPU.
        flush_tlb_local(vaddr);
    }
    return (PageDirectoryEntry*)vaddr.get();
}
|
|
|
|
|
|
|
|
// Temporarily map the page table at physical address `pt_paddr` into this
// CPU's dedicated quickmap-PT slot and return a pointer to it. Interrupts
// must be disabled so we stay on this CPU while the per-CPU slot is in use.
PageTableEntry* MemoryManager::quickmap_pt(PhysicalAddress pt_paddr)
{
    VERIFY_INTERRUPTS_DISABLED();

    // Each CPU owns one fixed virtual page for quickmapping page tables.
    VirtualAddress const slot_vaddr { KERNEL_QUICKMAP_PT_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE };
    size_t const slot_pte_index = (slot_vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;

    auto* quickmap_ptes = (PageTableEntry*)boot_pd_kernel_pt1023;
    auto& slot_pte = quickmap_ptes[slot_pte_index];

    // Only rewrite the PTE (and pay for a local TLB flush) if the slot is not
    // already pointing at this page table.
    if (slot_pte.physical_page_base() != pt_paddr.get()) {
        slot_pte.set_physical_page_base(pt_paddr.get());
        slot_pte.set_present(true);
        slot_pte.set_writable(true);
        slot_pte.set_user_allowed(false);
        flush_tlb_local(slot_vaddr);
    }

    return reinterpret_cast<PageTableEntry*>(slot_vaddr.get());
}
|
|
|
|
|
2021-07-08 01:50:05 +00:00
|
|
|
// Map an arbitrary physical page into this CPU's general-purpose quickmap
// slot and return a kernel-virtual pointer to it. Takes the per-CPU quickmap
// lock (storing the previous interrupts state); the caller must release the
// mapping with unquickmap_page(). Interrupts must already be disabled.
u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
{
    VERIFY_INTERRUPTS_DISABLED();

    auto& mm_data = get_data();
    // Held until unquickmap_page(); the saved state is restored on unlock there.
    mm_data.m_quickmap_previous_interrupts_state = mm_data.m_quickmap_in_use.lock();

    // Each CPU owns one fixed virtual page for general quickmapping.
    VirtualAddress const slot_vaddr { KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE };
    u32 const slot_pte_index = (slot_vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;

    auto* quickmap_ptes = (PageTableEntry*)boot_pd_kernel_pt1023;
    auto& slot_pte = quickmap_ptes[slot_pte_index];

    // Skip the PTE rewrite and local TLB flush if the slot already maps
    // the requested physical page.
    if (slot_pte.physical_page_base() != physical_address.get()) {
        slot_pte.set_physical_page_base(physical_address.get());
        slot_pte.set_present(true);
        slot_pte.set_writable(true);
        slot_pte.set_user_allowed(false);
        flush_tlb_local(slot_vaddr);
    }

    return slot_vaddr.as_ptr();
}
|
|
|
|
|
|
|
|
void MemoryManager::unquickmap_page()
|
|
|
|
{
|
2021-02-23 19:42:32 +00:00
|
|
|
VERIFY_INTERRUPTS_DISABLED();
|
2020-06-28 22:04:35 +00:00
|
|
|
auto& mm_data = get_data();
|
2021-02-23 19:42:32 +00:00
|
|
|
VERIFY(mm_data.m_quickmap_in_use.is_locked());
|
2021-08-22 10:37:50 +00:00
|
|
|
VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::current_id() * PAGE_SIZE);
|
2021-07-08 01:50:05 +00:00
|
|
|
u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
|
2021-07-18 12:47:32 +00:00
|
|
|
auto& pte = ((PageTableEntry*)boot_pd_kernel_pt1023)[pte_idx];
|
2020-02-08 11:49:00 +00:00
|
|
|
pte.clear();
|
2020-07-06 15:11:52 +00:00
|
|
|
flush_tlb_local(vaddr);
|
2022-08-23 19:42:30 +00:00
|
|
|
mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_previous_interrupts_state);
|
2018-11-05 12:48:07 +00:00
|
|
|
}
|
|
|
|
|
2022-08-23 15:58:05 +00:00
|
|
|
// Returns true if `vaddr` lies inside a userspace region of `space` that is
// marked as a stack region.
bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
{
    // Kernel addresses can never be valid userspace stack locations.
    if (!is_user_address(vaddr))
        return false;

    auto const* region = find_user_region_from_vaddr(space, vaddr);
    if (region == nullptr)
        return false;
    return region->is_user() && region->is_stack();
}
|
|
|
|
|
2022-01-15 16:01:53 +00:00
|
|
|
// Remove a kernel region from the global region tree. Only kernel regions
// may be unregistered through this path.
void MemoryManager::unregister_kernel_region(Region& region)
{
    VERIFY(region.is_kernel());
    m_global_data.with([&region](auto& global_data) {
        global_data.region_tree.remove(region);
    });
}
|
2018-11-09 00:25:31 +00:00
|
|
|
|
2020-01-18 07:34:28 +00:00
|
|
|
void MemoryManager::dump_kernel_regions()
|
|
|
|
{
|
2021-02-12 15:20:21 +00:00
|
|
|
dbgln("Kernel regions:");
|
2021-12-28 18:54:05 +00:00
|
|
|
char const* addr_padding = " ";
|
2021-07-21 23:21:39 +00:00
|
|
|
dbgln("BEGIN{} END{} SIZE{} ACCESS NAME",
|
|
|
|
addr_padding, addr_padding, addr_padding);
|
2022-08-25 14:46:13 +00:00
|
|
|
m_global_data.with([&](auto& global_data) {
|
|
|
|
for (auto& region : global_data.region_tree.regions()) {
|
2022-08-23 10:28:04 +00:00
|
|
|
dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
|
|
|
|
region.vaddr().get(),
|
|
|
|
region.vaddr().offset(region.size() - 1).get(),
|
|
|
|
region.size(),
|
|
|
|
region.is_readable() ? 'R' : ' ',
|
|
|
|
region.is_writable() ? 'W' : ' ',
|
|
|
|
region.is_executable() ? 'X' : ' ',
|
|
|
|
region.is_shared() ? 'S' : ' ',
|
|
|
|
region.is_stack() ? 'T' : ' ',
|
|
|
|
region.is_syscall_region() ? 'C' : ' ',
|
|
|
|
region.name());
|
|
|
|
}
|
|
|
|
});
|
2020-01-18 07:34:28 +00:00
|
|
|
}
|
|
|
|
|
2021-03-11 12:03:40 +00:00
|
|
|
// Toggle the writable bit of the kernel page table entry covering `vaddr`,
// flushing the TLB only when the bit actually changes.
void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
{
    SpinlockLocker locker(kernel_page_directory().get_lock());
    auto* pte = ensure_pte(kernel_page_directory(), vaddr);
    VERIFY(pte != nullptr);
    // Avoid a needless TLB flush when the PTE already has the desired state.
    if (pte->is_writable() != writable) {
        pte->set_writable(writable);
        flush_tlb(&kernel_page_directory(), vaddr);
    }
}
|
|
|
|
|
2021-08-04 20:49:13 +00:00
|
|
|
// Return any still-committed (but never taken) pages back to the system's
// commitment pool on destruction.
CommittedPhysicalPageSet::~CommittedPhysicalPageSet()
{
    if (m_page_count != 0)
        MM.uncommit_physical_pages({}, m_page_count);
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Consume one page from this committed set and allocate it (zero-filled).
// The set must not be empty.
NonnullRefPtr<PhysicalRAMPage> CommittedPhysicalPageSet::take_one()
{
    VERIFY(m_page_count > 0);
    m_page_count--;
    return MM.allocate_committed_physical_page({}, MemoryManager::ShouldZeroFill::Yes);
}
|
|
|
|
|
2021-08-05 15:14:13 +00:00
|
|
|
void CommittedPhysicalPageSet::uncommit_one()
|
|
|
|
{
|
|
|
|
VERIFY(m_page_count > 0);
|
|
|
|
--m_page_count;
|
2022-07-14 12:27:22 +00:00
|
|
|
MM.uncommit_physical_pages({}, 1);
|
2021-08-05 15:14:13 +00:00
|
|
|
}
|
|
|
|
|
2024-05-11 15:15:51 +00:00
|
|
|
// Copy the contents of `physical_page` into `page_buffer` by quickmapping it
// into this CPU's quickmap slot for the duration of the memcpy.
void MemoryManager::copy_physical_page(PhysicalRAMPage& physical_page, u8 page_buffer[PAGE_SIZE])
{
    auto* mapped_page = quickmap_page(physical_page);
    memcpy(page_buffer, mapped_page, PAGE_SIZE);
    unquickmap_page();
}
|
|
|
|
|
2022-04-05 10:37:11 +00:00
|
|
|
// Create and map a kernel region whose virtual range is identical to the given
// physical range (identity mapping), with RWX access.
ErrorOr<NonnullOwnPtr<Memory::Region>> MemoryManager::create_identity_mapped_region(PhysicalAddress address, size_t size)
{
    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(address, size));
    auto region = TRY(Memory::Region::create_unplaced(move(vmobject), 0, {}, Memory::Region::Access::ReadWriteExecute));
    // Place the region at the virtual address equal to its physical address.
    region->m_range = Memory::VirtualRange { VirtualAddress { (FlatPtr)address.get() }, size };
    TRY(region->map(MM.kernel_page_directory()));
    return region;
}
|
|
|
|
|
2022-04-05 10:40:31 +00:00
|
|
|
// Reserve a virtual address range of the given size/alignment in the global
// region tree, backed by nothing (no VMObject). Placement is not randomized.
ErrorOr<NonnullOwnPtr<Region>> MemoryManager::allocate_unbacked_region_anywhere(size_t size, size_t alignment)
{
    auto region = TRY(Region::create_unbacked());
    TRY(m_global_data.with([&](auto& global_data) {
        return global_data.region_tree.place_anywhere(*region, RandomizeVirtualAddress::No, size, alignment);
    }));
    return region;
}
|
|
|
|
|
2022-08-25 14:46:13 +00:00
|
|
|
// Return a snapshot of the global physical memory accounting, verifying its
// internal consistency before handing it out.
MemoryManager::SystemMemoryInfo MemoryManager::get_system_memory_info()
{
    return m_global_data.with([&](auto& global_data) {
        auto const& info = global_data.system_memory_info;
        // Every physical page must be accounted for: used + committed + uncommitted.
        auto unused_pages = info.physical_pages_committed + info.physical_pages_uncommitted;
        VERIFY(info.physical_pages == (info.physical_pages_used + unused_pages));
        return info;
    });
}
|
2020-02-16 00:27:42 +00:00
|
|
|
}
|