Kernel: Rename Memory::Space => Memory::AddressSpace

Andreas Kling 2021-08-06 13:57:39 +02:00
parent cd5faf4e42
commit b7476d7a1b
10 changed files with 58 additions and 58 deletions

Kernel/CMakeLists.txt

@@ -131,6 +131,7 @@ set(KERNEL_SOURCES
     KLexicalPath.cpp
     KString.cpp
     KSyms.cpp
+    Memory/AddressSpace.cpp
     Memory/AnonymousVMObject.cpp
     Memory/InodeVMObject.cpp
     Memory/MemoryManager.cpp
@@ -144,7 +145,6 @@ set(KERNEL_SOURCES
     Memory/RingBuffer.cpp
     Memory/ScatterGatherList.cpp
     Memory/SharedInodeVMObject.cpp
-    Memory/Space.cpp
     Memory/VMObject.cpp
     Memory/VirtualRange.cpp
     Memory/VirtualRangeAllocator.cpp

Kernel/Forward.h

@@ -66,6 +66,7 @@ class WaitQueue;
 class WorkQueue;

 namespace Memory {
+class AddressSpace;
 class AnonymousVMObject;
 class InodeVMObject;
 class MappedROM;
@@ -74,12 +75,11 @@ class PageDirectory;
 class PhysicalPage;
 class PhysicalRegion;
 class PrivateInodeVMObject;
-class VirtualRange;
-class VirtualRangeAllocator;
 class Region;
 class SharedInodeVMObject;
-class Space;
 class VMObject;
+class VirtualRange;
+class VirtualRangeAllocator;
 }

 template<typename BaseType>
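
These forward declarations are what keep the rename cheap for the rest of the kernel: headers that only pass AddressSpace around by pointer or reference never need its full definition. A minimal self-contained sketch of the idiom (ProcessLike is a hypothetical holder type, not kernel code):

// Sketch (not part of the commit): a forward declaration suffices for
// pointers and references; the full class definition is only needed in
// translation units that actually access AddressSpace members.
namespace Kernel::Memory {
class AddressSpace; // as declared in Forward.h above
}

class ProcessLike { // hypothetical holder type, for illustration
public:
    Kernel::Memory::AddressSpace* space() { return m_space; }

private:
    Kernel::Memory::AddressSpace* m_space { nullptr }; // pointer to incomplete type is fine
};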

Kernel/Memory/Space.cpp → Kernel/Memory/AddressSpace.cpp

@@ -5,39 +5,39 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */

+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/InodeVMObject.h>
 #include <Kernel/Memory/MemoryManager.h>
-#include <Kernel/Memory/Space.h>
 #include <Kernel/PerformanceManager.h>
 #include <Kernel/Process.h>
 #include <Kernel/SpinLock.h>

 namespace Kernel::Memory {

-OwnPtr<Space> Space::try_create(Process& process, Space const* parent)
+OwnPtr<AddressSpace> AddressSpace::try_create(Process& process, AddressSpace const* parent)
 {
     auto page_directory = PageDirectory::try_create_for_userspace(parent ? &parent->page_directory().range_allocator() : nullptr);
     if (!page_directory)
         return {};
-    auto space = adopt_own_if_nonnull(new (nothrow) Space(process, page_directory.release_nonnull()));
+    auto space = adopt_own_if_nonnull(new (nothrow) AddressSpace(process, page_directory.release_nonnull()));
     if (!space)
         return {};
     space->page_directory().set_space({}, *space);
     return space;
 }

-Space::Space(Process& process, NonnullRefPtr<PageDirectory> page_directory)
+AddressSpace::AddressSpace(Process& process, NonnullRefPtr<PageDirectory> page_directory)
     : m_process(&process)
     , m_page_directory(move(page_directory))
 {
 }

-Space::~Space()
+AddressSpace::~AddressSpace()
 {
 }

-KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
+KResult AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 {
     if (!size)
         return EINVAL;
@@ -139,7 +139,7 @@ KResult Space::unmap_mmap_range(VirtualAddress addr, size_t size)
     return KSuccess;
 }

-Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
+Optional<VirtualRange> AddressSpace::allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
     size = page_round_up(size);
@@ -148,7 +148,7 @@ Optional<VirtualRange> Space::allocate_range(VirtualAddress vaddr, size_t size,
     return page_directory().range_allocator().allocate_specific(vaddr, size);
 }

-KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
+KResultOr<Region*> AddressSpace::try_allocate_split_region(Region const& source_region, VirtualRange const& range, size_t offset_in_vmobject)
 {
     auto new_region = Region::try_create_user_accessible(
         range, source_region.vmobject(), offset_in_vmobject, KString::try_create(source_region.name()), source_region.access(), source_region.is_cacheable() ? Region::Cacheable::Yes : Region::Cacheable::No, source_region.is_shared());
@@ -168,7 +168,7 @@ KResultOr<Region*> Space::try_allocate_split_region(Region const& source_region,
     return region;
 }

-KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
+KResultOr<Region*> AddressSpace::allocate_region(VirtualRange const& range, StringView name, int prot, AllocationStrategy strategy)
 {
     VERIFY(range.is_valid());
     auto vmobject = AnonymousVMObject::try_create_with_size(range.size(), strategy);
@@ -185,7 +185,7 @@ KResultOr<Region*> Space::allocate_region(VirtualRange const& range, StringView
     return added_region;
 }

-KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
+KResultOr<Region*> AddressSpace::allocate_region_with_vmobject(VirtualRange const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, StringView name, int prot, bool shared)
 {
     VERIFY(range.is_valid());
     size_t end_in_vmobject = offset_in_vmobject + range.size();
@@ -215,12 +215,12 @@ KResultOr<Region*> Space::allocate_region_with_vmobject(VirtualRange const& rang
     return added_region;
 }

-void Space::deallocate_region(Region& region)
+void AddressSpace::deallocate_region(Region& region)
 {
     take_region(region);
 }

-NonnullOwnPtr<Region> Space::take_region(Region& region)
+NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
     ScopedSpinLock lock(m_lock);

@@ -232,7 +232,7 @@ NonnullOwnPtr<Region> Space::take_region(Region& region)
     return found_region;
 }

-Region* Space::find_region_from_range(VirtualRange const& range)
+Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
@@ -250,7 +250,7 @@ Region* Space::find_region_from_range(VirtualRange const& range)
     return region;
 }

-Region* Space::find_region_containing(VirtualRange const& range)
+Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
     ScopedSpinLock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
@@ -259,7 +259,7 @@ Region* Space::find_region_containing(VirtualRange const& range)
     return (*candidate)->range().contains(range) ? candidate->ptr() : nullptr;
 }

-Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
+Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& range)
 {
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
@@ -282,7 +282,7 @@ Vector<Region*> Space::find_regions_intersecting(VirtualRange const& range)
     return regions;
 }

-Region* Space::add_region(NonnullOwnPtr<Region> region)
+Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
     ScopedSpinLock lock(m_lock);
@@ -291,7 +291,7 @@ Region* Space::add_region(NonnullOwnPtr<Region> region)
 }

 // Carve out a virtual address range from a region and return the two regions on either side
-KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
+KResultOr<Vector<Region*, 2>> AddressSpace::try_split_region_around_range(const Region& source_region, VirtualRange const& desired_range)
 {
     VirtualRange old_region_range = source_region.range();
     auto remaining_ranges_after_unmap = old_region_range.carve(desired_range);
@@ -312,7 +312,7 @@ KResultOr<Vector<Region*, 2>> Space::try_split_region_around_range(const Region&
     return new_regions;
 }

-void Space::dump_regions()
+void AddressSpace::dump_regions()
 {
     dbgln("Process regions:");
 #if ARCH(I386)
@@ -339,13 +339,13 @@ void Space::dump_regions()
     MM.dump_kernel_regions();
 }

-void Space::remove_all_regions(Badge<Process>)
+void AddressSpace::remove_all_regions(Badge<Process>)
 {
     ScopedSpinLock lock(m_lock);
     m_regions.clear();
 }

-size_t Space::amount_dirty_private() const
+size_t AddressSpace::amount_dirty_private() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
@@ -359,7 +359,7 @@ size_t Space::amount_dirty_private() const
     return amount;
 }

-size_t Space::amount_clean_inode() const
+size_t AddressSpace::amount_clean_inode() const
 {
     ScopedSpinLock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
@@ -373,7 +373,7 @@ size_t Space::amount_clean_inode() const
     return amount;
 }

-size_t Space::amount_virtual() const
+size_t AddressSpace::amount_virtual() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -383,7 +383,7 @@ size_t Space::amount_virtual() const
     return amount;
 }

-size_t Space::amount_resident() const
+size_t AddressSpace::amount_resident() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -394,7 +394,7 @@ size_t Space::amount_resident() const
     return amount;
 }

-size_t Space::amount_shared() const
+size_t AddressSpace::amount_shared() const
 {
     ScopedSpinLock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
@@ -408,7 +408,7 @@ size_t Space::amount_shared() const
     return amount;
 }

-size_t Space::amount_purgeable_volatile() const
+size_t AddressSpace::amount_purgeable_volatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;
@@ -422,7 +422,7 @@ size_t Space::amount_purgeable_volatile() const
     return amount;
 }

-size_t Space::amount_purgeable_nonvolatile() const
+size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
     ScopedSpinLock lock(m_lock);
     size_t amount = 0;

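A note on the factory shape seen throughout this file: try_create() returns a null OwnPtr when the page directory or the AddressSpace object cannot be allocated, and every call site in this commit checks for null and propagates ENOMEM. A self-contained analog, assuming std::unique_ptr as a stand-in for OwnPtr and new (std::nothrow) for adopt_own_if_nonnull:

#include <memory>
#include <new>

// Self-contained sketch of the fallible-factory pattern above; the type
// name is hypothetical, not kernel code.
class AddressSpaceLike {
public:
    static std::unique_ptr<AddressSpaceLike> try_create()
    {
        // Null on allocation failure instead of throwing; callers check
        // and propagate ENOMEM, as Process::attach_resources() does below.
        return std::unique_ptr<AddressSpaceLike>(new (std::nothrow) AddressSpaceLike);
    }

private:
    AddressSpaceLike() = default;
};

The call sites in Process.cpp and Syscalls/execve.cpp later in this commit follow exactly this check-and-propagate shape.
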
Kernel/Memory/Space.h → Kernel/Memory/AddressSpace.h

@@ -16,10 +16,10 @@
 namespace Kernel::Memory {

-class Space {
+class AddressSpace {
 public:
-    static OwnPtr<Space> try_create(Process&, Space const* parent);
-    ~Space();
+    static OwnPtr<AddressSpace> try_create(Process&, AddressSpace const* parent);
+    ~AddressSpace();

     PageDirectory& page_directory() { return *m_page_directory; }
     const PageDirectory& page_directory() const { return *m_page_directory; }
@@ -66,7 +66,7 @@ public:
     size_t amount_purgeable_nonvolatile() const;

 private:
-    Space(Process&, NonnullRefPtr<PageDirectory>);
+    AddressSpace(Process&, NonnullRefPtr<PageDirectory>);

     Process* m_process { nullptr };
     mutable RecursiveSpinLock m_lock;

Kernel/Memory/MemoryManager.cpp

@@ -612,19 +612,19 @@ Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
     return nullptr;
 }

-Region* MemoryManager::find_user_region_from_vaddr_no_lock(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space, VirtualAddress vaddr)
 {
     VERIFY(space.get_lock().own_lock());
     return space.find_region_containing({ vaddr, 1 });
 }

-Region* MemoryManager::find_user_region_from_vaddr(Space& space, VirtualAddress vaddr)
+Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
 {
     ScopedSpinLock lock(space.get_lock());
     return find_user_region_from_vaddr_no_lock(space, vaddr);
 }

-void MemoryManager::validate_syscall_preconditions(Space& space, RegisterState const& regs)
+void MemoryManager::validate_syscall_preconditions(AddressSpace& space, RegisterState const& regs)
 {
     // We take the space lock once here and then use the no_lock variants
     // to avoid excessive spinlock recursion in this extremely common path.
@@ -933,7 +933,7 @@ void MemoryManager::enter_process_paging_scope(Process& process)
     enter_space(process.space());
 }

-void MemoryManager::enter_space(Space& space)
+void MemoryManager::enter_space(AddressSpace& space)
 {
     auto current_thread = Thread::current();
     VERIFY(current_thread != nullptr);
@@ -1039,7 +1039,7 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }

-bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddress vaddr) const
 {
     VERIFY(space.get_lock().own_lock());

@@ -1050,7 +1050,7 @@ bool MemoryManager::validate_user_stack_no_lock(Space& space, VirtualAddress vad
     return region && region->is_user() && region->is_stack();
 }

-bool MemoryManager::validate_user_stack(Space& space, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
 {
     ScopedSpinLock lock(space.get_lock());
     return validate_user_stack_no_lock(space, vaddr);

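The validate_user_stack()/validate_user_stack_no_lock() pair above shows the locking discipline this file uses: the public function takes the address-space lock once, while the _no_lock variant asserts that the lock is already held, so hot paths such as validate_syscall_preconditions() can issue several queries under a single acquisition. A self-contained sketch of the pattern, with std::recursive_mutex standing in for the kernel's RecursiveSpinLock and the actual region lookup elided:

#include <mutex>

// Hypothetical stand-in type, for illustration only.
struct AddressSpaceLike {
    mutable std::recursive_mutex lock;

    // Precondition: the caller already holds `lock` (the kernel asserts
    // this with space.get_lock().own_lock() above).
    bool validate_user_stack_no_lock() const
    {
        return true; // ... the real region lookup would happen here ...
    }

    bool validate_user_stack() const
    {
        std::lock_guard guard(lock); // take the lock exactly once
        return validate_user_stack_no_lock();
    }
};
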
Kernel/Memory/MemoryManager.h

@@ -161,10 +161,10 @@ public:
     void unmap_ksyms_after_init();

     static void enter_process_paging_scope(Process&);
-    static void enter_space(Space&);
+    static void enter_space(AddressSpace&);

-    bool validate_user_stack_no_lock(Space&, VirtualAddress) const;
-    bool validate_user_stack(Space&, VirtualAddress) const;
+    bool validate_user_stack_no_lock(AddressSpace&, VirtualAddress) const;
+    bool validate_user_stack(AddressSpace&, VirtualAddress) const;

     enum class ShouldZeroFill {
         No,
@@ -219,9 +219,9 @@ public:
             callback(vmobject);
     }

-    static Region* find_user_region_from_vaddr(Space&, VirtualAddress);
-    static Region* find_user_region_from_vaddr_no_lock(Space&, VirtualAddress);
-    static void validate_syscall_preconditions(Space&, RegisterState const&);
+    static Region* find_user_region_from_vaddr(AddressSpace&, VirtualAddress);
+    static Region* find_user_region_from_vaddr_no_lock(AddressSpace&, VirtualAddress);
+    static void validate_syscall_preconditions(AddressSpace&, RegisterState const&);

     void dump_kernel_regions();

Kernel/Memory/PageDirectory.h

@@ -41,17 +41,17 @@ public:
     VirtualRangeAllocator& identity_range_allocator() { return m_identity_range_allocator; }

-    Space* space() { return m_space; }
-    const Space* space() const { return m_space; }
+    AddressSpace* space() { return m_space; }
+    const AddressSpace* space() const { return m_space; }

-    void set_space(Badge<Space>, Space& space) { m_space = &space; }
+    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }

     RecursiveSpinLock& get_lock() { return m_lock; }

 private:
     PageDirectory();

-    Space* m_space { nullptr };
+    AddressSpace* m_space { nullptr };
     VirtualRangeAllocator m_range_allocator;
     VirtualRangeAllocator m_identity_range_allocator;

 #if ARCH(X86_64)

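The set_space() signature above relies on AK's Badge idiom, which this commit re-keys from Badge<Space> to Badge<AddressSpace>: only the befriended class can construct the badge, so the public setter is callable from AddressSpace alone. A simplified self-contained sketch (the adopt() helper is illustrative, not the real API):

// Simplified sketch of the idiom; the real AK::Badge lives in AK/Badge.h.
template<typename T>
class Badge {
    friend T;           // only T may construct a Badge<T>...
    Badge() = default;
};

class AddressSpace;

class PageDirectory {
public:
    // ...so only AddressSpace can call this, even though it is public.
    void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }

private:
    AddressSpace* m_space { nullptr };
};

class AddressSpace {
public:
    void adopt(PageDirectory& directory) { directory.set_space({}, *this); } // `adopt` is illustrative
};
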
Kernel/Process.cpp

@@ -267,7 +267,7 @@ Process::Process(const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool

 KResult Process::attach_resources(RefPtr<Thread>& first_thread, Process* fork_parent)
 {
-    m_space = Memory::Space::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
+    m_space = Memory::AddressSpace::try_create(*this, fork_parent ? &fork_parent->space() : nullptr);
     if (!m_space)
         return ENOMEM;

Kernel/Process.h

@@ -20,7 +20,7 @@
 #include <Kernel/FileSystem/InodeMetadata.h>
 #include <Kernel/Forward.h>
 #include <Kernel/FutexQueue.h>
-#include <Kernel/Memory/Space.h>
+#include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Mutex.h>
 #include <Kernel/PerformanceEventBuffer.h>
 #include <Kernel/ProcessGroup.h>
@@ -515,8 +515,8 @@ public:
     PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }

-    Memory::Space& space() { return *m_space; }
-    Memory::Space const& space() const { return *m_space; }
+    Memory::AddressSpace& space() { return *m_space; }
+    Memory::AddressSpace const& space() const { return *m_space; }

     VirtualAddress signal_trampoline() const { return m_signal_trampoline; }
@@ -582,7 +582,7 @@ private:
     String m_name;

-    OwnPtr<Memory::Space> m_space;
+    OwnPtr<Memory::AddressSpace> m_space;

     RefPtr<ProcessGroup> m_pg;

Kernel/Syscalls/execve.cpp

@@ -30,7 +30,7 @@ namespace Kernel {
 extern Memory::Region* g_signal_trampoline_region;

 struct LoadResult {
-    OwnPtr<Memory::Space> space;
+    OwnPtr<Memory::AddressSpace> space;
     FlatPtr load_base { 0 };
     FlatPtr entry_eip { 0 };
     size_t size { 0 };
@@ -263,7 +263,7 @@ enum class ShouldAllowSyscalls {
     Yes,
 };

-static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_space, FileDescription& object_description,
+static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> new_space, FileDescription& object_description,
     FlatPtr load_offset, ShouldAllocateTls should_allocate_tls, ShouldAllowSyscalls should_allow_syscalls)
 {
     auto& inode = *(object_description.inode());
@@ -453,7 +453,7 @@ static KResultOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::Space> new_sp

 KResultOr<LoadResult> Process::load(NonnullRefPtr<FileDescription> main_program_description,
     RefPtr<FileDescription> interpreter_description, const ElfW(Ehdr) & main_program_header)
 {
-    auto new_space = Memory::Space::try_create(*this, nullptr);
+    auto new_space = Memory::AddressSpace::try_create(*this, nullptr);
     if (!new_space)
         return ENOMEM;
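
The LoadResult change above also hints at the ownership flow during exec: Process::load() builds the fresh address space inside the load result (note the nullptr parent, in contrast to the fork path in Process.cpp), and the process only adopts it once loading succeeds. A hypothetical sketch with stand-in types, using std::unique_ptr in place of OwnPtr:

#include <memory>
#include <utility>

// Stand-in types for illustration; not kernel code.
struct AddressSpaceLike { };

struct LoadResultLike {
    std::unique_ptr<AddressSpaceLike> space; // cf. OwnPtr<Memory::AddressSpace> space above
};

struct ProcessLike {
    std::unique_ptr<AddressSpaceLike> m_space;

    void adopt_loaded_space(LoadResultLike&& result)
    {
        // Only after a successful load does the process take ownership;
        // the previous address space is destroyed by this assignment.
        m_space = std::move(result.space);
    }
};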