Kernel: Make purgeable memory a VMObject level concept (again)

This patch changes the semantics of purgeable memory.

- AnonymousVMObject now has a "purgeable" flag. It can only be set when
  constructing the object. (Previously, all anonymous memory was
  effectively purgeable.)

- AnonymousVMObject now has a "volatile" flag. It covers the entire
  range of physical pages. (Previously, we tracked ranges of volatile
  pages, effectively making it a page-level concept.)

- Non-volatile objects maintain a physical page reservation via the
  committed-pages mechanism to ensure full coverage for page faults.

- When an object is made volatile, it relinquishes any unused committed
  pages immediately. If it is later made non-volatile again, we attempt
  to make a new committed-pages reservation. If that fails, we return
  ENOMEM to userspace.

mmap() now creates purgeable objects if passed the MAP_PURGEABLE option
together with MAP_ANONYMOUS. anon_create() memory is always purgeable.
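
A minimal userspace sketch of the intended usage (illustrative only; it
assumes the usual LibC mmap()/madvise() wrappers together with the
MADV_SET_VOLATILE / MADV_SET_NONVOLATILE advice values handled by
sys$madvise() below):

    #include <errno.h>
    #include <sys/mman.h>

    int use_purgeable_cache()
    {
        size_t size = 1024 * 1024;

        // Purgeable anonymous memory: the whole VMObject may later be marked volatile.
        void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE,
            MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE, 0, 0);
        if (data == MAP_FAILED)
            return -1;

        // ... fill the buffer with data that can be regenerated ...

        // Allow the kernel to discard the pages; unused committed pages are
        // relinquished immediately.
        madvise(data, size, MADV_SET_VOLATILE);

        // Later: reclaim the buffer. Returns 1 if the contents were purged,
        // 0 if they survived, and -1 with errno == ENOMEM if a new
        // committed-pages reservation could not be made.
        int rc = madvise(data, size, MADV_SET_NONVOLATILE);
        if (rc < 0 && errno == ENOMEM)
            return -1; // treat the cached data as lost
        if (rc == 1) {
            // Contents were purged; regenerate them before use.
        }
        return 0;
    }

LibC's malloc() and LibGfx bitmaps opt in by adding MAP_PURGEABLE to their
mmap() flags, as shown in the diffs below.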
Andreas Kling 2021-07-25 01:46:44 +02:00
parent 6bb53d6a80
commit 2d1a651e0a
17 changed files with 189 additions and 1004 deletions


@@ -264,7 +264,6 @@ set(KERNEL_SOURCES
VM/PhysicalZone.cpp
VM/PrivateInodeVMObject.cpp
VM/ProcessPagingScope.cpp
VM/PurgeablePageRanges.cpp
VM/Range.cpp
VM/RangeAllocator.cpp
VM/Region.cpp


@@ -7,6 +7,7 @@
#pragma once
#include <Kernel/FileSystem/File.h>
#include <Kernel/VM/AnonymousVMObject.h>
namespace Kernel {


@@ -456,7 +456,7 @@ private:
region_object.add("syscall", region->is_syscall_region());
region_object.add("purgeable", region->vmobject().is_anonymous());
if (region->vmobject().is_anonymous()) {
region_object.add("volatile", static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile());
region_object.add("volatile", static_cast<AnonymousVMObject const&>(region->vmobject()).is_volatile());
}
region_object.add("cacheable", region->is_cacheable());
region_object.add("address", region->vaddr().get());


@@ -29,7 +29,7 @@ KResultOr<FlatPtr> Process::sys$anon_create(size_t size, int options)
if (new_fd < 0)
return new_fd;
auto vmobject = AnonymousVMObject::try_create_with_size(size, AllocationStrategy::Reserve);
auto vmobject = AnonymousVMObject::try_create_purgeable_with_size(size, AllocationStrategy::Reserve);
if (!vmobject)
return ENOMEM;


@@ -12,6 +12,7 @@
#include <Kernel/PerformanceEventBuffer.h>
#include <Kernel/PerformanceManager.h>
#include <Kernel/Process.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageDirectory.h>
#include <Kernel/VM/PrivateInodeVMObject.h>
@@ -217,7 +218,14 @@ KResultOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> u
if (map_anonymous) {
auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
auto region_or_error = space().allocate_region(range.value(), {}, prot, strategy);
RefPtr<AnonymousVMObject> vmobject;
if (flags & MAP_PURGEABLE)
vmobject = AnonymousVMObject::try_create_purgeable_with_size(page_round_up(size), strategy);
else
vmobject = AnonymousVMObject::try_create_with_size(page_round_up(size), strategy);
if (!vmobject)
return ENOMEM;
auto region_or_error = space().allocate_region_with_vmobject(range.value(), vmobject.release_nonnull(), 0, {}, prot, map_shared);
if (region_or_error.is_error())
return region_or_error.error().error();
region = region_or_error.value();
@@ -465,23 +473,17 @@ KResultOr<FlatPtr> Process::sys$madvise(Userspace<void*> address, size_t size, i
if (set_volatile || set_nonvolatile) {
if (!region->vmobject().is_anonymous())
return EPERM;
auto& vmobject = static_cast<AnonymousVMObject&>(region->vmobject());
bool was_purged = false;
switch (region->set_volatile(VirtualAddress(address), size, set_volatile, was_purged)) {
case Region::SetVolatileError::Success:
break;
case Region::SetVolatileError::NotPurgeable:
return EPERM;
case Region::SetVolatileError::OutOfMemory:
return ENOMEM;
}
if (set_nonvolatile)
return was_purged ? 1 : 0;
return 0;
auto result = vmobject.set_volatile(set_volatile, was_purged);
if (result.is_error())
return result.error();
return was_purged ? 1 : 0;
}
if (advice & MADV_GET_VOLATILE) {
if (!region->vmobject().is_anonymous())
return EPERM;
return region->is_volatile(VirtualAddress(address), size) ? 0 : 1;
return static_cast<AnonymousVMObject&>(region->vmobject()).is_volatile() ? 0 : 1;
}
return EINVAL;
}
@@ -668,5 +670,4 @@ KResultOr<FlatPtr> Process::sys$msyscall(Userspace<void*> address)
region->set_syscall_region(true);
return 0;
}
}


@@ -19,7 +19,7 @@ KResultOr<FlatPtr> Process::sys$purge(int mode)
REQUIRE_NO_PROMISES;
if (!is_superuser())
return EPERM;
int purged_page_count = 0;
size_t purged_page_count = 0;
if (mode & PURGE_ALL_VOLATILE) {
NonnullRefPtrVector<AnonymousVMObject> vmobjects;
{


@@ -92,6 +92,7 @@ enum {
#define MAP_STACK 0x40
#define MAP_NORESERVE 0x80
#define MAP_RANDOMIZED 0x100
#define MAP_PURGEABLE 0x200
#define PROT_READ 0x1
#define PROT_WRITE 0x2


@@ -22,16 +22,17 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
// commit the number of pages that we need to potentially allocate
// so that the parent is still guaranteed to be able to have all
// non-volatile memory available.
size_t need_cow_pages = 0;
size_t new_cow_pages_needed = 0;
// We definitely need to commit non-volatile areas
for_each_nonvolatile_range([&](VolatilePageRange const& nonvolatile_range) {
need_cow_pages += nonvolatile_range.count;
});
if (is_volatile()) {
// NOTE: If this object is currently volatile, we don't own any committed pages.
} else {
new_cow_pages_needed = page_count();
}
dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, need_cow_pages);
dbgln_if(COMMIT_DEBUG, "Cloning {:p}, need {} committed cow pages", this, new_cow_pages_needed);
if (!MM.commit_user_physical_pages(need_cow_pages))
if (!MM.commit_user_physical_pages(new_cow_pages_needed))
return {};
// Create or replace the committed cow pages. When cloning a previously
@@ -40,10 +41,10 @@ RefPtr<VMObject> AnonymousVMObject::try_clone()
// one would keep the one it still has. This ensures that the original
// one and this one, as well as the clone have sufficient resources
// to cow all pages as needed
m_shared_committed_cow_pages = try_create<CommittedCowPages>(need_cow_pages);
m_shared_committed_cow_pages = try_create<CommittedCowPages>(new_cow_pages_needed);
if (!m_shared_committed_cow_pages) {
MM.uncommit_user_physical_pages(need_cow_pages);
MM.uncommit_user_physical_pages(new_cow_pages_needed);
return {};
}
@@ -65,6 +66,20 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_size(size_t size, A
return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
}
RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_purgeable_with_size(size_t size, AllocationStrategy commit)
{
if (commit == AllocationStrategy::Reserve || commit == AllocationStrategy::AllocateNow) {
// We need to attempt to commit before actually creating the object
if (!MM.commit_user_physical_pages(ceil_div(size, static_cast<size_t>(PAGE_SIZE))))
return {};
}
auto vmobject = adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(size, commit));
if (!vmobject)
return {};
vmobject->m_purgeable = true;
return vmobject;
}
RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
{
return adopt_ref_if_nonnull(new (nothrow) AnonymousVMObject(physical_pages));
@@ -81,7 +96,6 @@ RefPtr<AnonymousVMObject> AnonymousVMObject::try_create_for_physical_range(Physi
AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy)
: VMObject(size)
, m_volatile_ranges_cache({ 0, page_count() })
, m_unused_committed_pages(strategy == AllocationStrategy::Reserve ? page_count() : 0)
{
if (strategy == AllocationStrategy::AllocateNow) {
@@ -97,7 +111,6 @@ AnonymousVMObject::AnonymousVMObject(size_t size, AllocationStrategy strategy)
AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
: VMObject(size)
, m_volatile_ranges_cache({ 0, page_count() })
{
VERIFY(paddr.page_base() == paddr);
for (size_t i = 0; i < page_count(); ++i)
@@ -106,7 +119,6 @@ AnonymousVMObject::AnonymousVMObject(PhysicalAddress paddr, size_t size)
AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_pages)
: VMObject(physical_pages.size() * PAGE_SIZE)
, m_volatile_ranges_cache({ 0, page_count() })
{
for (size_t i = 0; i < physical_pages.size(); ++i) {
m_physical_pages[i] = physical_pages[i];
@@ -115,9 +127,6 @@ AnonymousVMObject::AnonymousVMObject(Span<NonnullRefPtr<PhysicalPage>> physical_
AnonymousVMObject::AnonymousVMObject(AnonymousVMObject const& other)
: VMObject(other)
, m_volatile_ranges_cache({ 0, page_count() }) // do *not* clone this
, m_volatile_ranges_cache_dirty(true) // do *not* clone this
, m_purgeable_ranges() // do *not* clone this
, m_unused_committed_pages(other.m_unused_committed_pages)
, m_cow_map() // do *not* clone this
, m_shared_committed_cow_pages(other.m_shared_committed_cow_pages) // share the pool
@@ -152,217 +161,94 @@ AnonymousVMObject::~AnonymousVMObject()
MM.uncommit_user_physical_pages(m_unused_committed_pages);
}
int AnonymousVMObject::purge()
{
int purged_page_count = 0;
ScopedSpinLock lock(m_lock);
for_each_volatile_range([&](auto const& range) {
int purged_in_range = 0;
auto range_end = range.base + range.count;
for (size_t i = range.base; i < range_end; i++) {
auto& phys_page = m_physical_pages[i];
if (phys_page && !phys_page->is_shared_zero_page()) {
VERIFY(!phys_page->is_lazy_committed_page());
++purged_in_range;
}
phys_page = MM.shared_zero_page();
}
if (purged_in_range > 0) {
purged_page_count += purged_in_range;
set_was_purged(range);
for_each_region([&](auto& region) {
if (auto owner = region.get_owner()) {
// we need to hold a reference the process here (if there is one) as we may not own this region
dmesgln("Purged {} pages from region {} owned by {} at {} - {}",
purged_in_range,
region.name(),
*owner,
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
} else {
dmesgln("Purged {} pages from region {} (no ownership) at {} - {}",
purged_in_range,
region.name(),
region.vaddr_from_page_index(range.base),
region.vaddr_from_page_index(range.base + range.count));
}
region.remap_vmobject_page_range(range.base, range.count);
});
}
});
return purged_page_count;
}
void AnonymousVMObject::set_was_purged(VolatilePageRange const& range)
{
VERIFY(m_lock.is_locked());
for (auto* purgeable_ranges : m_purgeable_ranges)
purgeable_ranges->set_was_purged(range);
}
void AnonymousVMObject::register_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
size_t AnonymousVMObject::purge()
{
ScopedSpinLock lock(m_lock);
purgeable_page_ranges.set_vmobject(this);
VERIFY(!m_purgeable_ranges.contains_slow(&purgeable_page_ranges));
m_purgeable_ranges.append(&purgeable_page_ranges);
}
void AnonymousVMObject::unregister_purgeable_page_ranges(PurgeablePageRanges& purgeable_page_ranges)
{
ScopedSpinLock lock(m_lock);
for (size_t i = 0; i < m_purgeable_ranges.size(); i++) {
if (m_purgeable_ranges[i] != &purgeable_page_ranges)
if (!is_purgeable() || !is_volatile())
return 0;
size_t total_pages_purged = 0;
for (auto& page : m_physical_pages) {
VERIFY(page);
if (page->is_shared_zero_page())
continue;
purgeable_page_ranges.set_vmobject(nullptr);
m_purgeable_ranges.remove(i);
return;
page = MM.shared_zero_page();
++total_pages_purged;
}
VERIFY_NOT_REACHED();
}
bool AnonymousVMObject::is_any_volatile() const
{
ScopedSpinLock lock(m_lock);
for (auto& volatile_ranges : m_purgeable_ranges) {
ScopedSpinLock lock(volatile_ranges->m_volatile_ranges_lock);
if (!volatile_ranges->is_empty())
return true;
}
return false;
}
m_was_purged = true;
size_t AnonymousVMObject::remove_lazy_commit_pages(VolatilePageRange const& range)
{
VERIFY(m_lock.is_locked());
size_t removed_count = 0;
auto range_end = range.base + range.count;
for (size_t i = range.base; i < range_end; i++) {
auto& phys_page = m_physical_pages[i];
if (phys_page && phys_page->is_lazy_committed_page()) {
phys_page = MM.shared_zero_page();
removed_count++;
VERIFY(m_unused_committed_pages > 0);
if (--m_unused_committed_pages == 0)
break;
}
}
return removed_count;
}
void AnonymousVMObject::update_volatile_cache()
{
VERIFY(m_lock.is_locked());
VERIFY(m_volatile_ranges_cache_dirty);
m_volatile_ranges_cache.clear();
for_each_nonvolatile_range([&](VolatilePageRange const& range) {
m_volatile_ranges_cache.add_unchecked(range);
for_each_region([](Region& region) {
region.remap();
});
m_volatile_ranges_cache_dirty = false;
return total_pages_purged;
}
void AnonymousVMObject::range_made_volatile(VolatilePageRange const& range)
KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
{
VERIFY(m_lock.is_locked());
VERIFY(is_purgeable());
if (m_unused_committed_pages == 0)
return;
ScopedSpinLock locker(m_lock);
// We need to check this range for any pages that are marked for
// lazy committed allocation and turn them into shared zero pages
// and also adjust the m_unused_committed_pages for each such page.
// Take into account all the other views as well.
size_t uncommit_page_count = 0;
for_each_volatile_range([&](auto const& r) {
auto intersected = range.intersected(r);
if (!intersected.is_empty()) {
uncommit_page_count += remove_lazy_commit_pages(intersected);
if (m_unused_committed_pages == 0)
return IterationDecision::Break;
was_purged = m_was_purged;
if (m_volatile == is_volatile)
return KSuccess;
if (is_volatile) {
// When a VMObject is made volatile, it gives up all of its committed memory.
// Any physical pages already allocated remain in the VMObject for now, but the kernel is free to take them at any moment.
for (auto& page : m_physical_pages) {
if (page && page->is_lazy_committed_page())
page = MM.shared_zero_page();
}
return IterationDecision::Continue;
});
// Return those committed pages back to the system
if (uncommit_page_count > 0) {
dbgln_if(COMMIT_DEBUG, "Uncommit {} lazy-commit pages from {:p}", uncommit_page_count, this);
MM.uncommit_user_physical_pages(uncommit_page_count);
}
m_volatile_ranges_cache_dirty = true;
}
void AnonymousVMObject::range_made_nonvolatile(VolatilePageRange const&)
{
VERIFY(m_lock.is_locked());
m_volatile_ranges_cache_dirty = true;
}
size_t AnonymousVMObject::count_needed_commit_pages_for_nonvolatile_range(VolatilePageRange const& range)
{
VERIFY(m_lock.is_locked());
VERIFY(!range.is_empty());
size_t need_commit_pages = 0;
auto range_end = range.base + range.count;
for (size_t page_index = range.base; page_index < range_end; page_index++) {
// COW pages are accounted for in m_shared_committed_cow_pages
if (!m_cow_map.is_null() && m_cow_map.get(page_index))
continue;
auto& phys_page = m_physical_pages[page_index];
if (phys_page && phys_page->is_shared_zero_page())
need_commit_pages++;
}
return need_commit_pages;
}
size_t AnonymousVMObject::mark_committed_pages_for_nonvolatile_range(VolatilePageRange const& range, size_t mark_total)
{
VERIFY(m_lock.is_locked());
VERIFY(!range.is_empty());
VERIFY(mark_total > 0);
size_t pages_updated = 0;
auto range_end = range.base + range.count;
for (size_t page_index = range.base; page_index < range_end; page_index++) {
// COW pages are accounted for in m_shared_committed_cow_pages
if (!m_cow_map.is_null() && m_cow_map.get(page_index))
continue;
auto& phys_page = m_physical_pages[page_index];
if (phys_page && phys_page->is_shared_zero_page()) {
phys_page = MM.lazy_committed_page();
if (++pages_updated == mark_total)
break;
if (m_unused_committed_pages) {
MM.uncommit_user_physical_pages(m_unused_committed_pages);
m_unused_committed_pages = 0;
}
m_volatile = true;
m_was_purged = false;
return KSuccess;
}
// When a VMObject is made non-volatile, we try to commit however many pages are not currently available.
// If that fails, we return false to indicate that memory allocation failed.
size_t committed_pages_needed = 0;
for (auto& page : m_physical_pages) {
VERIFY(page);
if (page->is_shared_zero_page())
++committed_pages_needed;
}
dbgln_if(COMMIT_DEBUG, "Added {} lazy-commit pages to {:p}", pages_updated, this);
if (!committed_pages_needed) {
m_volatile = false;
return KSuccess;
}
m_unused_committed_pages += pages_updated;
return pages_updated;
if (!MM.commit_user_physical_pages(committed_pages_needed))
return ENOMEM;
m_unused_committed_pages = committed_pages_needed;
for (auto& page : m_physical_pages) {
if (page->is_shared_zero_page())
page = MM.lazy_committed_page();
}
m_volatile = false;
m_was_purged = false;
return KSuccess;
}
NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>, size_t page_index)
NonnullRefPtr<PhysicalPage> AnonymousVMObject::allocate_committed_page(Badge<Region>)
{
{
ScopedSpinLock lock(m_lock);
VERIFY(m_unused_committed_pages > 0);
// We shouldn't have any committed page tags in volatile regions
VERIFY([&]() {
for (auto* purgeable_ranges : m_purgeable_ranges) {
if (purgeable_ranges->is_volatile(page_index))
return false;
}
return true;
}());
m_unused_committed_pages--;
--m_unused_committed_pages;
}
return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
}
@@ -404,19 +290,20 @@ size_t AnonymousVMObject::cow_pages() const
return m_cow_map.count_slow(true);
}
bool AnonymousVMObject::is_nonvolatile(size_t page_index)
{
if (m_volatile_ranges_cache_dirty)
update_volatile_cache();
return !m_volatile_ranges_cache.contains(page_index);
}
PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
{
VERIFY_INTERRUPTS_DISABLED();
ScopedSpinLock lock(m_lock);
if (is_volatile()) {
// A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash.
dbgln("COW fault in volatile region, will crash.");
return PageFaultResponse::ShouldCrash;
}
auto& page_slot = physical_pages()[page_index];
bool have_committed = m_shared_committed_cow_pages && is_nonvolatile(page_index);
bool have_committed = m_shared_committed_cow_pages;
if (page_slot->ref_count() == 1) {
dbgln_if(PAGE_FAULT_DEBUG, " >> It's a COW page but nobody is sharing it anymore. Remap r/w");
set_should_cow(page_index, false);
@@ -462,4 +349,33 @@ PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, Virtual
return PageFaultResponse::Continue;
}
CommittedCowPages::CommittedCowPages(size_t committed_pages)
: m_committed_pages(committed_pages)
{
}
CommittedCowPages::~CommittedCowPages()
{
// Return unused committed pages
if (m_committed_pages > 0)
MM.uncommit_user_physical_pages(m_committed_pages);
}
NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
{
VERIFY(m_committed_pages > 0);
m_committed_pages--;
return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
}
bool CommittedCowPages::return_one()
{
VERIFY(m_committed_pages > 0);
m_committed_pages--;
MM.uncommit_user_physical_pages(1);
return m_committed_pages == 0;
}
}


@@ -10,108 +10,48 @@
#include <Kernel/VM/AllocationStrategy.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PageFaultResponse.h>
#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/VMObject.h>
namespace Kernel {
class AnonymousVMObject final : public VMObject {
friend class PurgeablePageRanges;
class CommittedCowPages : public RefCounted<CommittedCowPages> {
AK_MAKE_NONCOPYABLE(CommittedCowPages);
public:
CommittedCowPages() = delete;
CommittedCowPages(size_t);
~CommittedCowPages();
NonnullRefPtr<PhysicalPage> allocate_one();
bool return_one();
private:
size_t m_committed_pages;
};
class AnonymousVMObject final : public VMObject {
public:
virtual ~AnonymousVMObject() override;
static RefPtr<AnonymousVMObject> try_create_with_size(size_t, AllocationStrategy);
static RefPtr<AnonymousVMObject> try_create_for_physical_range(PhysicalAddress paddr, size_t size);
static RefPtr<AnonymousVMObject> try_create_with_physical_pages(Span<NonnullRefPtr<PhysicalPage>>);
static RefPtr<AnonymousVMObject> try_create_purgeable_with_size(size_t, AllocationStrategy);
virtual RefPtr<VMObject> try_clone() override;
[[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>, size_t);
[[nodiscard]] NonnullRefPtr<PhysicalPage> allocate_committed_page(Badge<Region>);
PageFaultResponse handle_cow_fault(size_t, VirtualAddress);
size_t cow_pages() const;
bool should_cow(size_t page_index, bool) const;
void set_should_cow(size_t page_index, bool);
void register_purgeable_page_ranges(PurgeablePageRanges&);
void unregister_purgeable_page_ranges(PurgeablePageRanges&);
bool is_purgeable() const { return m_purgeable; }
bool is_volatile() const { return m_volatile; }
int purge();
KResult set_volatile(bool is_volatile, bool& was_purged);
bool is_any_volatile() const;
template<IteratorFunction<VolatilePageRange const&> F>
IterationDecision for_each_volatile_range(F f) const
{
VERIFY(m_lock.is_locked());
// This is a little ugly. Basically, we're trying to find the
// volatile ranges that all share, because those are the only
// pages we can actually purge
for (auto* purgeable_range : m_purgeable_ranges) {
ScopedSpinLock purgeable_lock(purgeable_range->m_volatile_ranges_lock);
for (auto& r1 : purgeable_range->volatile_ranges().ranges()) {
VolatilePageRange range(r1);
for (auto* purgeable_range2 : m_purgeable_ranges) {
if (purgeable_range2 == purgeable_range)
continue;
ScopedSpinLock purgeable2_lock(purgeable_range2->m_volatile_ranges_lock);
if (purgeable_range2->is_empty()) {
// If just one doesn't allow any purging, we can
// immediately bail
return IterationDecision::Continue;
}
for (auto const& r2 : purgeable_range2->volatile_ranges().ranges()) {
range = range.intersected(r2);
if (range.is_empty())
break;
}
if (range.is_empty())
break;
}
if (range.is_empty())
continue;
IterationDecision decision = f(range);
if (decision != IterationDecision::Continue)
return decision;
}
}
return IterationDecision::Continue;
}
template<IteratorFunction<VolatilePageRange const&> F>
IterationDecision for_each_nonvolatile_range(F f) const
{
size_t base = 0;
for_each_volatile_range([&](VolatilePageRange const& volatile_range) {
if (volatile_range.base == base)
return IterationDecision::Continue;
IterationDecision decision = f(VolatilePageRange { base, volatile_range.base - base });
if (decision != IterationDecision::Continue)
return decision;
base = volatile_range.base + volatile_range.count;
return IterationDecision::Continue;
});
if (base < page_count())
return f(VolatilePageRange { base, page_count() - base });
return IterationDecision::Continue;
}
template<VoidFunction<VolatilePageRange const&> F>
IterationDecision for_each_volatile_range(F f) const
{
return for_each_volatile_range([&](auto& range) {
f(range);
return IterationDecision::Continue;
});
}
template<VoidFunction<VolatilePageRange const&> F>
IterationDecision for_each_nonvolatile_range(F f) const
{
return for_each_nonvolatile_range([&](auto range) {
f(move(range));
return IterationDecision::Continue;
});
}
size_t purge();
private:
explicit AnonymousVMObject(size_t, AllocationStrategy);
@@ -121,15 +61,6 @@ private:
virtual StringView class_name() const override { return "AnonymousVMObject"sv; }
void update_volatile_cache();
void set_was_purged(VolatilePageRange const&);
size_t remove_lazy_commit_pages(VolatilePageRange const&);
void range_made_volatile(VolatilePageRange const&);
void range_made_nonvolatile(VolatilePageRange const&);
size_t count_needed_commit_pages_for_nonvolatile_range(VolatilePageRange const&);
size_t mark_committed_pages_for_nonvolatile_range(VolatilePageRange const&, size_t);
bool is_nonvolatile(size_t page_index);
AnonymousVMObject& operator=(AnonymousVMObject const&) = delete;
AnonymousVMObject& operator=(AnonymousVMObject&&) = delete;
AnonymousVMObject(AnonymousVMObject&&) = delete;
@@ -139,15 +70,15 @@ private:
Bitmap& ensure_cow_map();
void ensure_or_reset_cow_map();
VolatilePageRanges m_volatile_ranges_cache;
bool m_volatile_ranges_cache_dirty { true };
Vector<PurgeablePageRanges*> m_purgeable_ranges;
size_t m_unused_committed_pages { 0 };
Bitmap m_cow_map;
// We share a pool of committed cow-pages with clones
RefPtr<CommittedCowPages> m_shared_committed_cow_pages;
bool m_purgeable { false };
bool m_volatile { false };
bool m_was_purged { false };
};
}


@@ -1,310 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <AK/BinarySearch.h>
#include <AK/ScopeGuard.h>
#include <Kernel/Debug.h>
#include <Kernel/Process.h>
#include <Kernel/VM/AnonymousVMObject.h>
#include <Kernel/VM/MemoryManager.h>
#include <Kernel/VM/PhysicalPage.h>
#include <Kernel/VM/PurgeablePageRanges.h>
namespace AK {
template<>
struct Formatter<Kernel::VolatilePageRange> : Formatter<String> {
void format(FormatBuilder& builder, const Kernel::VolatilePageRange& value)
{
return Formatter<String>::format(builder, String::formatted("{{{} ({}) purged: {}}}", value.base, value.count, value.was_purged));
}
};
}
namespace Kernel {
static void dump_volatile_page_ranges(const Vector<VolatilePageRange>& ranges)
{
if constexpr (VOLATILE_PAGE_RANGES_DEBUG) {
for (size_t i = 0; i < ranges.size(); i++) {
dbgln("[{}] {}", i, ranges[i]);
}
}
}
void VolatilePageRanges::add_unchecked(const VolatilePageRange& range)
{
auto add_range = m_total_range.intersected(range);
if (add_range.is_empty())
return;
m_ranges.append(range);
}
bool VolatilePageRanges::add(const VolatilePageRange& range)
{
auto add_range = m_total_range.intersected(range);
if (add_range.is_empty())
return false;
add_range.was_purged = range.was_purged;
if constexpr (VOLATILE_PAGE_RANGES_DEBUG) {
dbgln("ADD {} (total range: {}) -->", range, m_total_range);
dump_volatile_page_ranges(m_ranges);
ScopeGuard debug_guard([&]() {
dbgln("After adding {} (total range: {})", range, m_total_range);
dump_volatile_page_ranges(m_ranges);
dbgln("<-- ADD {} (total range: {})", range, m_total_range);
});
}
size_t nearby_index = 0;
auto* existing_range = binary_search(
m_ranges.span(), add_range, &nearby_index, [](auto& a, auto& b) {
if (a.intersects_or_adjacent(b))
return 0;
return (signed)(a.base - (b.base + b.count - 1));
});
size_t inserted_index = 0;
if (existing_range) {
if (*existing_range == add_range)
return false;
if (existing_range->was_purged != add_range.was_purged) {
// Found an intersecting or adjacent range, but the purge flag
// doesn't match. Subtract what we're adding from it, and
existing_range->subtract_intersecting(add_range);
if (existing_range->is_empty()) {
*existing_range = add_range;
} else {
m_ranges.insert(++nearby_index, add_range);
existing_range = &m_ranges[nearby_index];
}
} else {
// Found an intersecting or adjacent range that can be merged
existing_range->combine_intersecting_or_adjacent(add_range);
}
inserted_index = nearby_index;
} else {
// Insert into the sorted list
m_ranges.insert_before_matching(
VolatilePageRange(add_range), [&](auto& entry) {
return entry.base >= add_range.base + add_range.count;
},
nearby_index, &inserted_index);
existing_range = &m_ranges[inserted_index];
}
// See if we can merge any of the following ranges
inserted_index++;
while (inserted_index < m_ranges.size()) {
auto& next_range = m_ranges[inserted_index];
if (!next_range.intersects_or_adjacent(*existing_range))
break;
if (next_range.was_purged != existing_range->was_purged) {
// The purged flag of following range is not the same.
// Subtract the added/combined range from it
next_range.subtract_intersecting(*existing_range);
if (next_range.is_empty())
m_ranges.remove(inserted_index);
} else {
existing_range->combine_intersecting_or_adjacent(next_range);
m_ranges.remove(inserted_index);
}
}
return true;
}
bool VolatilePageRanges::remove(const VolatilePageRange& range, bool& was_purged)
{
auto remove_range = m_total_range.intersected(range);
if (remove_range.is_empty())
return false;
if constexpr (VOLATILE_PAGE_RANGES_DEBUG) {
dbgln("REMOVE {} (total range: {}) -->", range, m_total_range);
dump_volatile_page_ranges(m_ranges);
ScopeGuard debug_guard([&]() {
dbgln("After removing {} (total range: {})", range, m_total_range);
dump_volatile_page_ranges(m_ranges);
dbgln("<-- REMOVE {} (total range: {}) was_purged: {}", range, m_total_range, was_purged);
});
}
size_t nearby_index = 0;
auto* existing_range = binary_search(
m_ranges.span(), remove_range, &nearby_index, [](auto& a, auto& b) {
if (a.intersects(b))
return 0;
return (signed)(a.base - (b.base + b.count - 1));
});
if (!existing_range)
return false;
was_purged = existing_range->was_purged;
if (existing_range->range_equals(remove_range)) {
m_ranges.remove(nearby_index);
} else {
// See if we need to remove any of the following ranges
VERIFY(existing_range == &m_ranges[nearby_index]); // sanity check
while (nearby_index < m_ranges.size()) {
existing_range = &m_ranges[nearby_index];
if (!existing_range->intersects(range))
break;
was_purged |= existing_range->was_purged;
existing_range->subtract_intersecting(remove_range);
if (existing_range->is_empty()) {
m_ranges.remove(nearby_index);
break;
}
}
}
return true;
}
bool VolatilePageRanges::intersects(const VolatilePageRange& range) const
{
auto* existing_range = binary_search(
m_ranges.span(), range, nullptr, [](auto& a, auto& b) {
if (a.intersects(b))
return 0;
return (signed)(a.base - (b.base + b.count - 1));
});
return existing_range != nullptr;
}
PurgeablePageRanges::PurgeablePageRanges(const VMObject& vmobject)
: m_volatile_ranges({ 0, vmobject.is_anonymous() ? vmobject.page_count() : 0 })
{
}
bool PurgeablePageRanges::add_volatile_range(const VolatilePageRange& range)
{
if (range.is_empty())
return false;
// Since we may need to call into AnonymousVMObject we need to acquire
// its lock as well, and acquire it first. This is important so that
// we don't deadlock when a page fault (e.g. on another processor)
// happens that is meant to lazy-allocate a committed page. It would
// call into AnonymousVMObject::range_made_volatile, which then would
// also call into this object and need to acquire m_lock. By acquiring
// the vmobject lock first in both cases, we avoid deadlocking.
// We can access m_vmobject without any locks for that purpose because
// add_volatile_range and remove_volatile_range can only be called
// by same object that calls set_vmobject.
ScopedSpinLock vmobject_lock(m_vmobject->m_lock);
ScopedSpinLock lock(m_volatile_ranges_lock);
bool added = m_volatile_ranges.add(range);
if (added)
m_vmobject->range_made_volatile(range);
return added;
}
auto PurgeablePageRanges::remove_volatile_range(const VolatilePageRange& range, bool& was_purged) -> RemoveVolatileError
{
if (range.is_empty()) {
was_purged = false;
return RemoveVolatileError::Success;
}
ScopedSpinLock vmobject_lock(m_vmobject->m_lock); // see comment in add_volatile_range
ScopedSpinLock lock(m_volatile_ranges_lock);
VERIFY(m_vmobject);
// Before we actually remove this range, we need to check if we need
// to commit any pages, which may fail. If it fails, we don't actually
// want to make any modifications. COW pages are already accounted for
// in m_shared_committed_cow_pages
size_t need_commit_pages = 0;
m_volatile_ranges.for_each_intersecting_range(range, [&](const VolatilePageRange& intersected_range) {
need_commit_pages += m_vmobject->count_needed_commit_pages_for_nonvolatile_range(intersected_range);
return IterationDecision::Continue;
});
if (need_commit_pages > 0) {
// See if we can grab enough pages for what we're marking non-volatile
if (!MM.commit_user_physical_pages(need_commit_pages))
return RemoveVolatileError::OutOfMemory;
// Now that we are committed to these pages, mark them for lazy-commit allocation
auto pages_to_mark = need_commit_pages;
m_volatile_ranges.for_each_intersecting_range(range, [&](const VolatilePageRange& intersected_range) {
auto pages_marked = m_vmobject->mark_committed_pages_for_nonvolatile_range(intersected_range, pages_to_mark);
pages_to_mark -= pages_marked;
return IterationDecision::Continue;
});
}
// Now actually remove the range
if (m_volatile_ranges.remove(range, was_purged)) {
m_vmobject->range_made_nonvolatile(range);
return RemoveVolatileError::Success;
}
VERIFY(need_commit_pages == 0); // We should have not touched anything
return RemoveVolatileError::SuccessNoChange;
}
bool PurgeablePageRanges::is_volatile_range(const VolatilePageRange& range) const
{
if (range.is_empty())
return false;
ScopedSpinLock lock(m_volatile_ranges_lock);
return m_volatile_ranges.intersects(range);
}
bool PurgeablePageRanges::is_volatile(size_t index) const
{
ScopedSpinLock lock(m_volatile_ranges_lock);
return m_volatile_ranges.contains(index);
}
void PurgeablePageRanges::set_was_purged(const VolatilePageRange& range)
{
ScopedSpinLock lock(m_volatile_ranges_lock);
m_volatile_ranges.add({ range.base, range.count, true });
}
void PurgeablePageRanges::set_vmobject(AnonymousVMObject* vmobject)
{
// No lock needed here
if (vmobject) {
VERIFY(!m_vmobject);
m_vmobject = vmobject;
} else {
VERIFY(m_vmobject);
m_vmobject = nullptr;
}
}
CommittedCowPages::CommittedCowPages(size_t committed_pages)
: m_committed_pages(committed_pages)
{
}
CommittedCowPages::~CommittedCowPages()
{
// Return unused committed pages
if (m_committed_pages > 0)
MM.uncommit_user_physical_pages(m_committed_pages);
}
NonnullRefPtr<PhysicalPage> CommittedCowPages::allocate_one()
{
VERIFY(m_committed_pages > 0);
m_committed_pages--;
return MM.allocate_committed_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
}
bool CommittedCowPages::return_one()
{
VERIFY(m_committed_pages > 0);
m_committed_pages--;
MM.uncommit_user_physical_pages(1);
return m_committed_pages == 0;
}
}


@@ -1,245 +0,0 @@
/*
* Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Bitmap.h>
#include <AK/RefCounted.h>
#include <Kernel/SpinLock.h>
namespace Kernel {
struct VolatilePageRange {
size_t base { 0 };
size_t count { 0 };
bool was_purged { false };
bool is_empty() const { return count == 0; }
bool intersects(const VolatilePageRange& other) const
{
return other.base < base + count || other.base + other.count > base;
}
bool intersects_or_adjacent(const VolatilePageRange& other) const
{
return other.base <= base + count || other.base + other.count >= base;
}
bool contains(const VolatilePageRange& other) const
{
return base <= other.base && base + count >= other.base + other.count;
}
VolatilePageRange intersected(const VolatilePageRange& other) const
{
auto b = max(base, other.base);
auto e = min(base + count, other.base + other.count);
if (b >= e)
return {};
return { b, e - b, was_purged };
}
void combine_intersecting_or_adjacent(const VolatilePageRange& other)
{
VERIFY(intersects_or_adjacent(other));
if (base <= other.base) {
count = (other.base - base) + other.count;
} else {
count = (base - other.base) + count;
base = other.base;
}
was_purged |= other.was_purged;
}
void subtract_intersecting(const VolatilePageRange& other)
{
if (!intersects(other))
return;
if (other.contains(*this)) {
count = 0;
return;
}
if (base <= other.base) {
count = (other.base - base);
} else {
auto new_base = other.base + other.count;
count = (base + count) - new_base;
base = new_base;
}
}
bool range_equals(const VolatilePageRange& other) const
{
return base == other.base && count == other.count;
}
bool operator==(const VolatilePageRange& other) const
{
return base == other.base && count == other.count && was_purged == other.was_purged;
}
bool operator!=(const VolatilePageRange& other) const
{
return base != other.base || count != other.count || was_purged != other.was_purged;
}
};
class VolatilePageRanges {
public:
VolatilePageRanges(const VolatilePageRange& total_range)
: m_total_range(total_range)
{
}
VolatilePageRanges(const VolatilePageRanges& other)
: m_ranges(other.m_ranges)
, m_total_range(other.m_total_range)
{
}
bool is_empty() const { return m_ranges.is_empty(); }
void clear() { m_ranges.clear_with_capacity(); }
bool is_all() const
{
if (m_ranges.size() != 1)
return false;
return m_ranges[0] == m_total_range;
}
void set_all()
{
if (m_ranges.size() != 1)
m_ranges = { m_total_range };
else
m_ranges[0] = m_total_range;
}
bool intersects(const VolatilePageRange&) const;
bool contains(size_t index) const
{
return intersects({ index, 1 });
}
bool add(const VolatilePageRange&);
void add_unchecked(const VolatilePageRange&);
bool remove(const VolatilePageRange&, bool&);
template<typename F>
IterationDecision for_each_intersecting_range(const VolatilePageRange& range, F f)
{
auto r = m_total_range.intersected(range);
if (r.is_empty())
return IterationDecision::Continue;
size_t nearby_index = 0;
auto* existing_range = binary_search(
m_ranges.span(), r, &nearby_index, [](auto& a, auto& b) {
if (a.intersects(b))
return 0;
return (signed)(a.base - (b.base + b.count - 1));
});
if (!existing_range)
return IterationDecision::Continue;
if (existing_range->range_equals(r))
return f(r);
VERIFY(existing_range == &m_ranges[nearby_index]); // sanity check
while (nearby_index < m_ranges.size()) {
existing_range = &m_ranges[nearby_index];
if (!existing_range->intersects(range))
break;
IterationDecision decision = f(existing_range->intersected(r));
if (decision != IterationDecision::Continue)
return decision;
nearby_index++;
}
return IterationDecision::Continue;
}
template<typename F>
IterationDecision for_each_nonvolatile_range(F f) const
{
size_t base = m_total_range.base;
for (const auto& volatile_range : m_ranges) {
if (volatile_range.base == base)
continue;
IterationDecision decision = f({ base, volatile_range.base - base });
if (decision != IterationDecision::Continue)
return decision;
base = volatile_range.base + volatile_range.count;
}
if (base < m_total_range.base + m_total_range.count)
return f({ base, (m_total_range.base + m_total_range.count) - base });
return IterationDecision::Continue;
}
Vector<VolatilePageRange>& ranges() { return m_ranges; }
const Vector<VolatilePageRange>& ranges() const { return m_ranges; }
private:
Vector<VolatilePageRange> m_ranges;
VolatilePageRange m_total_range;
};
class AnonymousVMObject;
class PurgeablePageRanges {
friend class AnonymousVMObject;
public:
PurgeablePageRanges(const VMObject&);
void copy_purgeable_page_ranges(const PurgeablePageRanges& other)
{
if (this == &other)
return;
ScopedSpinLock lock(m_volatile_ranges_lock);
ScopedSpinLock other_lock(other.m_volatile_ranges_lock);
m_volatile_ranges = other.m_volatile_ranges;
}
bool add_volatile_range(const VolatilePageRange& range);
enum class RemoveVolatileError {
Success = 0,
SuccessNoChange,
OutOfMemory
};
RemoveVolatileError remove_volatile_range(const VolatilePageRange& range, bool& was_purged);
bool is_volatile_range(const VolatilePageRange& range) const;
bool is_volatile(size_t) const;
bool is_empty() const { return m_volatile_ranges.is_empty(); }
void set_was_purged(const VolatilePageRange&);
const VolatilePageRanges& volatile_ranges() const { return m_volatile_ranges; }
protected:
void set_vmobject(AnonymousVMObject*);
VolatilePageRanges m_volatile_ranges;
mutable RecursiveSpinLock m_volatile_ranges_lock;
AnonymousVMObject* m_vmobject { nullptr };
};
class CommittedCowPages : public RefCounted<CommittedCowPages> {
AK_MAKE_NONCOPYABLE(CommittedCowPages);
public:
CommittedCowPages() = delete;
CommittedCowPages(size_t);
~CommittedCowPages();
NonnullRefPtr<PhysicalPage> allocate_one();
bool return_one();
private:
size_t m_committed_pages;
};
}


@@ -20,8 +20,7 @@
namespace Kernel {
Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offset_in_vmobject, OwnPtr<KString> name, Region::Access access, Cacheable cacheable, bool shared)
: PurgeablePageRanges(vmobject)
, m_range(range)
: m_range(range)
, m_offset_in_vmobject(offset_in_vmobject)
, m_vmobject(move(vmobject))
, m_name(move(name))
@@ -34,14 +33,12 @@ Region::Region(Range const& range, NonnullRefPtr<VMObject> vmobject, size_t offs
VERIFY((m_range.size() % PAGE_SIZE) == 0);
m_vmobject->add_region(*this);
register_purgeable_page_ranges();
MM.register_region(*this);
}
Region::~Region()
{
m_vmobject->remove_region(*this);
unregister_purgeable_page_ranges();
// Make sure we disable interrupts so we don't get interrupted between unmapping and unregistering.
// Unmapping the region will give the VM back to the RangeAllocator, so an interrupt handler would
@@ -55,22 +52,6 @@ Region::~Region()
MM.unregister_region(*this);
}
void Region::register_purgeable_page_ranges()
{
if (m_vmobject->is_anonymous()) {
auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
vmobject.register_purgeable_page_ranges(*this);
}
}
void Region::unregister_purgeable_page_ranges()
{
if (m_vmobject->is_anonymous()) {
auto& vmobject = static_cast<AnonymousVMObject&>(*m_vmobject);
vmobject.unregister_purgeable_page_ranges(*this);
}
}
OwnPtr<Region> Region::clone(Process& new_owner)
{
VERIFY(Process::current());
@@ -89,8 +70,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
dbgln("Region::clone: Unable to allocate new Region");
return nullptr;
}
if (m_vmobject->is_anonymous())
region->copy_purgeable_page_ranges(*this);
region->set_mmap(m_mmap);
region->set_shared(m_shared);
region->set_syscall_region(is_syscall_region());
@@ -112,8 +91,6 @@ OwnPtr<Region> Region::clone(Process& new_owner)
dbgln("Region::clone: Unable to allocate new Region for COW");
return nullptr;
}
if (m_vmobject->is_anonymous())
clone_region->copy_purgeable_page_ranges(*this);
if (m_stack) {
VERIFY(is_readable());
VERIFY(is_writable());
@@ -129,55 +106,9 @@ void Region::set_vmobject(NonnullRefPtr<VMObject>&& obj)
{
if (m_vmobject.ptr() == obj.ptr())
return;
unregister_purgeable_page_ranges();
m_vmobject->remove_region(*this);
m_vmobject = move(obj);
m_vmobject->add_region(*this);
register_purgeable_page_ranges();
}
bool Region::is_volatile(VirtualAddress vaddr, size_t size) const
{
if (!m_vmobject->is_anonymous())
return false;
auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
return is_volatile_range({ first_page_index, last_page_index - first_page_index });
}
auto Region::set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged) -> SetVolatileError
{
was_purged = false;
if (!m_vmobject->is_anonymous())
return SetVolatileError::NotPurgeable;
auto offset_in_vmobject = vaddr.get() - (this->vaddr().get() - m_offset_in_vmobject);
if (is_volatile) {
// If marking pages as volatile, be prudent by not marking
// partial pages volatile to prevent potentially non-volatile
// data to be discarded. So rund up the first page and round
// down the last page.
size_t first_page_index = page_round_up(offset_in_vmobject) / PAGE_SIZE;
size_t last_page_index = page_round_down(offset_in_vmobject + size) / PAGE_SIZE;
if (first_page_index != last_page_index)
add_volatile_range({ first_page_index, last_page_index - first_page_index });
} else {
// If marking pages as non-volatile, round down the first page
// and round up the last page to make sure the beginning and
// end of the range doesn't inadvertedly get discarded.
size_t first_page_index = page_round_down(offset_in_vmobject) / PAGE_SIZE;
size_t last_page_index = page_round_up(offset_in_vmobject + size) / PAGE_SIZE;
switch (remove_volatile_range({ first_page_index, last_page_index - first_page_index }, was_purged)) {
case PurgeablePageRanges::RemoveVolatileError::Success:
case PurgeablePageRanges::RemoveVolatileError::SuccessNoChange:
break;
case PurgeablePageRanges::RemoveVolatileError::OutOfMemory:
return SetVolatileError::OutOfMemory;
}
}
return SetVolatileError::Success;
}
size_t Region::cow_pages() const
@@ -279,43 +210,6 @@ bool Region::map_individual_page_impl(size_t page_index)
return true;
}
bool Region::do_remap_vmobject_page_range(size_t page_index, size_t page_count)
{
bool success = true;
if (!m_page_directory)
return success; // not an error, region may have not yet mapped it
if (!translate_vmobject_page_range(page_index, page_count))
return success; // not an error, region doesn't map this page range
ScopedSpinLock page_lock(m_page_directory->get_lock());
size_t index = page_index;
while (index < page_index + page_count) {
if (!map_individual_page_impl(index)) {
success = false;
break;
}
index++;
}
if (index > page_index)
MM.flush_tlb(m_page_directory, vaddr_from_page_index(page_index), index - page_index);
return success;
}
bool Region::remap_vmobject_page_range(size_t page_index, size_t page_count)
{
bool success = true;
auto& vmobject = this->vmobject();
if (vmobject.is_shared_by_multiple_regions()) {
vmobject.for_each_region([&](auto& region) {
if (!region.do_remap_vmobject_page_range(page_index, page_count))
success = false;
});
} else {
if (!do_remap_vmobject_page_range(page_index, page_count))
success = false;
}
return success;
}
bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
{
ScopedSpinLock lock(vmobject().m_lock);
@@ -428,7 +322,7 @@ PageFaultResponse Region::handle_fault(PageFault const& fault)
if (page_slot->is_lazy_committed_page()) {
auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
VERIFY(m_vmobject->is_anonymous());
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({}, page_index_in_vmobject);
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
remap_vmobject_page(page_index_in_vmobject);
return PageFaultResponse::Continue;
}
@@ -472,7 +366,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
if (page_slot->is_lazy_committed_page()) {
VERIFY(m_vmobject->is_anonymous());
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({}, page_index_in_vmobject);
page_slot = static_cast<AnonymousVMObject&>(*m_vmobject).allocate_committed_page({});
dbgln_if(PAGE_FAULT_DEBUG, " >> ALLOCATED COMMITTED {}", page_slot->paddr());
} else {
page_slot = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);


@@ -17,7 +17,6 @@
#include <Kernel/Sections.h>
#include <Kernel/UnixTypes.h>
#include <Kernel/VM/PageFaultResponse.h>
#include <Kernel/VM/PurgeablePageRanges.h>
#include <Kernel/VM/RangeAllocator.h>
namespace Kernel {
@@ -28,8 +27,7 @@ enum class ShouldFlushTLB {
};
class Region final
: public Weakable<Region>
, public PurgeablePageRanges {
: public Weakable<Region> {
friend class MemoryManager;
MAKE_SLAB_ALLOCATED(Region)
@@ -201,15 +199,11 @@ public:
void remap();
bool remap_vmobject_page_range(size_t page_index, size_t page_count);
bool is_volatile(VirtualAddress vaddr, size_t size) const;
enum class SetVolatileError {
Success = 0,
NotPurgeable,
OutOfMemory
};
SetVolatileError set_volatile(VirtualAddress vaddr, size_t size, bool is_volatile, bool& was_purged);
RefPtr<Process> get_owner();
@@ -219,7 +213,8 @@ public:
private:
Region(Range const&, NonnullRefPtr<VMObject>, size_t offset_in_vmobject, OwnPtr<KString>, Region::Access access, Cacheable, bool shared);
bool do_remap_vmobject_page_range(size_t page_index, size_t page_count);
bool remap_vmobject_page(size_t page_index, bool with_flush = true);
bool do_remap_vmobject_page(size_t page_index, bool with_flush = true);
void set_access_bit(Access access, bool b)
{
@@ -229,18 +224,12 @@ private:
m_access &= ~access;
}
bool do_remap_vmobject_page(size_t index, bool with_flush = true);
bool remap_vmobject_page(size_t index, bool with_flush = true);
PageFaultResponse handle_cow_fault(size_t page_index);
PageFaultResponse handle_inode_fault(size_t page_index);
PageFaultResponse handle_zero_fault(size_t page_index);
bool map_individual_page_impl(size_t page_index);
void register_purgeable_page_ranges();
void unregister_purgeable_page_ranges();
RefPtr<PageDirectory> m_page_directory;
Range m_range;
size_t m_offset_in_vmobject { 0 };


@@ -414,7 +414,10 @@ size_t Space::amount_purgeable_volatile() const
ScopedSpinLock lock(m_lock);
size_t amount = 0;
for (auto& region : m_regions) {
if (region->vmobject().is_anonymous() && static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile())
if (!region->vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
if (vmobject.is_purgeable() && vmobject.is_volatile())
amount += region->amount_resident();
}
return amount;
@@ -425,7 +428,10 @@ size_t Space::amount_purgeable_nonvolatile() const
ScopedSpinLock lock(m_lock);
size_t amount = 0;
for (auto& region : m_regions) {
if (region->vmobject().is_anonymous() && !static_cast<const AnonymousVMObject&>(region->vmobject()).is_any_volatile())
if (!region->vmobject().is_anonymous())
continue;
auto const& vmobject = static_cast<AnonymousVMObject const&>(region->vmobject());
if (vmobject.is_purgeable() && !vmobject.is_volatile())
amount += region->amount_resident();
}
return amount;


@@ -158,7 +158,7 @@ extern "C" {
static void* os_alloc(size_t size, const char* name)
{
auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, 0, 0, ChunkedBlock::block_size, name);
auto* ptr = serenity_mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_PURGEABLE, 0, 0, ChunkedBlock::block_size, name);
VERIFY(ptr != MAP_FAILED);
return ptr;
}


@@ -18,6 +18,7 @@
#define MAP_STACK 0x40
#define MAP_NORESERVE 0x80
#define MAP_RANDOMIZED 0x100
#define MAP_PURGEABLE 0x200
#define PROT_READ 0x1
#define PROT_WRITE 0x2


@@ -544,11 +544,11 @@ void Bitmap::set_volatile()
int rc = madvise(m_data, size_in_bytes(), MADV_SET_NONVOLATILE);
if (rc < 0) {
if (errno == ENOMEM) {
was_purged = was_purged_int;
was_purged = true;
return false;
}
perror("madvise(MADV_SET_NONVOLATILE)");
VERIFY_NOT_REACHED();
}
was_purged = rc != 0;
#endif
@@ -574,6 +574,7 @@ Optional<BackingStore> Bitmap::try_allocate_backing_store(BitmapFormat format, I
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
#ifdef __serenity__
map_flags |= MAP_PURGEABLE;
void* data = mmap_with_name(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0, String::formatted("GraphicsBitmap [{}]", size).characters());
#else
void* data = mmap(nullptr, data_size_in_bytes, PROT_READ | PROT_WRITE, map_flags, 0, 0);