mirror of https://github.com/freebsd/freebsd-src
synced 2024-10-04 15:40:44 +00:00

commit f1d73aacdc (parent 3b35e7ee8d)

pmap: Skip some superpage promotion attempts that will fail

Implement a simple heuristic to skip pointless promotion attempts by
pmap_enter_quick_locked() and moea64_enter(). Specifically, when
vm_fault() calls pmap_enter_quick() to map neighboring pages at the end
of a copy-on-write fault, there is no point in attempting promotion in
pmap_enter_quick_locked() and moea64_enter(). Promotion will fail
because the base pages have differing protection.

Reviewed by:	kib
Differential Revision:	https://reviews.freebsd.org/D45431
MFC after:	1 week
@@ -7818,7 +7818,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NPTEPG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (pde == NULL)
@@ -6052,7 +6052,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NL3PG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NL3PG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (l2 == NULL)
@@ -4250,7 +4250,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * If both the PTP and the reservation are fully populated, then
 	 * attempt promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == NPTEPG) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == NPTEPG) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (pde == NULL)
@@ -1755,10 +1755,14 @@ moea64_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * If the VA of the entered page is not aligned with its PA,
 	 * don't try page promotion as it is not possible.
 	 * This reduces the number of promotion failures dramatically.
+	 *
+	 * Ignore VM_PROT_NO_PROMOTE unless PMAP_ENTER_QUICK_LOCKED.
 	 */
 	if (moea64_ps_enabled(pmap) && pmap != kernel_pmap && pvo != NULL &&
 	    (pvo->pvo_vaddr & PVO_MANAGED) != 0 &&
 	    (va & HPT_SP_MASK) == (pa & HPT_SP_MASK) &&
+	    ((prot & VM_PROT_NO_PROMOTE) == 0 ||
+	    (flags & PMAP_ENTER_QUICK_LOCKED) == 0) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0)
 		moea64_sp_promote(pmap, va, m);
@@ -1850,8 +1854,9 @@ moea64_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m,
     vm_prot_t prot)
 {

-	moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
-	    PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED, 0);
+	moea64_enter(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE |
+	    VM_PROT_NO_PROMOTE), PMAP_ENTER_NOSLEEP | PMAP_ENTER_QUICK_LOCKED,
+	    0);
 }

 vm_paddr_t
@@ -3519,7 +3519,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	 * If both the PTP and the reservation are fully populated, then attempt
 	 * promotion.
 	 */
-	if ((mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
+	if ((prot & VM_PROT_NO_PROMOTE) == 0 &&
+	    (mpte == NULL || mpte->ref_count == Ln_ENTRIES) &&
 	    (m->flags & PG_FICTITIOUS) == 0 &&
 	    vm_reserv_level_iffullpop(m) == 0) {
 		if (l2 == NULL)
@@ -76,6 +76,7 @@ typedef u_char vm_prot_t;	/* protection codes */
 #define	VM_PROT_COPY		((vm_prot_t) 0x08)	/* copy-on-read */
 #define	VM_PROT_PRIV_FLAG	((vm_prot_t) 0x10)
 #define	VM_PROT_FAULT_LOOKUP	VM_PROT_PRIV_FLAG
+#define	VM_PROT_NO_PROMOTE	VM_PROT_PRIV_FLAG
 #define	VM_PROT_QUICK_NOFAULT	VM_PROT_PRIV_FLAG	/* same to save bits */

 #define	VM_PROT_ALL	(VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE)
@@ -1891,6 +1891,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
 	vm_offset_t addr, starta;
 	vm_pindex_t pindex;
 	vm_page_t m;
+	vm_prot_t prot;
 	int i;

 	pmap = fs->map->pmap;
@@ -1906,6 +1907,14 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
 		if (starta < entry->start)
 			starta = entry->start;
 	}
+	prot = entry->protection;
+
+	/*
+	 * If pmap_enter() has enabled write access on a nearby mapping, then
+	 * don't attempt promotion, because it will fail.
+	 */
+	if ((fs->prot & VM_PROT_WRITE) != 0)
+		prot |= VM_PROT_NO_PROMOTE;

 	/*
 	 * Generate the sequence of virtual addresses that are candidates for
@@ -1949,7 +1958,7 @@ vm_fault_prefault(const struct faultstate *fs, vm_offset_t addra,
 		}
 		if (vm_page_all_valid(m) &&
 		    (m->flags & PG_FICTITIOUS) == 0)
-			pmap_enter_quick(pmap, addr, m, entry->protection);
+			pmap_enter_quick(pmap, addr, m, prot);
 		if (!obj_locked || lobject != entry->object.vm_object)
 			VM_OBJECT_RUNLOCK(lobject);
 	}
Loading…
Reference in a new issue