Invalidate the mapping before updating its physical address.

Doing so ensures that all threads sharing the pmap have a consistent
view of the mapping.  This fixes the problem described in the commit
log message for r329254 without the overhead of an extra fault in the
common case.  (Once the riscv pmap_enter() implementation is similarly
modified, the workaround added in r329254 can be removed, reducing the
overhead of CoW faults.)
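
As a point of reference, the ordering can be illustrated with a minimal
user-space sketch of the break-before-make sequence (the names pte,
tlb_invalidate(), and pmap_change_pa() below are illustrative stand-ins,
not the kernel's real API): the old PTE is cleared and its TLB entry
invalidated before the new physical address is published, so a thread
sharing the pmap can never observe a valid PTE paired with a stale
physical address.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	PTE_V	0x1ULL			/* valid bit */

static _Atomic uint64_t pte;		/* stands in for the real *pte */

static void
tlb_invalidate(void)
{
	/* The kernel would shoot down the stale TLB entry here. */
}

static void
pmap_change_pa(uint64_t new_pa)
{
	/* Break: hide the old mapping from all threads sharing the pmap. */
	atomic_store(&pte, 0);
	tlb_invalidate();
	/* Make: only now publish the new physical address. */
	atomic_store(&pte, (new_pa << 12) | PTE_V);
}

int
main(void)
{
	atomic_store(&pte, (0x1234ULL << 12) | PTE_V);
	pmap_change_pa(0x5678);
	printf("pte = %#llx\n", (unsigned long long)atomic_load(&pte));
	return (0);
}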

See also r335784 for amd64.  The mips implementation of pmap_enter()
already reused the PV entry from the old mapping.
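
("Reused the PV entry" refers to the pattern sketched below; the
structures and the pv_reuse() helper are simplified stand-ins built on
sys/queue.h, not the kernel's actual code.  The entry unlinked from the
old page's pv_list is relinked onto the new page's list instead of being
freed and reallocated.)

#include <stddef.h>
#include <sys/queue.h>

struct pv_entry {
	unsigned long pv_va;			/* mapped virtual address */
	TAILQ_ENTRY(pv_entry) pv_list;
};
TAILQ_HEAD(pv_head, pv_entry);

/* Move the pv_entry tracking va from the old page's list to the new one's. */
static void
pv_reuse(struct pv_head *from, struct pv_head *to, unsigned long va)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, from, pv_list)
		if (pv->pv_va == va)
			break;
	if (pv != NULL) {
		TAILQ_REMOVE(from, pv, pv_list);
		TAILQ_INSERT_TAIL(to, pv, pv_list);	/* no free/alloc pair */
	}
}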

Reviewed by:	kib, markj
MFC after:	3 weeks
Differential Revision:	https://reviews.freebsd.org/D16199
commit 37657ad5fb (parent c96ac12e5c)
Author:	Alan Cox
Date:	2018-07-13 17:12:50 +00:00
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=336248

diff --git a/sys/mips/mips/pmap.c b/sys/mips/mips/pmap.c
--- a/sys/mips/mips/pmap.c
+++ b/sys/mips/mips/pmap.c
@@ -2037,6 +2037,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (is_kernel_pmap(pmap))
 		newpte |= PTE_G;
 	PMAP_PTE_SET_CACHE_BITS(newpte, pa, m);
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		newpte |= PTE_MANAGED;
 
 	mpte = NULL;
@@ -2066,8 +2068,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
 		    (void *)pmap->pm_segtab, (void *)va);
 	}
-	om = NULL;
+
 	origpte = *pte;
+	KASSERT(!pte_test(&origpte, PTE_D | PTE_RO | PTE_V),
+	    ("pmap_enter: modified page not writable: va: %p, pte: %#jx",
+	    (void *)va, (uintmax_t)origpte));
 	opa = TLBLO_PTE_TO_PA(origpte);
 
 	/*
@@ -2086,10 +2091,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		    PTE_W))
 			pmap->pm_stats.wired_count--;
 
-		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
-		    ("%s: modified page not writable: va: %p, pte: %#jx",
-		    __func__, (void *)va, (uintmax_t)origpte));
-
 		/*
 		 * Remove extra pte reference
 		 */
@@ -2098,8 +2099,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 		if (pte_test(&origpte, PTE_MANAGED)) {
 			m->md.pv_flags |= PV_TABLE_REF;
-			om = m;
-			newpte |= PTE_MANAGED;
 			if (!pte_test(&newpte, PTE_RO))
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
@@ -2113,13 +2112,29 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	 * handle validating new mapping.
 	 */
 	if (opa) {
+		if (is_kernel_pmap(pmap))
+			*pte = PTE_G;
+		else
+			*pte = 0;
 		if (pte_test(&origpte, PTE_W))
 			pmap->pm_stats.wired_count--;
-
 		if (pte_test(&origpte, PTE_MANAGED)) {
 			om = PHYS_TO_VM_PAGE(opa);
+			if (pte_test(&origpte, PTE_D))
+				vm_page_dirty(om);
+			if ((om->md.pv_flags & PV_TABLE_REF) != 0) {
+				om->md.pv_flags &= ~PV_TABLE_REF;
+				vm_page_aflag_set(om, PGA_REFERENCED);
+			}
 			pv = pmap_pvh_remove(&om->md, pmap, va);
+			if (!pte_test(&newpte, PTE_MANAGED))
+				free_pv_entry(pmap, pv);
+			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			    TAILQ_EMPTY(&om->md.pv_list))
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
 		}
+		pmap_invalidate_page(pmap, va);
+		origpte = 0;
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			KASSERT(mpte->wire_count > 0,
@@ -2132,17 +2147,16 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
+	if (pte_test(&newpte, PTE_MANAGED)) {
 		m->md.pv_flags |= PV_TABLE_REF;
-		if (pv == NULL)
+		if (pv == NULL) {
 			pv = get_pv_entry(pmap, FALSE);
-		pv->pv_va = va;
+			pv->pv_va = va;
+		}
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
-		newpte |= PTE_MANAGED;
 		if (!pte_test(&newpte, PTE_RO))
 			vm_page_aflag_set(m, PGA_WRITEABLE);
-	} else if (pv != NULL)
-		free_pv_entry(pmap, pv);
+	}
 
 	/*
 	 * Increment counters
@@ -2163,21 +2177,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 	if (origpte != newpte) {
 		*pte = newpte;
 		if (pte_test(&origpte, PTE_V)) {
-			if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
-				if (om->md.pv_flags & PV_TABLE_REF)
-					vm_page_aflag_set(om, PGA_REFERENCED);
-				om->md.pv_flags &= ~PV_TABLE_REF;
-			}
+			KASSERT(opa == pa, ("pmap_enter: invalid update"));
 			if (pte_test(&origpte, PTE_D)) {
-				KASSERT(!pte_test(&origpte, PTE_RO),
-				    ("pmap_enter: modified page not writable:"
-				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
-				if (pte_test(&origpte, PTE_MANAGED))
-					vm_page_dirty(om);
+				vm_page_dirty(m);
 			}
-			if (pte_test(&origpte, PTE_MANAGED) &&
-			    TAILQ_EMPTY(&om->md.pv_list))
-				vm_page_aflag_clear(om, PGA_WRITEABLE);
 			pmap_update_page(pmap, va, newpte);
 		}
 	}