Clearing a page table entry's accessed bit (PG_A) and setting the
page's PG_REFERENCED flag in pmap_protect() can't really be justified.
In contrast to pmap_remove() or pmap_remove_all(), the mapping is not
being destroyed, so the notion that the page was accessed is not lost.
Moreover, clearing the page table entry's accessed bit and setting the
page's PG_REFERENCED flag can throw off the page daemon's activity
count calculation.  Finally, in my tests, I found that 15% of the
atomic memory operations being performed by pmap_protect() were only
to clear PG_A, and not change protection.  This could, by itself, be
fixed, but I don't see the point given the above argument.
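
To see how a stale reference can skew the activity count: when the page
daemon scans the active queue, it treats a set PG_REFERENCED flag the
same as a hardware-set PG_A bit, so a PG_REFERENCED planted by
pmap_protect() makes an otherwise idle page look recently used.  A
minimal sketch, paraphrasing the era's vm_pageout_scan() (abridged;
queue handling, locking, and the ACT_DECLINE path are elided):

        actcount = 0;
        if (m->flags & PG_REFERENCED) {
                /* Cannot tell a genuine re-reference from a stale flag. */
                vm_page_flag_clear(m, PG_REFERENCED);
                actcount += 1;
        }
        actcount += pmap_ts_referenced(m);
        if (actcount != 0) {
                /* Counted as recently used: stays on the active queue. */
                m->act_count += ACT_ADVANCE + actcount;
                if (m->act_count > ACT_MAX)
                        m->act_count = ACT_MAX;
        }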

Remove a comment from pmap_protect_pde() that is no longer meaningful
after the above change.
Commit: 0d2e1c3e39 (parent 6dbd88581d)
Author: Alan Cox, 2010-04-25 20:40:45 +00:00
Notes:  svn2git 2020-12-20 02:59:44 +00:00: svn path=/head/; revision=207205

2 changed files with 12 additions and 45 deletions

--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2833,18 +2833,9 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
         if (oldpde & PG_MANAGED) {
                 eva = sva + NBPDR;
                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-                    va < eva; va += PAGE_SIZE, m++) {
-                        /*
-                         * In contrast to the analogous operation on a 4KB page
-                         * mapping, the mapping's PG_A flag is not cleared and
-                         * the page's PG_REFERENCED flag is not set.  The
-                         * reason is that pmap_demote_pde() expects that a 2MB
-                         * page mapping with a stored page table page has PG_A
-                         * set.
-                         */
+                    va < eva; va += PAGE_SIZE, m++)
                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                 vm_page_dirty(m);
-                }
         }
         if ((prot & VM_PROT_WRITE) == 0)
                 newpde &= ~(PG_RW | PG_M);
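
Assembled from the hunk above for readability, with declarations and
the rest of pmap_protect_pde() elided, the superpage path now only
propagates the dirty state and leaves PG_A and PG_REFERENCED alone:

        if (oldpde & PG_MANAGED) {
                eva = sva + NBPDR;
                for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
                    va < eva; va += PAGE_SIZE, m++)
                        if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                vm_page_dirty(m);
        }
        if ((prot & VM_PROT_WRITE) == 0)
                newpde &= ~(PG_RW | PG_M);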
@@ -2953,23 +2944,15 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
                         obits = pbits = *pte;
                         if ((pbits & PG_V) == 0)
                                 continue;
-                        if (pbits & PG_MANAGED) {
-                                m = NULL;
-                                if (pbits & PG_A) {
+                        if ((prot & VM_PROT_WRITE) == 0) {
+                                if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+                                    (PG_MANAGED | PG_M | PG_RW)) {
                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-                                        vm_page_flag_set(m, PG_REFERENCED);
-                                        pbits &= ~PG_A;
-                                }
-                                if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
-                                        if (m == NULL)
-                                                m = PHYS_TO_VM_PAGE(pbits &
-                                                    PG_FRAME);
                                         vm_page_dirty(m);
                                 }
-                        }
-                        if ((prot & VM_PROT_WRITE) == 0)
                                 pbits &= ~(PG_RW | PG_M);
+                        }
                         if ((prot & VM_PROT_EXECUTE) == 0)
                                 pbits |= pg_nx;
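
The 15% of atomic operations cited in the log message comes from this
context: the flag manipulation above runs inside a compare-and-swap
retry loop, so any change to pbits, including one made only to clear
PG_A, costs an atomic update.  A sketch of the enclosing loop body,
paraphrased from the era's pmap_protect() (abridged; TLB-invalidation
bookkeeping simplified):

        retry:
                obits = pbits = *pte;
                /* ... the flag manipulation shown in the hunk above ... */
                if (pbits != obits) {
                        /* One atomic operation per PTE whose bits changed. */
                        if (!atomic_cmpset_long(pte, obits, pbits))
                                goto retry;
                        if (obits & PG_G)
                                pmap_invalidate_page(pmap, sva);
                        else
                                anychanged = 1;
                }

Before this change, a pmap_protect() that did not reduce permissions
still cleared PG_A on any accessed page, forcing the cmpset; afterward,
pbits equals obits in that case and the store is skipped entirely.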

--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2955,18 +2955,9 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
         if (oldpde & PG_MANAGED) {
                 eva = sva + NBPDR;
                 for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
-                    va < eva; va += PAGE_SIZE, m++) {
-                        /*
-                         * In contrast to the analogous operation on a 4KB page
-                         * mapping, the mapping's PG_A flag is not cleared and
-                         * the page's PG_REFERENCED flag is not set.  The
-                         * reason is that pmap_demote_pde() expects that a 2/4MB
-                         * page mapping with a stored page table page has PG_A
-                         * set.
-                         */
+                    va < eva; va += PAGE_SIZE, m++)
                         if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
                                 vm_page_dirty(m);
-                }
         }
         if ((prot & VM_PROT_WRITE) == 0)
                 newpde &= ~(PG_RW | PG_M);
@@ -3074,22 +3065,15 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
                         obits = pbits = *pte;
                         if ((pbits & PG_V) == 0)
                                 continue;
-                        if (pbits & PG_MANAGED) {
-                                m = NULL;
-                                if (pbits & PG_A) {
+                        if ((prot & VM_PROT_WRITE) == 0) {
+                                if ((pbits & (PG_MANAGED | PG_M | PG_RW)) ==
+                                    (PG_MANAGED | PG_M | PG_RW)) {
                                         m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
-                                        vm_page_flag_set(m, PG_REFERENCED);
-                                        pbits &= ~PG_A;
-                                }
-                                if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
-                                        if (m == NULL)
-                                                m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
                                         vm_page_dirty(m);
                                 }
-                        }
-                        if ((prot & VM_PROT_WRITE) == 0)
                                 pbits &= ~(PG_RW | PG_M);
+                        }
 #ifdef PAE
                         if ((prot & VM_PROT_EXECUTE) == 0)
                                 pbits |= pg_nx;