pmap: Convert boolean_t to bool.

Reviewed by:	kib (older version)
Differential Revision:	https://reviews.freebsd.org/D39921
John Baldwin 2024-01-31 14:48:26 -08:00
parent 009d3f66cb
commit 1f1b2286fd
21 changed files with 524 additions and 529 deletions
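
The conversion is mechanical: boolean_t parameters, locals, and return types become bool, and the TRUE/FALSE macros become the C99 true/false constants. Below is a minimal sketch of the pattern, assuming boolean_t is the conventional int typedef; the helper functions are illustrative only and do not appear anywhere in this commit.

	#include <stdbool.h>

	/* Old style: boolean_t is conventionally an int typedef with TRUE/FALSE macros. */
	typedef int boolean_t;
	#define	FALSE	0
	#define	TRUE	1

	/* Before: pre-conversion spelling of a simple predicate. */
	static boolean_t
	is_aligned_old(unsigned long va, unsigned long mask)
	{
		if ((va & mask) != 0)
			return (FALSE);
		return (TRUE);
	}

	/* After: the same predicate spelled with bool/true/false. */
	static bool
	is_aligned_new(unsigned long va, unsigned long mask)
	{
		if ((va & mask) != 0)
			return (false);
		return (true);
	}

Callers are generally unaffected, since boolean_t values already behave as integers in conditions; only the spelling of the type and of the truth constants changes.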


@ -174,14 +174,14 @@
#define PMAP_MEMDOM 1
#endif
static __inline boolean_t
static __inline bool
pmap_type_guest(pmap_t pmap)
{
return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
}
static __inline boolean_t
static __inline bool
pmap_emulate_ad_bits(pmap_t pmap)
{
@ -309,12 +309,12 @@ pmap_pku_mask_bit(pmap_t pmap)
return (pmap->pm_type == PT_X86 ? X86_PG_PKU_MASK : 0);
}
static __inline boolean_t
static __inline bool
safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
{
if (!pmap_emulate_ad_bits(pmap))
return (TRUE);
return (true);
KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type));
@ -324,16 +324,16 @@ safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
* if the EPT_PG_WRITE bit is set.
*/
if ((pte & EPT_PG_WRITE) != 0)
return (FALSE);
return (false);
/*
* XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set.
*/
if ((pte & EPT_PG_EXECUTE) == 0 ||
((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0))
return (TRUE);
return (true);
else
return (FALSE);
return (false);
}
#ifdef PV_STATS
@ -1280,10 +1280,10 @@ static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
static int pmap_change_props_locked(vm_offset_t va, vm_size_t size,
vm_prot_t prot, int mode, int flags);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static bool pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
vm_offset_t va, struct rwlock **lockp);
static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
static bool pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe,
vm_offset_t va);
static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot, struct rwlock **lockp);
@ -1307,7 +1307,7 @@ static vm_paddr_t pmap_large_map_kextract(vm_offset_t va);
static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
vm_page_t mpte, struct rwlock **lockp);
#endif
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
vm_prot_t prot);
static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask);
static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva,
@ -1322,10 +1322,10 @@ static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va);
static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
struct spglist *free);
static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pd_entry_t *pde, struct spglist *free,
struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
pd_entry_t newpde);
@ -2482,7 +2482,7 @@ pmap_init(void)
"at physical 1G\n");
for (i = 0; i < atop(0x400000); i++) {
ret = vm_page_blacklist_add(0x40000000 +
ptoa(i), FALSE);
ptoa(i), false);
if (!ret && bootverbose)
printf("page at %#lx already used\n",
0x40000000 + ptoa(i));
@ -2676,7 +2676,7 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t entry)
return (entry);
}
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{
@ -2689,7 +2689,7 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
* caching mode.
*/
int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
{
int cache_bits, pat_flag, pat_idx;
@ -2727,7 +2727,7 @@ pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
}
static int
pmap_cache_mask(pmap_t pmap, boolean_t is_pde)
pmap_cache_mask(pmap_t pmap, bool is_pde)
{
int mask;
@ -3814,7 +3814,7 @@ pmap_flush_cache_phys_range(vm_paddr_t spa, vm_paddr_t epa, vm_memattr_t mattr)
spa = dmaplimit;
}
pte_bits = pmap_cache_bits(kernel_pmap, mattr, 0) | X86_PG_RW |
pte_bits = pmap_cache_bits(kernel_pmap, mattr, false) | X86_PG_RW |
X86_PG_V;
error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK,
&vaddr);
@ -3985,7 +3985,7 @@ pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
int cache_bits;
pte = vtopte(va);
cache_bits = pmap_cache_bits(kernel_pmap, mode, 0);
cache_bits = pmap_cache_bits(kernel_pmap, mode, false);
pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M |
X86_PG_RW | X86_PG_V | cache_bits);
}
@ -4042,7 +4042,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
endpte = pte + count;
while (pte < endpte) {
m = *ma++;
cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, false);
pa = VM_PAGE_TO_PHYS(m) | cache_bits;
if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) {
oldpte |= *pte;
@ -4084,8 +4084,7 @@ pmap_qremove(vm_offset_t sva, int count)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
{
if (set_PG_ZERO)
@ -4141,19 +4140,19 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
/*
* Decrements a page table page's reference count, which is used to record the
* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static inline boolean_t
static inline bool
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->ref_count;
if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m, free);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
static void
@ -4217,7 +4216,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
* Put page on a list so that it is released after
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
pmap_add_delayed_free_list(m, free, true);
}
/*
@ -4363,14 +4362,14 @@ pmap_pinit_pml5(vm_page_t pml5pg)
*/
pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V |
X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
/*
* Install self-referential address mapping entry.
*/
pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) |
X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
}
static void
@ -4400,7 +4399,7 @@ pmap_pinit_pml5_pti(vm_page_t pml5pgu)
pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] =
pmap_kextract((vm_offset_t)pti_pml4) |
X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE);
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false);
}
/* Allocate a page table page and do related bookkeeping */
@ -5899,7 +5898,7 @@ pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
* Conditionally create the PV entry for a 4KB page mapping if the required
* memory can be allocated without resorting to reclamation.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct rwlock **lockp)
{
@ -5912,9 +5911,9 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -5962,11 +5961,11 @@ pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
* Tries to demote a 2MB page mapping. If demotion fails, the 2MB page
* mapping is invalidated.
*/
static boolean_t
static bool
pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
struct rwlock *lock;
boolean_t rv;
bool rv;
lock = NULL;
rv = pmap_demote_pde_locked(pmap, pde, va, &lock);
@ -6019,7 +6018,7 @@ pmap_demote_pde_abort(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
va, pmap);
}
static boolean_t
static bool
pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
struct rwlock **lockp)
{
@ -6036,7 +6035,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
PG_M = pmap_modified_bit(pmap);
PG_RW = pmap_rw_bit(pmap);
PG_V = pmap_valid_bit(pmap);
PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
PG_PTE_CACHE = pmap_cache_mask(pmap, false);
PG_PKU_MASK = pmap_pku_mask_bit(pmap);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@ -6053,7 +6052,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
KASSERT((oldpde & PG_W) == 0,
("pmap_demote_pde: a wired mapping is missing PG_A"));
pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
return (FALSE);
return (false);
}
mpte = pmap_remove_pt_page(pmap, va);
@ -6090,7 +6089,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
*/
if (mpte == NULL) {
pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp);
return (FALSE);
return (false);
}
if (!in_kernel)
@ -6158,7 +6157,7 @@ pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
counter_u64_add(pmap_pde_demotions, 1);
CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p",
va, pmap);
return (TRUE);
return (true);
}
/*
@ -6256,7 +6255,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_pde: pte page ref count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
pmap_add_delayed_free_list(mpte, free, false);
}
}
return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free));
@ -6642,12 +6641,12 @@ pmap_remove_all(vm_page_t m)
/*
* pmap_protect_pde: do the things to protect a 2mpage in a process
*/
static boolean_t
static bool
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
vm_page_t m, mt;
boolean_t anychanged;
bool anychanged;
pt_entry_t PG_G, PG_M, PG_RW;
PG_G = pmap_global_bit(pmap);
@ -6657,7 +6656,7 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT((sva & PDRMASK) == 0,
("pmap_protect_pde: sva is not 2mpage aligned"));
anychanged = FALSE;
anychanged = false;
retry:
oldpde = newpde = *pde;
if ((prot & VM_PROT_WRITE) == 0) {
@ -6682,7 +6681,7 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
if ((oldpde & PG_G) != 0)
pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
else
anychanged = TRUE;
anychanged = true;
}
return (anychanged);
}
@ -6701,7 +6700,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
pd_entry_t ptpaddr, *pde;
pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V;
pt_entry_t obits, pbits;
boolean_t anychanged;
bool anychanged;
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
if (prot == VM_PROT_NONE) {
@ -6717,7 +6716,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
PG_M = pmap_modified_bit(pmap);
PG_V = pmap_valid_bit(pmap);
PG_RW = pmap_rw_bit(pmap);
anychanged = FALSE;
anychanged = false;
/*
* Although this function delays and batches the invalidation
@ -6773,7 +6772,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (!atomic_cmpset_long(pdpe, obits, pbits))
/* PG_PS cannot be cleared under us, */
goto retry_pdpe;
anychanged = TRUE;
anychanged = true;
}
continue;
}
@ -6805,7 +6804,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
* invalidated by pmap_protect_pde().
*/
if (pmap_protect_pde(pmap, pde, sva, prot))
anychanged = TRUE;
anychanged = true;
continue;
} else if (!pmap_demote_pde(pmap, pde, sva)) {
/*
@ -6842,7 +6841,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
if (obits & PG_G)
pmap_invalidate_page(pmap, sva);
else
anychanged = TRUE;
anychanged = true;
}
}
}
@ -6887,7 +6886,7 @@ pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte,
PG_V = pmap_valid_bit(pmap);
PG_RW = pmap_rw_bit(pmap);
PG_PKU_MASK = pmap_pku_mask_bit(pmap);
PG_PTE_CACHE = pmap_cache_mask(pmap, 0);
PG_PTE_CACHE = pmap_cache_mask(pmap, false);
/*
* Examine the first PTE in the specified PTP. Abort if this PTE is
@ -7158,7 +7157,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
vm_paddr_t opa, pa;
vm_page_t mpte, om;
int rv;
boolean_t nosleep;
bool nosleep;
PG_A = pmap_accessed_bit(pmap);
PG_G = pmap_global_bit(pmap);
@ -7434,8 +7433,8 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
PG_V = pmap_valid_bit(pmap);
newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
PG_PS | PG_V;
newpde = VM_PAGE_TO_PHYS(m) |
pmap_cache_bits(pmap, m->md.pat_mode, true) | PG_PS | PG_V;
if ((m->oflags & VPO_UNMANAGED) == 0)
newpde |= PG_MANAGED;
if ((prot & VM_PROT_EXECUTE) == 0)
@ -7804,7 +7803,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
pmap_resident_count_adj(pmap, 1);
newpte = VM_PAGE_TO_PHYS(m) | PG_V |
pmap_cache_bits(pmap, m->md.pat_mode, 0);
pmap_cache_bits(pmap, m->md.pat_mode, false);
if ((m->oflags & VPO_UNMANAGED) == 0)
newpte |= PG_MANAGED;
if ((prot & VM_PROT_EXECUTE) == 0)
@ -7914,7 +7913,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
* will not affect the termination of this loop.
*/
PMAP_LOCK(pmap);
for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
pa < ptepa + size; pa += NBPDR) {
pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL);
if (pde == NULL) {
@ -8322,7 +8321,7 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
vm_page_t pages[2];
vm_offset_t vaddr[2], a_pg_offset, b_pg_offset;
int cnt;
boolean_t mapped;
bool mapped;
while (xfersize > 0) {
a_pg_offset = a_offset & PAGE_MASK;
@ -8331,12 +8330,12 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
pages[1] = mb[b_offset >> PAGE_SHIFT];
cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
cnt = min(cnt, PAGE_SIZE - b_pg_offset);
mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE);
mapped = pmap_map_io_transient(pages, vaddr, 2, false);
a_cp = (char *)vaddr[0] + a_pg_offset;
b_cp = (char *)vaddr[1] + b_pg_offset;
bcopy(a_cp, b_cp, cnt);
if (__predict_false(mapped))
pmap_unmap_io_transient(pages, vaddr, 2, FALSE);
pmap_unmap_io_transient(pages, vaddr, 2, false);
a_offset += cnt;
b_offset += cnt;
xfersize -= cnt;
@ -8350,23 +8349,23 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
struct rwlock *lock;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -8377,7 +8376,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -8455,17 +8454,17 @@ pmap_page_wired_mappings(vm_page_t m)
}
/*
* Returns TRUE if the given page is mapped individually or as part of
* a 2mpage. Otherwise, returns FALSE.
* Returns true if the given page is mapped individually or as part of
* a 2mpage. Otherwise, returns false.
*/
boolean_t
bool
pmap_page_is_mapped(vm_page_t m)
{
struct rwlock *lock;
boolean_t rv;
bool rv;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (FALSE);
return (false);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@ -8519,7 +8518,7 @@ pmap_remove_pages(pmap_t pmap)
#ifdef PV_STATS
int freed;
#endif
boolean_t superpage;
bool superpage;
vm_paddr_t pa;
/*
@ -8569,7 +8568,7 @@ pmap_remove_pages(pmap_t pmap)
pte = pmap_pdpe_to_pde(pte, pv->pv_va);
tpte = *pte;
if ((tpte & (PG_PS | PG_V)) == PG_V) {
superpage = FALSE;
superpage = false;
ptepde = tpte;
pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
PG_FRAME);
@ -8586,7 +8585,7 @@ pmap_remove_pages(pmap_t pmap)
* regular page could be mistaken for
* a superpage.
*/
superpage = TRUE;
superpage = true;
}
if ((tpte & PG_V) == 0) {
@ -8660,7 +8659,7 @@ pmap_remove_pages(pmap_t pmap)
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_pages: pte page reference count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, &free, FALSE);
pmap_add_delayed_free_list(mpte, &free, false);
}
} else {
pmap_resident_count_adj(pmap, -1);
@ -8697,8 +8696,8 @@ pmap_remove_pages(pmap_t pmap)
vm_page_free_pages_toq(&free, true);
}
static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
static bool
pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
{
struct rwlock *lock;
pv_entry_t pv;
@ -8707,9 +8706,9 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
pt_entry_t PG_A, PG_M, PG_RW, PG_V;
pmap_t pmap;
int md_gen, pvh_gen;
boolean_t rv;
bool rv;
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
restart:
@ -8787,7 +8786,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
boolean_t
bool
pmap_is_modified(vm_page_t m)
{
@ -8798,8 +8797,8 @@ pmap_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (pmap_page_test_mappings(m, FALSE, TRUE));
return (false);
return (pmap_page_test_mappings(m, false, true));
}
/*
@ -8808,20 +8807,20 @@ pmap_is_modified(vm_page_t m)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
boolean_t
bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pd_entry_t *pde;
pt_entry_t *pte, PG_V;
boolean_t rv;
bool rv;
PG_V = pmap_valid_bit(pmap);
/*
* Return TRUE if and only if the PTE for the specified virtual
* Return true if and only if the PTE for the specified virtual
* address is allocated but invalid.
*/
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
pde = pmap_pde(pmap, addr);
if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) {
@ -8838,13 +8837,13 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
boolean_t
bool
pmap_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
return (pmap_page_test_mappings(m, TRUE, FALSE));
return (pmap_page_test_mappings(m, true, false));
}
/*
@ -8966,7 +8965,7 @@ pmap_ts_referenced(vm_page_t m)
vm_paddr_t pa;
int cleared, md_gen, not_cleared, pvh_gen;
struct spglist free;
boolean_t demoted;
bool demoted;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_ts_referenced: page %p is not managed", m));
@ -9034,7 +9033,7 @@ pmap_ts_referenced(vm_page_t m)
if (safe_to_clear_referenced(pmap, oldpde)) {
atomic_clear_long(pde, PG_A);
pmap_invalidate_page(pmap, pv->pv_va);
demoted = FALSE;
demoted = false;
} else if (pmap_demote_pde_locked(pmap, pde,
pv->pv_va, &lock)) {
/*
@ -9045,7 +9044,7 @@ pmap_ts_referenced(vm_page_t m)
* this removal never frees a page
* table page.
*/
demoted = TRUE;
demoted = true;
va += VM_PAGE_TO_PHYS(m) - (oldpde &
PG_PS_FRAME);
pte = pmap_pde_to_pte(pde, va);
@ -9053,7 +9052,7 @@ pmap_ts_referenced(vm_page_t m)
NULL, &lock);
pmap_invalidate_page(pmap, va);
} else
demoted = TRUE;
demoted = true;
if (demoted) {
/*
@ -9544,7 +9543,7 @@ pmap_unmapdev(void *p, vm_size_t size)
/*
* Tries to demote a 1GB page mapping.
*/
static boolean_t
static bool
pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
{
pdp_entry_t newpdpe, oldpdpe;
@ -9567,7 +9566,7 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
if (pdpg == NULL) {
CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
return (false);
}
pdpgpa = VM_PAGE_TO_PHYS(pdpg);
firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa);
@ -9599,7 +9598,7 @@ pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va)
counter_u64_add(pmap_pdpe_demotions, 1);
CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
" in pmap %p", va, pmap);
return (TRUE);
return (true);
}
/*
@ -9942,12 +9941,12 @@ pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot,
* is not mandatory. The caller may, however, request a TLB invalidation.
*/
void
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate)
{
pdp_entry_t *pdpe;
pd_entry_t *pde;
vm_offset_t va;
boolean_t changed;
bool changed;
if (len == 0)
return;
@ -9956,7 +9955,7 @@ pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
("pmap_demote_DMAP: base is not a multiple of len"));
if (len < NBPDP && base < dmaplimit) {
va = PHYS_TO_DMAP(base);
changed = FALSE;
changed = false;
PMAP_LOCK(kernel_pmap);
pdpe = pmap_pdpe(kernel_pmap, va);
if ((*pdpe & X86_PG_V) == 0)
@ -9964,7 +9963,7 @@ pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
if ((*pdpe & PG_PS) != 0) {
if (!pmap_demote_pdpe(kernel_pmap, pdpe, va))
panic("pmap_demote_DMAP: PDPE failed");
changed = TRUE;
changed = true;
}
if (len < NBPDR) {
pde = pmap_pdpe_to_pde(pdpe, va);
@ -9973,7 +9972,7 @@ pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate)
if ((*pde & PG_PS) != 0) {
if (!pmap_demote_pde(kernel_pmap, pde, va))
panic("pmap_demote_DMAP: PDE failed");
changed = TRUE;
changed = true;
}
}
if (changed && invalidate)
@ -10581,7 +10580,7 @@ pmap_quick_enter_page(vm_page_t m)
invlpg(qframe);
pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A |
X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0));
X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, false));
return (qframe);
}
@ -10794,7 +10793,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
MPASS(*pdpe == 0);
*pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW |
X86_PG_V | X86_PG_A | pg_nx |
pmap_cache_bits(kernel_pmap, mattr, TRUE);
pmap_cache_bits(kernel_pmap, mattr, true);
inc = NBPDP;
} else if (len >= NBPDR && (pa & PDRMASK) == 0 &&
(va & PDRMASK) == 0) {
@ -10802,7 +10801,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
MPASS(*pde == 0);
*pde = pa | pg_g | X86_PG_PS | X86_PG_RW |
X86_PG_V | X86_PG_A | pg_nx |
pmap_cache_bits(kernel_pmap, mattr, TRUE);
pmap_cache_bits(kernel_pmap, mattr, true);
PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))->
ref_count++;
inc = NBPDR;
@ -10811,7 +10810,7 @@ pmap_large_map(vm_paddr_t spa, vm_size_t len, void **addr,
MPASS(*pte == 0);
*pte = pa | pg_g | X86_PG_RW | X86_PG_V |
X86_PG_A | pg_nx | pmap_cache_bits(kernel_pmap,
mattr, FALSE);
mattr, false);
PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))->
ref_count++;
inc = PAGE_SIZE;
@ -11325,7 +11324,7 @@ pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, bool exec)
pa = pmap_kextract(sva);
ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G |
(exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap,
VM_MEMATTR_DEFAULT, FALSE);
VM_MEMATTR_DEFAULT, false);
if (*pte == 0) {
pte_store(pte, ptev);
pmap_pti_wire_pte(pte);


@ -443,10 +443,10 @@ void pmap_activate_boot(pmap_t pmap);
void pmap_activate_sw(struct thread *);
void pmap_allow_2m_x_ept_recalculate(void);
void pmap_bootstrap(vm_paddr_t *);
int pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde);
int pmap_cache_bits(pmap_t pmap, int mode, bool is_pde);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
int pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t);
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate);
void pmap_flush_cache_range(vm_offset_t, vm_offset_t);
void pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t);
void pmap_init_pat(void);
@ -462,7 +462,7 @@ void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void *pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size);
bool pmap_not_in_di(void);
boolean_t pmap_page_is_mapped(vm_page_t m);
bool pmap_page_is_mapped(vm_page_t m);
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma);
void pmap_pinit_pml4(vm_page_t);


@ -324,7 +324,7 @@ static struct mtx PMAP2mutex;
#define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */
static __inline void pt2_wirecount_init(vm_page_t m);
static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
static bool pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
vm_offset_t va);
static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1,
u_int flags, vm_page_t m);
@ -397,7 +397,7 @@ CTASSERT(VM_MEMATTR_SO == 3);
CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);
#define VM_MEMATTR_END (VM_MEMATTR_WRITE_THROUGH + 1)
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{
@ -2294,15 +2294,15 @@ pmap_pinit(pmap_t pmap)
}
#ifdef INVARIANTS
static boolean_t
static bool
pt2tab_user_is_empty(pt2_entry_t *tab)
{
u_int i, end;
end = pt2tab_index(VM_MAXUSER_ADDRESS);
for (i = 0; i < end; i++)
if (tab[i] != 0) return (FALSE);
return (TRUE);
if (tab[i] != 0) return (false);
return (true);
}
#endif
/*
@ -2437,14 +2437,14 @@ pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx)
return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]);
}
static __inline boolean_t
static __inline bool
pt2_is_empty(vm_page_t m, vm_offset_t va)
{
return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0);
}
static __inline boolean_t
static __inline bool
pt2_is_full(vm_page_t m, vm_offset_t va)
{
@ -2452,7 +2452,7 @@ pt2_is_full(vm_page_t m, vm_offset_t va)
NPTE2_IN_PT2);
}
static __inline boolean_t
static __inline bool
pt2pg_is_empty(vm_page_t m)
{
@ -2645,10 +2645,10 @@ pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m)
/*
* Decrements a L2 page table page's wire count, which is used to record the
* number of valid page table entries within the page. If the wire count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static __inline boolean_t
static __inline bool
pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
pt2_wirecount_dec(m, pte1_index(va));
@ -2661,9 +2661,9 @@ pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
*/
pmap_unwire_pt2pg(pmap, va, m);
pmap_add_delayed_free_list(m, free);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -2716,14 +2716,14 @@ pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m,
* After removing a L2 page table entry, this routine is used to
* conditionally free the page, and manage the hold/wire counts.
*/
static boolean_t
static bool
pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free)
{
pt1_entry_t pte1;
vm_page_t mpte;
if (va >= VM_MAXUSER_ADDRESS)
return (FALSE);
return (false);
pte1 = pte1_load(pmap_pte1(pmap, va));
mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
return (pmap_unwire_pt2(pmap, va, mpte, free));
@ -2993,7 +2993,7 @@ free_pv_entry(pmap_t pmap, pv_entry_t pv)
* when needed.
*/
static pv_entry_t
get_pv_entry(pmap_t pmap, boolean_t try)
get_pv_entry(pmap_t pmap, bool try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
@ -3077,7 +3077,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
pv = get_pv_entry(pmap, FALSE);
pv = get_pv_entry(pmap, false);
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
}
@ -3193,7 +3193,7 @@ pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
/*
* Conditionally create a pv entry.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
pv_entry_t pv;
@ -3201,12 +3201,12 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if (pv_entry_count < pv_entry_high_water &&
(pv = get_pv_entry(pmap, TRUE)) != NULL) {
(pv = get_pv_entry(pmap, true)) != NULL) {
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -3662,7 +3662,7 @@ pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2)
* Tries to demote a 1MB page mapping. If demotion fails, the
* 1MB page mapping is invalidated.
*/
static boolean_t
static bool
pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
{
pt1_entry_t opte1, npte1;
@ -3696,7 +3696,7 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
vm_page_free_pages_toq(&free, false);
CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p",
__func__, va, pmap);
return (FALSE);
return (false);
}
m->pindex = pte1_index(va) & ~PT2PG_MASK;
if (va < VM_MAXUSER_ADDRESS)
@ -3819,7 +3819,7 @@ pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va)
PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n",
__func__, pmap, va, npte1, pte1_load(pte1p), pte1p));
return (TRUE);
return (true);
}
/*
@ -3975,7 +3975,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
*/
if ((m->oflags & VPO_UNMANAGED) == 0) {
if (pv == NULL) {
pv = get_pv_entry(pmap, FALSE);
pv = get_pv_entry(pmap, false);
pv->pv_va = va;
}
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
@ -4399,7 +4399,7 @@ pmap_remove_pages(pmap_t pmap)
int field, idx;
int32_t bit;
uint32_t inuse, bitmask;
boolean_t allfree;
bool allfree;
/*
* Assert that the given pmap is only active on the current
@ -4427,7 +4427,7 @@ pmap_remove_pages(pmap_t pmap)
TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p",
__func__, pmap, pc->pc_pmap));
allfree = TRUE;
allfree = true;
for (field = 0; field < _NPCM; field++) {
inuse = (~(pc->pc_map[field])) & pc_freemask[field];
while (inuse != 0) {
@ -4445,7 +4445,7 @@ pmap_remove_pages(pmap_t pmap)
pte1 = pte1_load(pte1p);
if (pte1_is_section(pte1)) {
if (pte1_is_wired(pte1)) {
allfree = FALSE;
allfree = false;
continue;
}
pte1_clear(pte1p);
@ -4464,7 +4464,7 @@ pmap_remove_pages(pmap_t pmap)
}
if (pte2_is_wired(pte2)) {
allfree = FALSE;
allfree = false;
continue;
}
pte2_clear(pte2p);
@ -4932,7 +4932,7 @@ pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
boolean_t pv_lists_locked;
bool pv_lists_locked;
vm_offset_t nextva;
pt1_entry_t *pte1p, pte1;
pt2_entry_t *pte2p, opte2, npte2;
@ -4948,9 +4948,9 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
return;
if (pmap_is_current(pmap))
pv_lists_locked = FALSE;
pv_lists_locked = false;
else {
pv_lists_locked = TRUE;
pv_lists_locked = true;
resume:
rw_wlock(&pvh_global_lock);
sched_pin();
@ -4985,7 +4985,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
continue;
} else {
if (!pv_lists_locked) {
pv_lists_locked = TRUE;
pv_lists_locked = true;
if (!rw_try_wlock(&pvh_global_lock)) {
PMAP_UNLOCK(pmap);
goto resume;
@ -5118,21 +5118,21 @@ pmap_page_wired_mappings(vm_page_t m)
}
/*
* Returns TRUE if any of the given mappings were used to modify
* physical memory. Otherwise, returns FALSE. Both page and 1mpage
* Returns true if any of the given mappings were used to modify
* physical memory. Otherwise, returns false. Both page and 1mpage
* mappings are supported.
*/
static boolean_t
static bool
pmap_is_modified_pvh(struct md_page *pvh)
{
pv_entry_t pv;
pt1_entry_t pte1;
pt2_entry_t pte2;
pmap_t pmap;
boolean_t rv;
bool rv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
rv = FALSE;
rv = false;
sched_pin();
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
@ -5160,10 +5160,10 @@ pmap_is_modified_pvh(struct md_page *pvh)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
boolean_t
bool
pmap_is_modified(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("%s: page %p is not managed", __func__, m));
@ -5172,7 +5172,7 @@ pmap_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
rv = pmap_is_modified_pvh(&m->md) ||
((m->flags & PG_FICTITIOUS) == 0 &&
@ -5187,14 +5187,14 @@ pmap_is_modified(vm_page_t m)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
boolean_t
bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pt1_entry_t pte1;
pt2_entry_t pte2;
boolean_t rv;
bool rv;
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
pte1 = pte1_load(pmap_pte1(pmap, addr));
if (pte1_is_link(pte1)) {
@ -5206,10 +5206,10 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
}
/*
* Returns TRUE if any of the given mappings were referenced and FALSE
* Returns true if any of the given mappings were referenced and false
* otherwise. Both page and 1mpage mappings are supported.
*/
static boolean_t
static bool
pmap_is_referenced_pvh(struct md_page *pvh)
{
@ -5217,10 +5217,10 @@ pmap_is_referenced_pvh(struct md_page *pvh)
pt1_entry_t pte1;
pt2_entry_t pte2;
pmap_t pmap;
boolean_t rv;
bool rv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
rv = FALSE;
rv = false;
sched_pin();
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
@ -5246,10 +5246,10 @@ pmap_is_referenced_pvh(struct md_page *pvh)
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
boolean_t
bool
pmap_is_referenced(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("%s: page %p is not managed", __func__, m));
@ -5396,12 +5396,12 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t nextva;
pt1_entry_t *pte1p, pte1;
pt2_entry_t *pte2p, pte2;
boolean_t pv_lists_locked;
bool pv_lists_locked;
if (pmap_is_current(pmap))
pv_lists_locked = FALSE;
pv_lists_locked = false;
else {
pv_lists_locked = TRUE;
pv_lists_locked = true;
resume:
rw_wlock(&pvh_global_lock);
sched_pin();
@ -5437,7 +5437,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
continue;
} else {
if (!pv_lists_locked) {
pv_lists_locked = TRUE;
pv_lists_locked = true;
if (!rw_try_wlock(&pvh_global_lock)) {
PMAP_UNLOCK(pmap);
/* Repeat sva. */
@ -5559,14 +5559,14 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
pt2_entry_t *pte2p, pte2;
vm_offset_t pdnxt;
vm_page_t m;
boolean_t pv_lists_locked;
bool pv_lists_locked;
if (advice != MADV_DONTNEED && advice != MADV_FREE)
return;
if (pmap_is_current(pmap))
pv_lists_locked = FALSE;
pv_lists_locked = false;
else {
pv_lists_locked = TRUE;
pv_lists_locked = true;
resume:
rw_wlock(&pvh_global_lock);
sched_pin();
@ -5584,7 +5584,7 @@ pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
if (!pte1_is_managed(opte1))
continue;
if (!pv_lists_locked) {
pv_lists_locked = TRUE;
pv_lists_locked = true;
if (!rw_try_wlock(&pvh_global_lock)) {
PMAP_UNLOCK(pmap);
goto resume;
@ -5769,16 +5769,16 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
*/
/*
* Returns TRUE if the given page is mapped individually or as part of
* a 1mpage. Otherwise, returns FALSE.
* Returns true if the given page is mapped individually or as part of
* a 1mpage. Otherwise, returns false.
*/
boolean_t
bool
pmap_page_is_mapped(vm_page_t m)
{
boolean_t rv;
bool rv;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
((m->flags & PG_FICTITIOUS) == 0 &&
@ -5794,21 +5794,21 @@ pmap_page_is_mapped(vm_page_t m)
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("%s: page %p is not managed", __func__, m));
rv = FALSE;
rv = false;
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -5819,7 +5819,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -6756,7 +6756,7 @@ dump_section(pmap_t pmap, uint32_t pte1_idx)
}
static void
dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok)
dump_link(pmap_t pmap, uint32_t pte1_idx, bool invalid_ok)
{
uint32_t i;
vm_offset_t va;
@ -6788,14 +6788,14 @@ dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok)
}
}
static __inline boolean_t
static __inline bool
is_pv_chunk_space(vm_offset_t va)
{
if ((((vm_offset_t)pv_chunkbase) <= va) &&
(va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks)))
return (TRUE);
return (FALSE);
return (true);
return (false);
}
DB_SHOW_COMMAND(pmap, pmap_pmap_print)
@ -6807,7 +6807,7 @@ DB_SHOW_COMMAND(pmap, pmap_pmap_print)
vm_offset_t va, eva;
vm_page_t m;
uint32_t i;
boolean_t invalid_ok, dump_link_ok, dump_pv_chunk;
bool invalid_ok, dump_link_ok, dump_pv_chunk;
if (have_addr) {
pmap_t pm;
@ -6822,7 +6822,7 @@ DB_SHOW_COMMAND(pmap, pmap_pmap_print)
pmap = PCPU_GET(curpmap);
eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF;
dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */
dump_pv_chunk = false; /* XXX evaluate from modif[] */
printf("pmap: 0x%08X\n", (uint32_t)pmap);
printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP);
@ -6841,8 +6841,8 @@ DB_SHOW_COMMAND(pmap, pmap_pmap_print)
!!(pte1 & PTE1_S), !(pte1 & PTE1_NG));
dump_section(pmap, i);
} else if (pte1_is_link(pte1)) {
dump_link_ok = TRUE;
invalid_ok = FALSE;
dump_link_ok = true;
invalid_ok = false;
pte2 = pte2_load(pmap_pt2tab_entry(pmap, va));
m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p",
@ -6850,9 +6850,9 @@ DB_SHOW_COMMAND(pmap, pmap_pmap_print)
if (is_pv_chunk_space(va)) {
printf(" - pv_chunk space");
if (dump_pv_chunk)
invalid_ok = TRUE;
invalid_ok = true;
else
dump_link_ok = FALSE;
dump_link_ok = false;
}
else if (m != NULL)
printf(" w:%d w2:%u", m->ref_count,


@ -137,7 +137,7 @@ extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */
void pmap_bootstrap(vm_offset_t);
void pmap_kenter(vm_offset_t, vm_paddr_t);
void pmap_kremove(vm_offset_t);
boolean_t pmap_page_is_mapped(vm_page_t);
bool pmap_page_is_mapped(vm_page_t);
bool pmap_ps_enabled(pmap_t pmap);
void pmap_tlb_flush(pmap_t, vm_offset_t);


@ -161,7 +161,7 @@ pte1_clear_bit(pt1_entry_t *pte1p, uint32_t bit)
pte1_sync(pte1p);
}
static __inline boolean_t
static __inline bool
pte1_is_link(pt1_entry_t pte1)
{
@ -175,21 +175,21 @@ pte1_is_section(pt1_entry_t pte1)
return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S);
}
static __inline boolean_t
static __inline bool
pte1_is_dirty(pt1_entry_t pte1)
{
return ((pte1 & (PTE1_NM | PTE1_RO)) == 0);
}
static __inline boolean_t
static __inline bool
pte1_is_global(pt1_entry_t pte1)
{
return ((pte1 & PTE1_NG) == 0);
}
static __inline boolean_t
static __inline bool
pte1_is_valid(pt1_entry_t pte1)
{
int l1_type;
@ -198,7 +198,7 @@ pte1_is_valid(pt1_entry_t pte1)
return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S));
}
static __inline boolean_t
static __inline bool
pte1_is_wired(pt1_entry_t pte1)
{
@ -301,28 +301,28 @@ pte2_clear_bit(pt2_entry_t *pte2p, uint32_t bit)
pte2_sync(pte2p);
}
static __inline boolean_t
static __inline bool
pte2_is_dirty(pt2_entry_t pte2)
{
return ((pte2 & (PTE2_NM | PTE2_RO)) == 0);
}
static __inline boolean_t
static __inline bool
pte2_is_global(pt2_entry_t pte2)
{
return ((pte2 & PTE2_NG) == 0);
}
static __inline boolean_t
static __inline bool
pte2_is_valid(pt2_entry_t pte2)
{
return (pte2 & PTE2_V);
}
static __inline boolean_t
static __inline bool
pte2_is_wired(pt2_entry_t pte2)
{
@ -358,7 +358,7 @@ pte2_set_bit(pt2_entry_t *pte2p, uint32_t bit)
}
static __inline void
pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired)
pte2_set_wired(pt2_entry_t *pte2p, bool wired)
{
/*


@ -466,7 +466,7 @@ static int pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
pd_entry_t l2e, struct spglist *free, struct rwlock **lockp);
static void pmap_reset_asid_set(pmap_t pmap);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
@ -2166,8 +2166,7 @@ pmap_qremove(vm_offset_t sva, int count)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
{
if (set_PG_ZERO)
@ -2180,19 +2179,19 @@ pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
/*
* Decrements a page table page's reference count, which is used to record the
* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static inline boolean_t
static inline bool
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->ref_count;
if (m->ref_count == 0) {
_pmap_unwire_l3(pmap, va, m, free);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
static void
@ -2248,7 +2247,7 @@ _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
* Put page on a list so that it is released after
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
pmap_add_delayed_free_list(m, free, true);
}
/*
@ -2630,7 +2629,7 @@ pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
void
pmap_release(pmap_t pmap)
{
boolean_t rv __diagused;
bool rv __diagused;
struct spglist free;
struct asid_set *set;
vm_page_t m;
@ -2649,7 +2648,7 @@ pmap_release(pmap_t pmap)
PMAP_LOCK(pmap);
rv = pmap_unwire_l3(pmap, 0, m, &free);
PMAP_UNLOCK(pmap);
MPASS(rv == TRUE);
MPASS(rv == true);
vm_page_free_pages_toq(&free, true);
}
@ -3348,7 +3347,7 @@ pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
* Conditionally create the PV entry for a 4KB page mapping if the required
* memory can be allocated without resorting to reclamation.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct rwlock **lockp)
{
@ -3361,9 +3360,9 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -3479,7 +3478,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
KASSERT(ml3->ref_count == NL3PG,
("pmap_remove_l2: l3 page ref count error"));
ml3->ref_count = 0;
pmap_add_delayed_free_list(ml3, free, FALSE);
pmap_add_delayed_free_list(ml3, free, false);
}
}
return (pmap_unuse_pt(pmap, sva, l1e, free));
@ -4432,7 +4431,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
pv_entry_t pv;
vm_paddr_t opa, pa;
vm_page_t mpte, om;
boolean_t nosleep;
bool nosleep;
int lvl, rv;
KASSERT(ADDR_IS_CANONICAL(va),
@ -5574,23 +5573,23 @@ pmap_quick_remove_page(vm_offset_t addr)
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
struct rwlock *lock;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -5601,7 +5600,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
pvh = page_to_pvh(m);
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -5849,7 +5848,7 @@ pmap_remove_pages(pmap_t pmap)
("pmap_remove_pages: l3 page ref count error"));
ml3->ref_count = 0;
pmap_add_delayed_free_list(ml3,
&free, FALSE);
&free, false);
}
break;
case 2:
@ -5892,8 +5891,8 @@ pmap_remove_pages(pmap_t pmap)
/*
* This is used to check if a page has been accessed or modified.
*/
static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
static bool
pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
{
struct rwlock *lock;
pv_entry_t pv;
@ -5901,9 +5900,9 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
pt_entry_t *pte, mask, value;
pmap_t pmap;
int md_gen, pvh_gen;
boolean_t rv;
bool rv;
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
restart:
@ -5981,7 +5980,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
boolean_t
bool
pmap_is_modified(vm_page_t m)
{
@ -5992,8 +5991,8 @@ pmap_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (pmap_page_test_mappings(m, FALSE, TRUE));
return (false);
return (pmap_page_test_mappings(m, false, true));
}
/*
@ -6002,19 +6001,19 @@ pmap_is_modified(vm_page_t m)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
boolean_t
bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pd_entry_t *pde;
pt_entry_t *pte;
boolean_t rv;
bool rv;
int lvl;
/*
* Return TRUE if and only if the L3 entry for the specified virtual
* Return true if and only if the L3 entry for the specified virtual
* address is allocated but invalid.
*/
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
pde = pmap_pde(pmap, addr, &lvl);
if (pde != NULL && lvl == 2) {
@ -6031,13 +6030,13 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
boolean_t
bool
pmap_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
return (pmap_page_test_mappings(m, TRUE, FALSE));
return (pmap_page_test_mappings(m, true, false));
}
/*
@ -7841,7 +7840,7 @@ pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
}
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{


@ -285,7 +285,7 @@ static struct mtx PMAP2mutex;
static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static pv_entry_t get_pv_entry(pmap_t pmap, bool try);
static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde,
u_int flags);
@ -298,7 +298,7 @@ static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
static int pmap_pvh_wired_mappings(struct md_page *pvh, int count);
static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte);
static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
static int pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_prot_t prot);
static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde,
@ -310,8 +310,8 @@ static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte, bool promoted,
static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va,
pd_entry_t pde);
static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
static boolean_t pmap_is_referenced_pvh(struct md_page *pvh);
static bool pmap_is_modified_pvh(struct md_page *pvh);
static bool pmap_is_referenced_pvh(struct md_page *pvh);
static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde);
static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
@ -319,7 +319,7 @@ static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
static bool pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
vm_page_t mpte);
#endif
static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
vm_prot_t prot);
static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
@ -332,7 +332,7 @@ static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
struct spglist *free);
static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m);
static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde,
pd_entry_t newpde);
@ -1079,7 +1079,7 @@ extern u_long pmap_pde_promotions;
* Low level helper routines.....
***************************************************/
static boolean_t
static bool
__CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode)
{
@ -1092,7 +1092,7 @@ __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode)
* caching mode.
*/
static int
__CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde)
__CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, bool is_pde)
{
int cache_bits, pat_flag, pat_idx;
@ -1901,8 +1901,7 @@ __CONCAT(PMTYPE, qremove)(vm_offset_t sva, int count)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
{
if (set_PG_ZERO)
@ -1958,19 +1957,19 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
/*
* Decrements a page table page's reference count, which is used to record the
* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static inline boolean_t
static inline bool
pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
{
--m->ref_count;
if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, m, free);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
static void
@ -1991,7 +1990,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free)
* shootdown is done.
*/
MPASS(pmap != kernel_pmap);
pmap_add_delayed_free_list(m, free, TRUE);
pmap_add_delayed_free_list(m, free, true);
}
/*
@ -2500,7 +2499,7 @@ free_pv_chunk(struct pv_chunk *pc)
* when needed.
*/
static pv_entry_t
get_pv_entry(pmap_t pmap, boolean_t try)
get_pv_entry(pmap_t pmap, bool try)
{
static const struct timeval printinterval = { 60, 0 };
static struct timeval lastprint;
@ -2692,7 +2691,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
pv = get_pv_entry(pmap, FALSE);
pv = get_pv_entry(pmap, false);
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
}
@ -2700,7 +2699,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
/*
* Conditionally create a pv entry.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
pv_entry_t pv;
@ -2708,12 +2707,12 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
rw_assert(&pvh_global_lock, RA_WLOCKED);
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
if (pv_entry_count < pv_entry_high_water &&
(pv = get_pv_entry(pmap, TRUE)) != NULL) {
(pv = get_pv_entry(pmap, true)) != NULL) {
pv->pv_va = va;
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -2755,7 +2754,7 @@ pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
* Tries to demote a 2- or 4MB page mapping. If demotion fails, the
* 2- or 4MB page mapping is invalidated.
*/
static boolean_t
static bool
pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
{
pd_entry_t newpde, oldpde;
@ -2790,7 +2789,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
vm_page_free_pages_toq(&free, true);
CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
" in pmap %p", va, pmap);
return (FALSE);
return (false);
}
mpte->pindex = va >> PDRSHIFT;
if (pmap != kernel_pmap) {
@ -2898,7 +2897,7 @@ pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
pmap_pde_demotions++;
CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
" in pmap %p", va, pmap);
return (TRUE);
return (true);
}
/*
@ -2993,7 +2992,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_pde: pte page ref count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
pmap_add_delayed_free_list(mpte, free, false);
}
}
}
@ -3264,17 +3263,17 @@ __CONCAT(PMTYPE, remove_all)(vm_page_t m)
/*
* pmap_protect_pde: do the things to protect a 4mpage in a process
*/
static boolean_t
static bool
pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
{
pd_entry_t newpde, oldpde;
vm_page_t m, mt;
boolean_t anychanged;
bool anychanged;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT((sva & PDRMASK) == 0,
("pmap_protect_pde: sva is not 4mpage aligned"));
anychanged = FALSE;
anychanged = false;
retry:
oldpde = newpde = *pde;
if ((prot & VM_PROT_WRITE) == 0) {
@ -3301,7 +3300,7 @@ pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
if ((oldpde & PG_G) != 0)
pmap_invalidate_pde_page(kernel_pmap, sva, oldpde);
else
anychanged = TRUE;
anychanged = true;
}
return (anychanged);
}
@ -3317,7 +3316,7 @@ __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
vm_offset_t pdnxt;
pd_entry_t ptpaddr;
pt_entry_t *pte;
boolean_t anychanged, pv_lists_locked;
bool anychanged, pv_lists_locked;
KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
if (prot == VM_PROT_NONE) {
@ -3335,14 +3334,14 @@ __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
#endif
if (pmap_is_current(pmap))
pv_lists_locked = FALSE;
pv_lists_locked = false;
else {
pv_lists_locked = TRUE;
pv_lists_locked = true;
resume:
rw_wlock(&pvh_global_lock);
sched_pin();
}
anychanged = FALSE;
anychanged = false;
PMAP_LOCK(pmap);
for (; sva < eva; sva = pdnxt) {
@ -3378,11 +3377,11 @@ __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
*/
if (pmap_protect_pde(pmap,
&pmap->pm_pdir[pdirindex], sva, prot))
anychanged = TRUE;
anychanged = true;
continue;
} else {
if (!pv_lists_locked) {
pv_lists_locked = TRUE;
pv_lists_locked = true;
if (!rw_try_wlock(&pvh_global_lock)) {
if (anychanged)
pmap_invalidate_all_int(
@ -3445,7 +3444,7 @@ __CONCAT(PMTYPE, protect)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
if (obits & PG_G)
pmap_invalidate_page_int(pmap, sva);
else
anychanged = TRUE;
anychanged = true;
}
}
}
@ -3846,7 +3845,7 @@ __CONCAT(PMTYPE, enter)(pmap_t pmap, vm_offset_t va, vm_page_t m,
*/
if ((newpte & PG_MANAGED) != 0) {
if (pv == NULL) {
pv = get_pv_entry(pmap, FALSE);
pv = get_pv_entry(pmap, false);
pv->pv_va = va;
}
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
@ -4372,12 +4371,12 @@ __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
vm_offset_t pdnxt;
pd_entry_t *pde;
pt_entry_t *pte;
boolean_t pv_lists_locked;
bool pv_lists_locked;
if (pmap_is_current(pmap))
pv_lists_locked = FALSE;
pv_lists_locked = false;
else {
pv_lists_locked = TRUE;
pv_lists_locked = true;
resume:
rw_wlock(&pvh_global_lock);
sched_pin();
@ -4411,7 +4410,7 @@ __CONCAT(PMTYPE, unwire)(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
continue;
} else {
if (!pv_lists_locked) {
pv_lists_locked = TRUE;
pv_lists_locked = true;
if (!rw_try_wlock(&pvh_global_lock)) {
PMAP_UNLOCK(pmap);
/* Repeat sva. */
@ -4723,21 +4722,21 @@ __CONCAT(PMTYPE, copy_pages)(vm_page_t ma[], vm_offset_t a_offset,
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
static boolean_t
static bool
__CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
rv = false;
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -4748,7 +4747,7 @@ __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -4811,16 +4810,16 @@ pmap_pvh_wired_mappings(struct md_page *pvh, int count)
}
/*
* Returns TRUE if the given page is mapped individually or as part of
* a 4mpage. Otherwise, returns FALSE.
* Returns true if the given page is mapped individually or as part of
* a 4mpage. Otherwise, returns false.
*/
static boolean_t
static bool
__CONCAT(PMTYPE, page_is_mapped)(vm_page_t m)
{
boolean_t rv;
bool rv;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
((m->flags & PG_FICTITIOUS) == 0 &&
@ -4940,7 +4939,7 @@ __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_pages: pte page ref count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, &free, FALSE);
pmap_add_delayed_free_list(mpte, &free, false);
}
} else {
pmap->pm_stats.resident_count--;
@ -4973,10 +4972,10 @@ __CONCAT(PMTYPE, remove_pages)(pmap_t pmap)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
static boolean_t
static bool
__CONCAT(PMTYPE, is_modified)(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_modified: page %p is not managed", m));
@ -4985,7 +4984,7 @@ __CONCAT(PMTYPE, is_modified)(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
rv = pmap_is_modified_pvh(&m->md) ||
((m->flags & PG_FICTITIOUS) == 0 &&
@ -4995,20 +4994,20 @@ __CONCAT(PMTYPE, is_modified)(vm_page_t m)
}
/*
* Returns TRUE if any of the given mappings were used to modify
* physical memory. Otherwise, returns FALSE. Both page and 2mpage
* Returns true if any of the given mappings were used to modify
* physical memory. Otherwise, returns false. Both page and 2mpage
* mappings are supported.
*/
static boolean_t
static bool
pmap_is_modified_pvh(struct md_page *pvh)
{
pv_entry_t pv;
pt_entry_t *pte;
pmap_t pmap;
boolean_t rv;
bool rv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
rv = FALSE;
rv = false;
sched_pin();
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
@ -5029,13 +5028,13 @@ pmap_is_modified_pvh(struct md_page *pvh)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
static boolean_t
static bool
__CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr)
{
pd_entry_t pde;
boolean_t rv;
bool rv;
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
pde = *pmap_pde(pmap, addr);
if (pde != 0 && (pde & PG_PS) == 0)
@ -5050,10 +5049,10 @@ __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr)
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
static boolean_t
static bool
__CONCAT(PMTYPE, is_referenced)(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
@ -5066,19 +5065,19 @@ __CONCAT(PMTYPE, is_referenced)(vm_page_t m)
}
/*
* Returns TRUE if any of the given mappings were referenced and FALSE
* Returns true if any of the given mappings were referenced and false
* otherwise. Both page and 4mpage mappings are supported.
*/
static boolean_t
static bool
pmap_is_referenced_pvh(struct md_page *pvh)
{
pv_entry_t pv;
pt_entry_t *pte;
pmap_t pmap;
boolean_t rv;
bool rv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
rv = FALSE;
rv = false;
sched_pin();
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
pmap = PV_PMAP(pv);
@ -5713,7 +5712,7 @@ __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
pd_entry_t *pde;
pt_entry_t *pte;
int cache_bits_pte, cache_bits_pde;
boolean_t changed;
bool changed;
base = trunc_page(va);
offset = va & PAGE_MASK;
@ -5727,7 +5726,7 @@ __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
changed = FALSE;
changed = false;
/*
* Pages that aren't mapped aren't supported. Also break down
@ -5786,14 +5785,14 @@ __CONCAT(PMTYPE, change_attr)(vm_offset_t va, vm_size_t size, int mode)
if (*pde & PG_PS) {
if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
pmap_pde_attr(pde, cache_bits_pde);
changed = TRUE;
changed = true;
}
tmpva = trunc_4mpage(tmpva) + NBPDR;
} else {
pte = vtopte(tmpva);
if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
pmap_pte_attr(pte, cache_bits_pte);
changed = TRUE;
changed = true;
}
tmpva += PAGE_SIZE;
}
@ -5990,7 +5989,7 @@ pmap_trm_import(void *unused __unused, vmem_size_t size, int flags,
m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK);
pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) |
PG_M | PG_A | PG_RW | PG_V | pgeflag |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE));
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false));
}
*addrp = prev_addr;
return (0);
@ -6009,7 +6008,7 @@ pmap_init_trm(void)
pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK |
VM_ALLOC_ZERO);
PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V |
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE);
pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, true);
}
static void *
@ -6112,7 +6111,7 @@ __CONCAT(PMTYPE, cp_slow0_map)(vm_offset_t kaddr, int plen, vm_page_t *ma)
for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) {
*pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) |
pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]),
FALSE);
false);
invlpg(kaddr + ptoa(i));
}
}

View file

@ -557,7 +557,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
pmap_methods_ptr->pm_bootstrap(firstaddr);
}
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
{
@ -565,7 +565,7 @@ pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode)
}
int
pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde)
pmap_cache_bits(pmap_t pmap, int mode, bool is_pde)
{
return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde));
@ -716,7 +716,7 @@ pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
pmap_methods_ptr->pm_unwire(pmap, sva, eva);
}
boolean_t
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
@ -730,7 +730,7 @@ pmap_page_wired_mappings(vm_page_t m)
return (pmap_methods_ptr->pm_page_wired_mappings(m));
}
boolean_t
bool
pmap_page_is_mapped(vm_page_t m)
{
@ -744,21 +744,21 @@ pmap_remove_pages(pmap_t pmap)
pmap_methods_ptr->pm_remove_pages(pmap);
}
boolean_t
bool
pmap_is_modified(vm_page_t m)
{
return (pmap_methods_ptr->pm_is_modified(m));
}
boolean_t
bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr));
}
boolean_t
bool
pmap_is_referenced(vm_page_t m)
{

View file

@ -218,7 +218,7 @@ void pmap_basemem_setup(u_int basemem);
void *pmap_bios16_enter(void);
void pmap_bios16_leave(void *handle);
void pmap_bootstrap(vm_paddr_t);
int pmap_cache_bits(pmap_t, int mode, boolean_t is_pde);
int pmap_cache_bits(pmap_t, int mode, bool is_pde);
int pmap_change_attr(vm_offset_t, vm_size_t, int);
caddr_t pmap_cmap3(vm_paddr_t pa, u_int pte_bits);
void pmap_cp_slow0_map(vm_offset_t kaddr, int plen, vm_page_t *ma);
@ -236,7 +236,7 @@ void pmap_ksetrw(vm_offset_t va);
void *pmap_mapbios(vm_paddr_t, vm_size_t);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
bool pmap_page_is_mapped(vm_page_t m);
void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
vm_paddr_t pmap_pg_frame(vm_paddr_t pa);
bool pmap_ps_enabled(pmap_t pmap);

View file

@ -58,8 +58,8 @@ struct pmap_methods {
void *(*pm_bios16_enter)(void);
void (*pm_bios16_leave)(void *handle);
void (*pm_bootstrap)(vm_paddr_t firstaddr);
boolean_t (*pm_is_valid_memattr)(pmap_t, vm_memattr_t);
int (*pm_cache_bits)(pmap_t, int, boolean_t);
bool (*pm_is_valid_memattr)(pmap_t, vm_memattr_t);
int (*pm_cache_bits)(pmap_t, int, bool);
bool (*pm_ps_enabled)(pmap_t);
void (*pm_pinit0)(pmap_t);
int (*pm_pinit)(pmap_t);
@ -84,13 +84,13 @@ struct pmap_methods {
void (*pm_object_init_pt)(pmap_t, vm_offset_t, vm_object_t,
vm_pindex_t, vm_size_t);
void (*pm_unwire)(pmap_t, vm_offset_t, vm_offset_t);
boolean_t (*pm_page_exists_quick)(pmap_t, vm_page_t);
bool (*pm_page_exists_quick)(pmap_t, vm_page_t);
int (*pm_page_wired_mappings)(vm_page_t);
boolean_t (*pm_page_is_mapped)(vm_page_t);
bool (*pm_page_is_mapped)(vm_page_t);
void (*pm_remove_pages)(pmap_t);
boolean_t (*pm_is_modified)(vm_page_t);
boolean_t (*pm_is_prefaultable)(pmap_t, vm_offset_t);
boolean_t (*pm_is_referenced)(vm_page_t);
bool (*pm_is_modified)(vm_page_t);
bool (*pm_is_prefaultable)(pmap_t, vm_offset_t);
bool (*pm_is_referenced)(vm_page_t);
void (*pm_remove_write)(vm_page_t);
int (*pm_ts_referenced)(vm_page_t);
void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int, int);

View file

@ -214,7 +214,7 @@ static int moea_bpvo_pool_index = 0;
#define VSID_NBPW (sizeof(u_int32_t) * 8)
static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW];
static boolean_t moea_initialized = FALSE;
static bool moea_initialized = false;
/*
* Statistics.
@ -266,7 +266,7 @@ static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int);
static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
vm_prot_t, u_int, int8_t);
static void moea_syncicache(vm_paddr_t, vm_size_t);
static boolean_t moea_query_bit(vm_page_t, int);
static bool moea_query_bit(vm_page_t, int);
static u_int moea_clear_bit(vm_page_t, int);
static void moea_kremove(vm_offset_t);
int moea_pte_spill(vm_offset_t);
@ -286,13 +286,13 @@ void moea_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
void moea_init(void);
boolean_t moea_is_modified(vm_page_t);
boolean_t moea_is_prefaultable(pmap_t, vm_offset_t);
boolean_t moea_is_referenced(vm_page_t);
bool moea_is_modified(vm_page_t);
bool moea_is_prefaultable(pmap_t, vm_offset_t);
bool moea_is_referenced(vm_page_t);
int moea_ts_referenced(vm_page_t);
vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *);
boolean_t moea_page_exists_quick(pmap_t, vm_page_t);
bool moea_page_exists_quick(pmap_t, vm_page_t);
void moea_page_init(vm_page_t);
int moea_page_wired_mappings(vm_page_t);
int moea_pinit(pmap_t);
@ -324,7 +324,7 @@ void moea_dumpsys_map(vm_paddr_t pa, size_t sz, void **va);
void moea_scan_init(void);
vm_offset_t moea_quick_enter_page(vm_page_t m);
void moea_quick_remove_page(vm_offset_t addr);
boolean_t moea_page_is_mapped(vm_page_t m);
bool moea_page_is_mapped(vm_page_t m);
bool moea_ps_enabled(pmap_t pmap);
static int moea_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
@ -1116,7 +1116,7 @@ moea_quick_remove_page(vm_offset_t addr)
{
}
boolean_t
bool
moea_page_is_mapped(vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
@ -1318,13 +1318,13 @@ moea_init(void)
moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
UMA_ZONE_VM | UMA_ZONE_NOFREE);
moea_initialized = TRUE;
moea_initialized = true;
}
boolean_t
bool
moea_is_referenced(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_is_referenced: page %p is not managed", m));
@ -1334,10 +1334,10 @@ moea_is_referenced(vm_page_t m)
return (rv);
}
boolean_t
bool
moea_is_modified(vm_page_t m)
{
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_is_modified: page %p is not managed", m));
@ -1346,7 +1346,7 @@ moea_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
rv = moea_query_bit(m, PTE_CHG);
@ -1354,11 +1354,11 @@ moea_is_modified(vm_page_t m)
return (rv);
}
boolean_t
bool
moea_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
struct pvo_entry *pvo;
boolean_t rv;
bool rv;
PMAP_LOCK(pmap);
pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
@ -1657,21 +1657,21 @@ moea_map(vm_offset_t *virt, vm_paddr_t pa_start,
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
moea_page_exists_quick(pmap_t pmap, vm_page_t m)
{
int loops;
struct pvo_entry *pvo;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = FALSE;
rv = false;
rw_wlock(&pvh_global_lock);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (pvo->pvo_pmap == pmap) {
rv = TRUE;
rv = true;
break;
}
if (++loops >= 16)
@ -2525,7 +2525,7 @@ moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
return (victim_idx & 7);
}
static boolean_t
static bool
moea_query_bit(vm_page_t m, int ptebit)
{
struct pvo_entry *pvo;
@ -2533,7 +2533,7 @@ moea_query_bit(vm_page_t m, int ptebit)
rw_assert(&pvh_global_lock, RA_WLOCKED);
if (moea_attr_fetch(m) & ptebit)
return (TRUE);
return (true);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
/*
@ -2542,7 +2542,7 @@ moea_query_bit(vm_page_t m, int ptebit)
*/
if (pvo->pvo_pte.pte.pte_lo & ptebit) {
moea_attr_save(m, ptebit);
return (TRUE);
return (true);
}
}
@ -2564,12 +2564,12 @@ moea_query_bit(vm_page_t m, int ptebit)
mtx_unlock(&moea_table_mutex);
if (pvo->pvo_pte.pte.pte_lo & ptebit) {
moea_attr_save(m, ptebit);
return (TRUE);
return (true);
}
}
}
return (FALSE);
return (false);
}
static u_int

View file

@ -242,7 +242,7 @@ SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
#endif
static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
static boolean_t moea64_initialized = FALSE;
static bool moea64_initialized = false;
#ifdef MOEA64_STATS
/*
@ -288,7 +288,7 @@ static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
/*
* Utility routines.
*/
static boolean_t moea64_query_bit(vm_page_t, uint64_t);
static bool moea64_query_bit(vm_page_t, uint64_t);
static u_int moea64_clear_bit(vm_page_t, uint64_t);
static void moea64_kremove(vm_offset_t);
static void moea64_syncicache(pmap_t pmap, vm_offset_t va,
@ -397,12 +397,12 @@ void moea64_enter_quick(pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(void);
boolean_t moea64_is_modified(vm_page_t);
boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(vm_page_t);
bool moea64_is_modified(vm_page_t);
bool moea64_is_prefaultable(pmap_t, vm_offset_t);
bool moea64_is_referenced(vm_page_t);
int moea64_ts_referenced(vm_page_t);
vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(pmap_t, vm_page_t);
bool moea64_page_exists_quick(pmap_t, vm_page_t);
void moea64_page_init(vm_page_t);
int moea64_page_wired_mappings(vm_page_t);
int moea64_pinit(pmap_t);
@ -436,7 +436,7 @@ void moea64_scan_init(void);
vm_offset_t moea64_quick_enter_page(vm_page_t m);
vm_offset_t moea64_quick_enter_page_dmap(vm_page_t m);
void moea64_quick_remove_page(vm_offset_t addr);
boolean_t moea64_page_is_mapped(vm_page_t m);
bool moea64_page_is_mapped(vm_page_t m);
static int moea64_map_user_ptr(pmap_t pm,
volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen);
static int moea64_decode_kernel_ptr(vm_offset_t addr,
@ -1629,7 +1629,7 @@ moea64_quick_remove_page(vm_offset_t addr)
sched_unpin();
}
boolean_t
bool
moea64_page_is_mapped(vm_page_t m)
{
return (!LIST_EMPTY(&(m)->md.mdpg_pvoh));
@ -1984,10 +1984,10 @@ moea64_init(void)
elf32_nxstack = 1;
#endif
moea64_initialized = TRUE;
moea64_initialized = true;
}
boolean_t
bool
moea64_is_referenced(vm_page_t m)
{
@ -1997,7 +1997,7 @@ moea64_is_referenced(vm_page_t m)
return (moea64_query_bit(m, LPTE_REF));
}
boolean_t
bool
moea64_is_modified(vm_page_t m)
{
@ -2008,21 +2008,21 @@ moea64_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (false);
return (moea64_query_bit(m, LPTE_CHG));
}
boolean_t
bool
moea64_is_prefaultable(pmap_t pmap, vm_offset_t va)
{
struct pvo_entry *pvo;
boolean_t rv = TRUE;
bool rv = true;
PMAP_LOCK(pmap);
pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
if (pvo != NULL)
rv = FALSE;
rv = false;
PMAP_UNLOCK(pmap);
return (rv);
}
@ -2376,21 +2376,21 @@ moea64_map(vm_offset_t *virt, vm_paddr_t pa_start,
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
moea64_page_exists_quick(pmap_t pmap, vm_page_t m)
{
int loops;
struct pvo_entry *pvo;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("moea64_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = FALSE;
rv = false;
PV_PAGE_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
rv = TRUE;
rv = true;
break;
}
if (++loops >= 16)
@ -3031,12 +3031,12 @@ moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}
static boolean_t
static bool
moea64_query_bit(vm_page_t m, uint64_t ptebit)
{
struct pvo_entry *pvo;
int64_t ret;
boolean_t rv;
bool rv;
vm_page_t sp;
/*
@ -3048,13 +3048,13 @@ moea64_query_bit(vm_page_t m, uint64_t ptebit)
((sp = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK)) != NULL &&
(sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) ==
(ptebit | MDPG_ATTR_SP)))
return (TRUE);
return (true);
/*
* Examine each PTE. Sync so that any pending REF/CHG bits are
* flushed to the PTEs.
*/
rv = FALSE;
rv = false;
powerpc_sync();
PV_PAGE_LOCK(m);
LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
@ -3065,7 +3065,7 @@ moea64_query_bit(vm_page_t m, uint64_t ptebit)
*/
if (ret != -1) {
if ((ret & ptebit) != 0) {
rv = TRUE;
rv = true;
break;
}
continue;
@ -3089,7 +3089,7 @@ moea64_query_bit(vm_page_t m, uint64_t ptebit)
atomic_set_32(&m->md.mdpg_attrs,
ret & (LPTE_CHG | LPTE_REF));
if (ret & ptebit) {
rv = TRUE;
rv = true;
break;
}
}

View file

@ -439,14 +439,14 @@ vm_page_t mmu_radix_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t);
void mmu_radix_kenter(vm_offset_t, vm_paddr_t);
vm_paddr_t mmu_radix_kextract(vm_offset_t);
void mmu_radix_kremove(vm_offset_t);
boolean_t mmu_radix_is_modified(vm_page_t);
boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
boolean_t mmu_radix_is_referenced(vm_page_t);
bool mmu_radix_is_modified(vm_page_t);
bool mmu_radix_is_prefaultable(pmap_t, vm_offset_t);
bool mmu_radix_is_referenced(vm_page_t);
void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t,
vm_pindex_t, vm_size_t);
boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t);
bool mmu_radix_page_exists_quick(pmap_t, vm_page_t);
void mmu_radix_page_init(vm_page_t);
boolean_t mmu_radix_page_is_mapped(vm_page_t m);
bool mmu_radix_page_is_mapped(vm_page_t m);
void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t);
int mmu_radix_page_wired_mappings(vm_page_t);
int mmu_radix_pinit(pmap_t);
@ -563,9 +563,9 @@ static struct pmap_funcs mmu_radix_methods = {
MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods);
static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
static bool pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
struct rwlock **lockp);
static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
static bool pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *);
static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
struct spglist *free, struct rwlock **lockp);
@ -602,7 +602,7 @@ static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va,
struct rwlock **lockp);
static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct spglist *free);
static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
static bool pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start);
static void pmap_invalidate_all(pmap_t pmap);
@ -1028,17 +1028,17 @@ pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags)
}
/*
* Returns TRUE if the given page is mapped individually or as part of
* a 2mpage. Otherwise, returns FALSE.
* Returns true if the given page is mapped individually or as part of
* a 2mpage. Otherwise, returns false.
*/
boolean_t
bool
mmu_radix_page_is_mapped(vm_page_t m)
{
struct rwlock *lock;
boolean_t rv;
bool rv;
if ((m->oflags & VPO_UNMANAGED) != 0)
return (FALSE);
return (false);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@ -1727,7 +1727,7 @@ pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
* Conditionally create the PV entry for a 4KB page mapping if the required
* memory can be allocated without resorting to reclamation.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct rwlock **lockp)
{
@ -1740,9 +1740,9 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link);
m->md.pv_gen++;
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX];
@ -2834,7 +2834,7 @@ mmu_radix_enter(pmap_t pmap, vm_offset_t va, vm_page_t m,
vm_paddr_t opa, pa;
vm_page_t mpte, om;
int rv, retrycount;
boolean_t nosleep, invalidate_all, invalidate_page;
bool nosleep, invalidate_all, invalidate_page;
va = trunc_page(va);
retrycount = 0;
@ -3735,8 +3735,8 @@ mmu_radix_init(void)
1, 1, M_WAITOK);
}
static boolean_t
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
static bool
pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
{
struct rwlock *lock;
pv_entry_t pv;
@ -3744,9 +3744,9 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
pt_entry_t *pte, mask;
pmap_t pmap;
int md_gen, pvh_gen;
boolean_t rv;
bool rv;
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
restart:
@ -3812,7 +3812,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
boolean_t
bool
mmu_radix_is_modified(vm_page_t m)
{
@ -3824,19 +3824,19 @@ mmu_radix_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (pmap_page_test_mappings(m, FALSE, TRUE));
return (false);
return (pmap_page_test_mappings(m, false, true));
}
boolean_t
bool
mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pml3_entry_t *l3e;
pt_entry_t *pte;
boolean_t rv;
bool rv;
CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr);
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
l3e = pmap_pml3e(pmap, addr);
if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) {
@ -3847,13 +3847,13 @@ mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr)
return (rv);
}
boolean_t
bool
mmu_radix_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
CTR2(KTR_PMAP, "%s(%p)", __func__, m);
return (pmap_page_test_mappings(m, TRUE, FALSE));
return (pmap_page_test_mappings(m, true, false));
}
/*
@ -4109,24 +4109,24 @@ mmu_radix_object_init_pt(pmap_t pmap, vm_offset_t addr,
}
}
boolean_t
bool
mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
struct rwlock *lock;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m);
rv = FALSE;
rv = false;
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -4137,7 +4137,7 @@ mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -4479,18 +4479,18 @@ mmu_radix_pinit0(pmap_t pmap)
/*
* pmap_protect_l3e: do the things to protect a 2mpage in a process
*/
static boolean_t
static bool
pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
{
pt_entry_t newpde, oldpde;
vm_offset_t eva, va;
vm_page_t m;
boolean_t anychanged;
bool anychanged;
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
KASSERT((sva & L3_PAGE_MASK) == 0,
("pmap_protect_l3e: sva is not 2mpage aligned"));
anychanged = FALSE;
anychanged = false;
retry:
oldpde = newpde = be64toh(*l3e);
if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) ==
@ -4514,7 +4514,7 @@ pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot)
*/
if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED)))
goto retry;
anychanged = TRUE;
anychanged = true;
}
return (anychanged);
}
@ -4528,7 +4528,7 @@ mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
pml2_entry_t *l2e;
pml3_entry_t ptpaddr, *l3e;
pt_entry_t *pte;
boolean_t anychanged;
bool anychanged;
CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva,
prot);
@ -4548,7 +4548,7 @@ mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n",
pmap, sva, eva, prot, pmap->pm_pid);
#endif
anychanged = FALSE;
anychanged = false;
PMAP_LOCK(pmap);
for (; sva < eva; sva = va_next) {
@ -4591,7 +4591,7 @@ mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
*/
if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) {
if (pmap_protect_l3e(pmap, l3e, sva, prot))
anychanged = TRUE;
anychanged = true;
continue;
} else if (!pmap_demote_l3e(pmap, l3e, sva)) {
/*
@ -4631,7 +4631,7 @@ mmu_radix_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits)))
goto retry;
if (obits & (PG_A|PG_M)) {
anychanged = TRUE;
anychanged = true;
#ifdef INVARIANTS
if (VERBOSE_PROTECT || pmap_logging)
printf("%#lx %#lx -> %#lx\n",
@ -4718,8 +4718,7 @@ mmu_radix_qremove(vm_offset_t sva, int count)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
{
if (set_PG_ZERO)
@ -4760,19 +4759,19 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
/*
* Decrements a page table page's wire count, which is used to record the
* number of valid page table entries within the page. If the wire count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static inline boolean_t
static inline bool
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
--m->ref_count;
if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m, free);
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
static void
@ -4819,7 +4818,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
* Put page on a list so that it is released after
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
pmap_add_delayed_free_list(m, free, true);
}
/*
@ -4897,11 +4896,11 @@ pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
}
}
static boolean_t
static bool
pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
{
struct rwlock *lock;
boolean_t rv;
bool rv;
lock = NULL;
rv = pmap_demote_l3e_locked(pmap, pde, va, &lock);
@ -4910,7 +4909,7 @@ pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va)
return (rv);
}
static boolean_t
static bool
pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
struct rwlock **lockp)
{
@ -4954,7 +4953,7 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
vm_page_free_pages_toq(&free, true);
CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
return (false);
}
mpte->pindex = pmap_l3e_pindex(va);
if (va < VM_MAXUSER_ADDRESS)
@ -5015,7 +5014,7 @@ pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va,
counter_u64_add(pmap_l3e_demotions, 1);
CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx"
" in pmap %p", va, pmap);
return (TRUE);
return (true);
}
/*
@ -5091,7 +5090,7 @@ pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva,
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_l3e: pte page wire count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
pmap_add_delayed_free_list(mpte, free, false);
}
}
return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free));
@ -5443,7 +5442,7 @@ mmu_radix_remove_pages(pmap_t pmap)
#ifdef PV_STATS
int freed;
#endif
boolean_t superpage;
bool superpage;
vm_paddr_t pa;
/*
@ -5478,7 +5477,7 @@ mmu_radix_remove_pages(pmap_t pmap)
pte = pmap_l2e_to_l3e(pte, pv->pv_va);
tpte = be64toh(*pte);
if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) {
superpage = FALSE;
superpage = false;
ptel3e = tpte;
pte = (pt_entry_t *)PHYS_TO_DMAP(tpte &
PG_FRAME);
@ -5495,7 +5494,7 @@ mmu_radix_remove_pages(pmap_t pmap)
* regular page could be mistaken for
* a superpage.
*/
superpage = TRUE;
superpage = true;
}
if ((tpte & PG_V) == 0) {
@ -5561,7 +5560,7 @@ mmu_radix_remove_pages(pmap_t pmap)
KASSERT(mpte->ref_count == NPTEPG,
("pmap_remove_pages: pte page wire count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, &free, FALSE);
pmap_add_delayed_free_list(mpte, &free, false);
}
} else {
pmap_resident_count_dec(pmap, 1);
@ -5995,7 +5994,7 @@ pmap_pte_attr(pt_entry_t *pte, uint64_t cache_bits, uint64_t mask)
/*
* Tries to demote a 1GB page mapping.
*/
static boolean_t
static bool
pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
{
pml2_entry_t oldpdpe;
@ -6011,7 +6010,7 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
if (pdpg == NULL) {
CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx"
" in pmap %p", va, pmap);
return (FALSE);
return (false);
}
pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT;
pdpgpa = VM_PAGE_TO_PHYS(pdpg);
@ -6043,7 +6042,7 @@ pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va)
counter_u64_add(pmap_l2e_demotions, 1);
CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx"
" in pmap %p", va, pmap);
return (TRUE);
return (true);
}
vm_paddr_t
@ -6202,7 +6201,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
pml3_entry_t *l3e;
pt_entry_t *pte;
int cache_bits, error;
boolean_t changed;
bool changed;
PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
base = trunc_page(va);
@ -6217,7 +6216,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
return (EINVAL);
cache_bits = pmap_cache_bits(mode);
changed = FALSE;
changed = false;
/*
* Pages that aren't mapped aren't supported. Also break down 2MB pages
@ -6298,7 +6297,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) {
pmap_pte_attr(l2e, cache_bits,
RPTE_ATTR_MASK);
changed = TRUE;
changed = true;
}
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
(*l2e & PG_PS_FRAME) < dmaplimit) {
@ -6328,7 +6327,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) {
pmap_pte_attr(l3e, cache_bits,
RPTE_ATTR_MASK);
changed = TRUE;
changed = true;
}
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
(be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) {
@ -6356,7 +6355,7 @@ pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode, bool flush)
if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) {
pmap_pte_attr(pte, cache_bits,
RPTE_ATTR_MASK);
changed = TRUE;
changed = true;
}
if (tmpva >= VM_MIN_KERNEL_ADDRESS &&
(be64toh(*pte) & PG_FRAME) < dmaplimit) {

View file

@ -263,7 +263,7 @@ static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
#endif
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
@ -302,9 +302,9 @@ static vm_paddr_t mmu_booke_extract(pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(pmap_t, vm_offset_t,
vm_prot_t);
static void mmu_booke_init(void);
static boolean_t mmu_booke_is_modified(vm_page_t);
static boolean_t mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(vm_page_t);
static bool mmu_booke_is_modified(vm_page_t);
static bool mmu_booke_is_prefaultable(pmap_t, vm_offset_t);
static bool mmu_booke_is_referenced(vm_page_t);
static int mmu_booke_ts_referenced(vm_page_t);
static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t,
int);
@ -312,7 +312,7 @@ static int mmu_booke_mincore(pmap_t, vm_offset_t,
vm_paddr_t *);
static void mmu_booke_object_init_pt(pmap_t, vm_offset_t,
vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(pmap_t, vm_page_t);
static bool mmu_booke_page_exists_quick(pmap_t, vm_page_t);
static void mmu_booke_page_init(vm_page_t);
static int mmu_booke_page_wired_mappings(vm_page_t);
static int mmu_booke_pinit(pmap_t);
@ -353,7 +353,7 @@ static int mmu_booke_change_attr(vm_offset_t addr,
static int mmu_booke_decode_kernel_ptr(vm_offset_t addr,
int *is_user, vm_offset_t *decoded_addr);
static void mmu_booke_page_array_startup(long);
static boolean_t mmu_booke_page_is_mapped(vm_page_t m);
static bool mmu_booke_page_is_mapped(vm_page_t m);
static bool mmu_booke_ps_enabled(pmap_t pmap);
static struct pmap_funcs mmu_booke_methods = {
@ -1221,7 +1221,7 @@ mmu_booke_decode_kernel_ptr(vm_offset_t addr, int *is_user,
return (0);
}
static boolean_t
static bool
mmu_booke_page_is_mapped(vm_page_t m)
{
@ -1783,22 +1783,22 @@ mmu_booke_page_init(vm_page_t m)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
static boolean_t
static bool
mmu_booke_is_modified(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_is_modified: page %p is not managed", m));
rv = FALSE;
rv = false;
/*
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (false);
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
@ -1806,7 +1806,7 @@ mmu_booke_is_modified(vm_page_t m)
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISMODIFIED(pte))
rv = TRUE;
rv = true;
}
PMAP_UNLOCK(pv->pv_pmap);
if (rv)
@ -1820,34 +1820,34 @@ mmu_booke_is_modified(vm_page_t m)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
static boolean_t
static bool
mmu_booke_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
return (FALSE);
return (false);
}
/*
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
static boolean_t
static bool
mmu_booke_is_referenced(vm_page_t m)
{
pte_t *pte;
pv_entry_t pv;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_is_referenced: page %p is not managed", m));
rv = FALSE;
rv = false;
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
PMAP_LOCK(pv->pv_pmap);
if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL &&
PTE_ISVALID(pte)) {
if (PTE_ISREFERENCED(pte))
rv = TRUE;
rv = true;
}
PMAP_UNLOCK(pv->pv_pmap);
if (rv)
@ -1984,21 +1984,21 @@ mmu_booke_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
* only necessary that true be returned for a small subset of pmaps for proper
* page aging.
*/
static boolean_t
static bool
mmu_booke_page_exists_quick(pmap_t pmap, vm_page_t m)
{
pv_entry_t pv;
int loops;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("mmu_booke_page_exists_quick: page %p is not managed", m));
loops = 0;
rv = FALSE;
rv = false;
rw_wlock(&pvh_global_lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
if (pv->pv_pmap == pmap) {
rv = TRUE;
rv = true;
break;
}
if (++loops >= 16)

View file

@ -128,13 +128,13 @@ static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
static pte_t *ptbl_alloc(pmap_t, unsigned int, boolean_t);
static pte_t *ptbl_alloc(pmap_t, unsigned int, bool);
static void ptbl_free(pmap_t, unsigned int);
static void ptbl_hold(pmap_t, unsigned int);
static int ptbl_unhold(pmap_t, unsigned int);
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
@ -235,7 +235,7 @@ ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
/* Allocate page table. */
static pte_t *
ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, bool nosleep)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
@ -498,7 +498,7 @@ pte_remove(pmap_t pmap, vm_offset_t va, uint8_t flags)
*/
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
boolean_t nosleep)
bool nosleep)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@ -760,7 +760,7 @@ mmu_booke_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
m = PHYS_TO_VM_PAGE(pa);
PMAP_LOCK(pmap);
pte_enter(pmap, m, addr,
PTE_SR | PTE_VALID, FALSE);
PTE_SR | PTE_VALID, false);
__syncicache((void *)(addr + (va & PAGE_MASK)),
sync_sz);
pte_remove(pmap, addr, PTBL_UNHOLD);

View file

@ -138,7 +138,7 @@ static void ptbl_hold(pmap_t, pte_t *);
static int ptbl_unhold(pmap_t, vm_offset_t);
static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool);
static int pte_remove(pmap_t, vm_offset_t, uint8_t);
static pte_t *pte_find(pmap_t, vm_offset_t);
static pte_t *pte_find_next(pmap_t, vm_offset_t *);
@ -440,7 +440,7 @@ pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
*/
static int
pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
boolean_t nosleep)
bool nosleep)
{
unsigned int ptbl_idx = PTBL_IDX(va);
pte_t *ptbl, *pte, pte_tmp;

View file

@ -62,15 +62,15 @@ typedef vm_paddr_t (*pmap_extract_t)(pmap_t, vm_offset_t);
typedef vm_page_t (*pmap_extract_and_hold_t)(pmap_t, vm_offset_t, vm_prot_t);
typedef void (*pmap_growkernel_t)(vm_offset_t);
typedef void (*pmap_init_t)(void);
typedef boolean_t (*pmap_is_modified_t)(vm_page_t);
typedef boolean_t (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t);
typedef boolean_t (*pmap_is_referenced_t)(vm_page_t);
typedef bool (*pmap_is_modified_t)(vm_page_t);
typedef bool (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t);
typedef bool (*pmap_is_referenced_t)(vm_page_t);
typedef int (*pmap_ts_referenced_t)(vm_page_t);
typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t,
vm_pindex_t, vm_size_t);
typedef boolean_t (*pmap_page_exists_quick_t)(pmap_t, vm_page_t);
typedef boolean_t (*pmap_page_is_mapped_t)(vm_page_t);
typedef bool (*pmap_page_exists_quick_t)(pmap_t, vm_page_t);
typedef bool (*pmap_page_is_mapped_t)(vm_page_t);
typedef void (*pmap_page_init_t)(vm_page_t);
typedef int (*pmap_page_wired_mappings_t)(vm_page_t);
typedef void (*pmap_pinit0_t)(pmap_t);

View file

@ -318,12 +318,12 @@ int pmap_decode_kernel_ptr(vm_offset_t addr, int *is_user,
void pmap_deactivate(struct thread *);
vm_paddr_t pmap_kextract(vm_offset_t);
int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
boolean_t pmap_mmu_install(char *name, int prio);
bool pmap_mmu_install(char *name, int prio);
void pmap_mmu_init(void);
const char *pmap_mmu_name(void);
bool pmap_ps_enabled(pmap_t pmap);
int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags);
boolean_t pmap_page_is_mapped(vm_page_t m);
bool pmap_page_is_mapped(vm_page_t m);
#define pmap_map_delete(pmap, sva, eva) pmap_remove(pmap, sva, eva)
void pmap_page_array_startup(long count);

View file

@ -135,12 +135,12 @@ DEFINE_PMAP_IFUNC(vm_paddr_t, kextract, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t));
DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t,
vm_size_t));
DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t));
DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t));
DEFINE_PMAP_IFUNC(bool, is_modified, (vm_page_t));
DEFINE_PMAP_IFUNC(bool, is_prefaultable, (pmap_t, vm_offset_t));
DEFINE_PMAP_IFUNC(bool, is_referenced, (vm_page_t));
DEFINE_PMAP_IFUNC(bool, page_exists_quick, (pmap_t, vm_page_t));
DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t));
DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t));
DEFINE_PMAP_IFUNC(bool, page_is_mapped, (vm_page_t));
DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t));
DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t));
DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t));
@ -196,7 +196,7 @@ DEFINE_DUMPSYS_IFUNC(void *, dump_pmap, (void *, void *, u_long *));
*/
SET_DECLARE(mmu_set, struct mmu_kobj);
boolean_t
bool
pmap_mmu_install(char *name, int prio)
{
mmu_t *mmupp, mmup;
@ -213,11 +213,11 @@ pmap_mmu_install(char *name, int prio)
(prio >= curr_prio || mmu_obj == NULL)) {
curr_prio = prio;
mmu_obj = mmup;
return (TRUE);
return (true);
}
}
return (FALSE);
return (false);
}
/* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */
@ -236,7 +236,7 @@ pmap_mmu_name(void)
int unmapped_buf_allowed;
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{
@ -248,9 +248,9 @@ pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
case VM_MEMATTR_WRITE_BACK:
case VM_MEMATTR_WRITE_THROUGH:
case VM_MEMATTR_PREFETCHABLE:
return (TRUE);
return (true);
default:
return (FALSE);
return (false);
}
}

View file

@ -315,7 +315,7 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
vm_page_t m, struct rwlock **lockp);
static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
@ -1175,8 +1175,7 @@ pmap_ps_enabled(pmap_t pmap __unused)
* physical memory manager after the TLB has been updated.
*/
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
boolean_t set_PG_ZERO)
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO)
{
if (set_PG_ZERO)
@ -1232,10 +1231,10 @@ pmap_remove_pt_page(pmap_t pmap, vm_offset_t va)
/*
* Decrements a page table page's reference count, which is used to record the
* number of valid page table entries within the page. If the reference count
* drops to zero, then the page table page is unmapped. Returns TRUE if the
* page table page was unmapped and FALSE otherwise.
* drops to zero, then the page table page is unmapped. Returns true if the
* page table page was unmapped and false otherwise.
*/
static inline boolean_t
static inline bool
pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{
KASSERT(m->ref_count > 0,
@ -1244,9 +1243,9 @@ pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
--m->ref_count;
if (m->ref_count == 0) {
_pmap_unwire_ptp(pmap, va, m, free);
return (TRUE);
return (true);
} else {
return (FALSE);
return (false);
}
}
@ -1297,7 +1296,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
* Put page on a list so that it is released after
* *ALL* TLB shootdown is done
*/
pmap_add_delayed_free_list(m, free, TRUE);
pmap_add_delayed_free_list(m, free, true);
}
/*
@ -2014,7 +2013,7 @@ pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
* Conditionally create the PV entry for a 4KB page mapping if the required
* memory can be allocated without resorting to reclamation.
*/
static boolean_t
static bool
pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
struct rwlock **lockp)
{
@ -2028,9 +2027,9 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
m->md.pv_gen++;
return (TRUE);
return (true);
} else
return (FALSE);
return (false);
}
/*
@ -2247,7 +2246,7 @@ pmap_remove_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva,
("pmap_remove_l2: l3 page ref count error"));
ml3->ref_count = 1;
vm_page_unwire_noq(ml3);
pmap_add_delayed_free_list(ml3, free, FALSE);
pmap_add_delayed_free_list(ml3, free, false);
}
}
return (pmap_unuse_pt(pmap, sva, l1e, free));
@ -3776,24 +3775,24 @@ pmap_quick_remove_page(vm_offset_t addr)
* is only necessary that true be returned for a small
* subset of pmaps for proper page aging.
*/
boolean_t
bool
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
struct md_page *pvh;
struct rwlock *lock;
pv_entry_t pv;
int loops = 0;
boolean_t rv;
bool rv;
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_page_exists_quick: page %p is not managed", m));
rv = FALSE;
rv = false;
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -3804,7 +3803,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
if (PV_PMAP(pv) == pmap) {
rv = TRUE;
rv = true;
break;
}
loops++;
@ -3935,7 +3934,7 @@ pmap_remove_pages_pv(pmap_t pmap, vm_page_t m, pv_entry_t pv,
KASSERT(mpte->ref_count == Ln_ENTRIES,
("pmap_remove_pages: pte page ref count error"));
mpte->ref_count = 0;
pmap_add_delayed_free_list(mpte, free, FALSE);
pmap_add_delayed_free_list(mpte, free, false);
}
} else {
pmap_resident_count_dec(pmap, 1);
@ -4073,7 +4072,7 @@ pmap_remove_pages(pmap_t pmap)
}
static bool
pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified)
{
struct md_page *pvh;
struct rwlock *lock;
@ -4090,7 +4089,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
if (accessed)
mask |= PTE_A;
rv = FALSE;
rv = false;
rw_rlock(&pvh_global_lock);
lock = VM_PAGE_TO_PV_LIST_LOCK(m);
rw_rlock(lock);
@ -4151,7 +4150,7 @@ pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
* Return whether or not the specified physical page was modified
* in any physical maps.
*/
boolean_t
bool
pmap_is_modified(vm_page_t m)
{
@ -4162,8 +4161,8 @@ pmap_is_modified(vm_page_t m)
* If the page is not busied then this check is racy.
*/
if (!pmap_page_is_write_mapped(m))
return (FALSE);
return (pmap_page_test_mappings(m, FALSE, TRUE));
return (false);
return (pmap_page_test_mappings(m, false, true));
}
/*
@ -4172,21 +4171,21 @@ pmap_is_modified(vm_page_t m)
* Return whether or not the specified virtual address is eligible
* for prefault.
*/
boolean_t
bool
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
pt_entry_t *l3;
boolean_t rv;
bool rv;
/*
* Return TRUE if and only if the L3 entry for the specified virtual
* Return true if and only if the L3 entry for the specified virtual
* address is allocated but invalid.
*/
rv = FALSE;
rv = false;
PMAP_LOCK(pmap);
l3 = pmap_l3(pmap, addr);
if (l3 != NULL && pmap_load(l3) == 0) {
rv = TRUE;
rv = true;
}
PMAP_UNLOCK(pmap);
return (rv);
@ -4198,13 +4197,13 @@ pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
* Return whether or not the specified physical page was referenced
* in any physical maps.
*/
boolean_t
bool
pmap_is_referenced(vm_page_t m)
{
KASSERT((m->oflags & VPO_UNMANAGED) == 0,
("pmap_is_referenced: page %p is not managed", m));
return (pmap_page_test_mappings(m, TRUE, FALSE));
return (pmap_page_test_mappings(m, true, false));
}
/*
@ -4900,7 +4899,7 @@ pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
}
}
boolean_t
bool
pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode)
{

View file

@ -81,8 +81,8 @@ typedef struct pmap_statistics *pmap_statistics_t;
* Each machine-dependent implementation is required to provide:
*
* vm_memattr_t pmap_page_get_memattr(vm_page_t);
* boolean_t pmap_page_is_mapped(vm_page_t);
* boolean_t pmap_page_is_write_mapped(vm_page_t);
* bool pmap_page_is_mapped(vm_page_t);
* bool pmap_page_is_write_mapped(vm_page_t);
* void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
*/
#include <machine/pmap.h>
@ -138,15 +138,15 @@ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va,
vm_prot_t prot);
void pmap_growkernel(vm_offset_t);
void pmap_init(void);
boolean_t pmap_is_modified(vm_page_t m);
boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
boolean_t pmap_is_referenced(vm_page_t m);
boolean_t pmap_is_valid_memattr(pmap_t, vm_memattr_t);
bool pmap_is_modified(vm_page_t m);
bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t va);
bool pmap_is_referenced(vm_page_t m);
bool pmap_is_valid_memattr(pmap_t, vm_memattr_t);
vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap);
void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
vm_object_t object, vm_pindex_t pindex, vm_size_t size);
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m);
void pmap_page_init(vm_page_t m);
int pmap_page_wired_mappings(vm_page_t m);
int pmap_pinit(pmap_t);