Mirror of https://github.com/freebsd/freebsd-src, synced 2024-10-15 04:43:53 +00:00.
Assert that valid PTEs are not overwritten when installing a new PTP

amd64 and 32-bit ARM already had assertions to this effect. Add them to the other pmaps.

Reviewed by: alc, kib
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D31171
This commit is contained in:
parent
aee6e7dc0c
commit
b092c58c00
|
@ -1870,6 +1870,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
l0index = ptepindex - (NUL2E + NUL1E);
|
||||
l0 = &pmap->pm_l0[l0index];
|
||||
KASSERT((pmap_load(l0) & ATTR_DESCR_VALID) == 0,
|
||||
("%s: L0 entry %#lx is valid", __func__, pmap_load(l0)));
|
||||
pmap_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
|
||||
} else if (ptepindex >= NUL2E) {
|
||||
vm_pindex_t l0index, l1index;
|
||||
|
@ -1896,6 +1898,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
|
||||
l1 = &l1[ptepindex & Ln_ADDR_MASK];
|
||||
KASSERT((pmap_load(l1) & ATTR_DESCR_VALID) == 0,
|
||||
("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
|
||||
pmap_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
|
||||
} else {
|
||||
vm_pindex_t l0index, l1index;
|
||||
|
@ -1938,6 +1942,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
|
||||
l2 = &l2[ptepindex & Ln_ADDR_MASK];
|
||||
KASSERT((pmap_load(l2) & ATTR_DESCR_VALID) == 0,
|
||||
("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
|
||||
pmap_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
|
||||
}
|
||||
|
||||
|
|
|
@ -2157,8 +2157,11 @@ _pmap_allocpte(pmap_t pmap, u_int ptepindex, u_int flags)
|
|||
pmap->pm_stats.resident_count++;
|
||||
|
||||
ptepa = VM_PAGE_TO_PHYS(m);
|
||||
KASSERT((pmap->pm_pdir[ptepindex] & PG_V) == 0,
|
||||
("%s: page directory entry %#jx is valid",
|
||||
__func__, (uintmax_t)pmap->pm_pdir[ptepindex]));
|
||||
pmap->pm_pdir[ptepindex] =
|
||||
(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
|
||||
(pd_entry_t)(ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
|
||||
|
||||
return (m);
|
||||
}
|
||||
|
|
|
@ -1217,9 +1217,13 @@ _pmap_allocpte(pmap_t pmap, unsigned ptepindex, u_int flags)
|
|||
}
|
||||
/* Next level entry */
|
||||
pde = (pd_entry_t *)*pdep;
|
||||
KASSERT(pde[pdeindex] == 0,
|
||||
("%s: PTE %p is valid", __func__, pde[pdeindex]));
|
||||
pde[pdeindex] = (pd_entry_t)pageva;
|
||||
}
|
||||
#else
|
||||
KASSERT(pmap->pm_segtab[ptepindex] == 0,
|
||||
("%s: PTE %p is valid", __func__, pmap->pm_segtab[ptepindex]));
|
||||
pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
|
||||
#endif
|
||||
pmap->pm_stats.resident_count++;
|
||||
|
|
|
@ -4248,8 +4248,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
/* Wire up a new PDPE page */
|
||||
pml1index = ptepindex - (NUPDE + NUPDPE);
|
||||
l1e = &pmap->pm_pml1[pml1index];
|
||||
KASSERT((be64toh(*l1e) & PG_V) == 0,
|
||||
("%s: L1 entry %#lx is valid", __func__, *l1e));
|
||||
pde_store(l1e, VM_PAGE_TO_PHYS(m));
|
||||
|
||||
} else if (ptepindex >= NUPDE) {
|
||||
vm_pindex_t pml1index;
|
||||
vm_pindex_t pdpindex;
|
||||
|
@ -4278,8 +4279,9 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
/* Now find the pdp page */
|
||||
l2e = &l2e[pdpindex & RPTE_MASK];
|
||||
KASSERT((be64toh(*l2e) & PG_V) == 0,
|
||||
("%s: L2 entry %#lx is valid", __func__, *l2e));
|
||||
pde_store(l2e, VM_PAGE_TO_PHYS(m));
|
||||
|
||||
} else {
|
||||
vm_pindex_t pml1index;
|
||||
vm_pindex_t pdpindex;
|
||||
|
@ -4324,6 +4326,8 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
/* Now we know where the page directory page is */
|
||||
l3e = &l3e[ptepindex & RPTE_MASK];
|
||||
KASSERT((be64toh(*l3e) & PG_V) == 0,
|
||||
("%s: L3 entry %#lx is valid", __func__, *l3e));
|
||||
pde_store(l3e, VM_PAGE_TO_PHYS(m));
|
||||
}
|
||||
|
||||
|
|
|
@ -1285,6 +1285,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
|
||||
l1index = ptepindex - NUL1E;
|
||||
l1 = &pmap->pm_l1[l1index];
|
||||
KASSERT((pmap_load(l1) & PTE_V) == 0,
|
||||
("%s: L1 entry %#lx is valid", __func__, pmap_load(l1)));
|
||||
|
||||
pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
|
||||
entry = (PTE_V);
|
||||
|
@ -1314,6 +1316,8 @@ _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
|
|||
phys = PTE_TO_PHYS(pmap_load(l1));
|
||||
l2 = (pd_entry_t *)PHYS_TO_DMAP(phys);
|
||||
l2 = &l2[ptepindex & Ln_ADDR_MASK];
|
||||
KASSERT((pmap_load(l2) & PTE_V) == 0,
|
||||
("%s: L2 entry %#lx is valid", __func__, pmap_load(l2)));
|
||||
|
||||
pn = (VM_PAGE_TO_PHYS(m) / PAGE_SIZE);
|
||||
entry = (PTE_V);
|
||||
|
|
Loading…
Reference in a new issue