mirror of
https://github.com/freebsd/freebsd-src
synced 2024-10-15 12:54:27 +00:00
vm_iommu_map()/unmap(): stop transiently wiring already wired pages
Namely, switch from vm_fault_quick_hold() to pmap_extract() KPI to translate gpa to hpa. Assert that the looked up hpa belongs to the wired page, as it should be for a VM which is configured for pass-through (this is theoretically a restriction that could be removed on newer DMARs). Noted by: alc Reviewed by: alc, jhb, markj Sponsored by: The FreeBSD Foundation MFC after: 1 week Differential revision: https://reviews.freebsd.org/D43140
This commit is contained in:
parent
3abc72f871
commit
671a00491d
|
@@ -1041,9 +1041,10 @@ vm_iommu_map(struct vm *vm)
|
|||
{
|
||||
vm_paddr_t gpa, hpa;
|
||||
struct mem_map *mm;
|
||||
void *vp, *cookie;
|
||||
int i;
|
||||
|
||||
sx_assert(&vm->mem_segs_lock, SX_LOCKED);
|
||||
|
||||
for (i = 0; i < VM_MAX_MEMMAPS; i++) {
|
||||
mm = &vm->mem_maps[i];
|
||||
if (!sysmem_mapping(vm, mm))
|
||||
|
@@ -1057,13 +1058,24 @@ vm_iommu_map(struct vm *vm)
|
|||
mm->flags |= VM_MEMMAP_F_IOMMU;
|
||||
|
||||
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
|
||||
vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
|
||||
VM_PROT_WRITE, &cookie);
|
||||
KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
|
||||
vm_name(vm), gpa));
|
||||
vm_gpa_release(cookie);
|
||||
hpa = pmap_extract(vmspace_pmap(vm->vmspace), gpa);
|
||||
|
||||
/*
|
||||
* All mappings in the vmm vmspace must be
|
||||
* present since they are managed by vmm in this way.
|
||||
* Because we are in pass-through mode, the
|
||||
* mappings must also be wired. This implies
|
||||
* that all pages must be mapped and wired,
|
||||
* allowing to use pmap_extract() and avoiding the
|
||||
* need to use vm_gpa_hold_global().
|
||||
*
|
||||
* This could change if/when we start
|
||||
* supporting page faults on IOMMU maps.
|
||||
*/
|
||||
KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(hpa)),
|
||||
("vm_iommu_map: vm %p gpa %jx hpa %jx not wired",
|
||||
vm, (uintmax_t)gpa, (uintmax_t)hpa));
|
||||
|
||||
hpa = DMAP_TO_PHYS((uintptr_t)vp);
|
||||
iommu_create_mapping(vm->iommu, gpa, hpa, PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
@@ -1076,9 +1088,10 @@ vm_iommu_unmap(struct vm *vm)
|
|||
{
|
||||
vm_paddr_t gpa;
|
||||
struct mem_map *mm;
|
||||
void *vp, *cookie;
|
||||
int i;
|
||||
|
||||
sx_assert(&vm->mem_segs_lock, SX_LOCKED);
|
||||
|
||||
for (i = 0; i < VM_MAX_MEMMAPS; i++) {
|
||||
mm = &vm->mem_maps[i];
|
||||
if (!sysmem_mapping(vm, mm))
|
||||
|
@@ -1092,12 +1105,10 @@ vm_iommu_unmap(struct vm *vm)
|
|||
mm->gpa, mm->len, mm->flags));
|
||||
|
||||
for (gpa = mm->gpa; gpa < mm->gpa + mm->len; gpa += PAGE_SIZE) {
|
||||
vp = vm_gpa_hold_global(vm, gpa, PAGE_SIZE,
|
||||
VM_PROT_WRITE, &cookie);
|
||||
KASSERT(vp != NULL, ("vm(%s) could not map gpa %#lx",
|
||||
vm_name(vm), gpa));
|
||||
vm_gpa_release(cookie);
|
||||
|
||||
KASSERT(vm_page_wired(PHYS_TO_VM_PAGE(pmap_extract(
|
||||
vmspace_pmap(vm->vmspace), gpa))),
|
||||
("vm_iommu_unmap: vm %p gpa %jx not wired",
|
||||
vm, (uintmax_t)gpa));
|
||||
iommu_remove_mapping(vm->iommu, gpa, PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue