mirror of
https://github.com/freebsd/freebsd-src
synced 2024-10-04 23:50:27 +00:00
- Make the acquisition of Giant in vm_fault_unwire() conditional on the
  pmap. For the kernel pmap, Giant is not required. In general, for
  other pmaps, Giant is required by i386's pmap_pte() implementation.
  Specifically, the use of PMAP2/PADDR2 is synchronized by Giant.
  Note: In principle, updates to the kernel pmap's wired count could be
  lost without Giant. However, in practice, we never use the kernel
  pmap's wired count. This will be resolved when pmap locking appears.
- With the above change, cpu_thread_clean() and uma_large_free() need
  not acquire Giant. (The first case is simply the revival of
  i386/i386/vm_machdep.c's revision 1.226 by peter.)
This commit is contained in:
parent
fe5a02c927
commit
5d328ed44b
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=126793
|
@ -320,10 +320,8 @@ cpu_thread_clean(struct thread *td)
|
|||
* XXX do we need to move the TSS off the allocated pages
|
||||
* before freeing them? (not done here)
|
||||
*/
|
||||
mtx_lock(&Giant);
|
||||
kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
|
||||
ctob(IOPAGES + 1));
|
||||
mtx_unlock(&Giant);
|
||||
pcb->pcb_ext = 0;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2052,17 +2052,7 @@ void
|
|||
uma_large_free(uma_slab_t slab)
|
||||
{
|
||||
vsetobj((vm_offset_t)slab->us_data, kmem_object);
|
||||
/*
|
||||
* XXX: We get a lock order reversal if we don't have Giant:
|
||||
* vm_map_remove (locks system map) -> vm_map_delete ->
|
||||
* vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant)
|
||||
*/
|
||||
if (!mtx_owned(&Giant)) {
|
||||
mtx_lock(&Giant);
|
||||
page_free(slab->us_data, slab->us_size, slab->us_flags);
|
||||
mtx_unlock(&Giant);
|
||||
} else
|
||||
page_free(slab->us_data, slab->us_size, slab->us_flags);
|
||||
page_free(slab->us_data, slab->us_size, slab->us_flags);
|
||||
uma_zfree_internal(slabzone, slab, NULL, 0);
|
||||
}
|
||||
|
||||
|
|
|
@ -1081,7 +1081,8 @@ vm_fault_unwire(map, start, end)
|
|||
|
||||
pmap = vm_map_pmap(map);
|
||||
|
||||
mtx_lock(&Giant);
|
||||
if (pmap != kernel_pmap)
|
||||
mtx_lock(&Giant);
|
||||
/*
|
||||
* Since the pages are wired down, we must be able to get their
|
||||
* mappings from the physical map system.
|
||||
|
@ -1095,7 +1096,8 @@ vm_fault_unwire(map, start, end)
|
|||
vm_page_unlock_queues();
|
||||
}
|
||||
}
|
||||
mtx_unlock(&Giant);
|
||||
if (pmap != kernel_pmap)
|
||||
mtx_unlock(&Giant);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in a new issue