diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index b09128d483c6..e9e236f462ff 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -320,10 +320,8 @@ cpu_thread_clean(struct thread *td)
 		 * XXX do we need to move the TSS off the allocated pages
 		 * before freeing them? (not done here)
 		 */
-		mtx_lock(&Giant);
 		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ext,
 		    ctob(IOPAGES + 1));
-		mtx_unlock(&Giant);
 		pcb->pcb_ext = 0;
 	}
 }
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index fb5f5fc226ec..f6935407372a 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -2052,17 +2052,7 @@ void
 uma_large_free(uma_slab_t slab)
 {
 	vsetobj((vm_offset_t)slab->us_data, kmem_object);
-	/*
-	 * XXX: We get a lock order reversal if we don't have Giant:
-	 * vm_map_remove (locks system map) -> vm_map_delete ->
-	 * vm_map_entry_unwire -> vm_fault_unwire -> mtx_lock(&Giant)
-	 */
-	if (!mtx_owned(&Giant)) {
-		mtx_lock(&Giant);
-		page_free(slab->us_data, slab->us_size, slab->us_flags);
-		mtx_unlock(&Giant);
-	} else
-		page_free(slab->us_data, slab->us_size, slab->us_flags);
+	page_free(slab->us_data, slab->us_size, slab->us_flags);
 	uma_zfree_internal(slabzone, slab, NULL, 0);
 }
 
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index 602b6596d249..fded09939bbf 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1081,7 +1081,8 @@ vm_fault_unwire(map, start, end)
 
 	pmap = vm_map_pmap(map);
 
-	mtx_lock(&Giant);
+	if (pmap != kernel_pmap)
+		mtx_lock(&Giant);
 	/*
 	 * Since the pages are wired down, we must be able to get their
 	 * mappings from the physical map system.
@@ -1095,7 +1096,8 @@ vm_fault_unwire(map, start, end)
 			vm_page_unlock_queues();
 		}
 	}
-	mtx_unlock(&Giant);
+	if (pmap != kernel_pmap)
+		mtx_unlock(&Giant);
 }
 
 /*