Use atomics and a shared object lock to protect the object reference count.

Certain consumers still need to guarantee a stable reference, so we cannot
switch entirely to atomics yet.  Exclusive lock holders can still modify
and examine the refcount without using the ref API.

Reviewed by:	kib
Tested by:	pho
Sponsored by:	Netflix, Intel
Differential Revision:	https://reviews.freebsd.org/D21598
This commit is contained in:
Jeff Roberson 2019-10-29 20:58:46 +00:00
parent 4b3e066539
commit 51df53213c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=354157
4 changed files with 56 additions and 37 deletions

View file

@ -175,4 +175,22 @@ refcount_release_if_not_last(volatile u_int *count)
}
}
/*
 * Drop one reference, but only while strictly more than n references
 * remain; this can never perform the final release (enforced by the
 * KASSERT below), so callers that may hold the last reference must use
 * refcount_release() instead.
 *
 * Returns true if a reference was released, false if the count was
 * already <= n.  A saturated counter also reports true without being
 * modified — once saturated it is deliberately left pinned.
 */
static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{
/* Snapshot of *count; atomic_fcmpset_int refreshes it on CAS failure. */
u_int old;
KASSERT(n > 0,
("refcount_release_if_gt: Use refcount_release for final ref"));
old = *count;
for (;;) {
/* Too few references to release one — fail without touching *count. */
if (REFCOUNT_COUNT(old) <= n)
return (false);
/* Saturated counters are never decremented; report success as-is. */
if (__predict_false(REFCOUNT_SATURATED(old)))
return (true);
/*
 * Try to publish old - 1.  On failure, old has been reloaded
 * with the current value and the loop re-evaluates the guards.
 */
if (atomic_fcmpset_int(count, &old, old - 1))
return (true);
}
}
#endif /* ! __SYS_REFCOUNT_H__ */

View file

@ -224,8 +224,8 @@ vm_object_zinit(void *mem, int size, int flags)
/* These are true for any object that has been freed */
object->type = OBJT_DEAD;
object->ref_count = 0;
vm_radix_init(&object->rtree);
refcount_init(&object->ref_count, 0);
refcount_init(&object->paging_in_progress, 0);
refcount_init(&object->busy, 0);
object->resident_page_count = 0;
@ -282,7 +282,7 @@ _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
object->size = size;
object->domain.dr_policy = NULL;
object->generation = 1;
object->ref_count = 1;
refcount_init(&object->ref_count, 1);
object->memattr = VM_MEMATTR_DEFAULT;
object->cred = NULL;
object->charge = 0;
@ -444,9 +444,9 @@ vm_object_reference(vm_object_t object)
{
if (object == NULL)
return;
VM_OBJECT_WLOCK(object);
VM_OBJECT_RLOCK(object);
vm_object_reference_locked(object);
VM_OBJECT_WUNLOCK(object);
VM_OBJECT_RUNLOCK(object);
}
/*
@ -461,8 +461,8 @@ vm_object_reference_locked(vm_object_t object)
{
struct vnode *vp;
VM_OBJECT_ASSERT_WLOCKED(object);
object->ref_count++;
VM_OBJECT_ASSERT_LOCKED(object);
refcount_acquire(&object->ref_count);
if (object->type == OBJT_VNODE) {
vp = object->handle;
vref(vp);
@ -477,24 +477,16 @@ vm_object_vndeallocate(vm_object_t object)
{
struct vnode *vp = (struct vnode *) object->handle;
VM_OBJECT_ASSERT_WLOCKED(object);
KASSERT(object->type == OBJT_VNODE,
("vm_object_vndeallocate: not a vnode object"));
KASSERT(vp != NULL, ("vm_object_vndeallocate: missing vp"));
#ifdef INVARIANTS
if (object->ref_count == 0) {
vn_printf(vp, "vm_object_vndeallocate ");
panic("vm_object_vndeallocate: bad object reference count");
}
#endif
if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
if (refcount_release(&object->ref_count) &&
!umtx_shm_vnobj_persistent)
umtx_shm_object_terminated(object);
object->ref_count--;
VM_OBJECT_RUNLOCK(object);
/* vrele may need the vnode lock. */
VM_OBJECT_WUNLOCK(object);
vrele(vp);
}
@ -513,24 +505,32 @@ void
vm_object_deallocate(vm_object_t object)
{
vm_object_t temp;
bool released;
while (object != NULL) {
VM_OBJECT_WLOCK(object);
VM_OBJECT_RLOCK(object);
if (object->type == OBJT_VNODE) {
vm_object_vndeallocate(object);
return;
}
/*
* If the reference count goes to 0 we start calling
* vm_object_terminate() on the object chain. A ref count
* of 1 may be a special case depending on the shadow count
* being 0 or 1. These cases require a write lock on the
* object.
*/
released = refcount_release_if_gt(&object->ref_count, 2);
VM_OBJECT_RUNLOCK(object);
if (released)
return;
VM_OBJECT_WLOCK(object);
KASSERT(object->ref_count != 0,
("vm_object_deallocate: object deallocated too many times: %d", object->type));
/*
* If the reference count goes to 0 we start calling
* vm_object_terminate() on the object chain.
* A ref count of 1 may be a special case depending on the
* shadow count being 0 or 1.
*/
object->ref_count--;
refcount_release(&object->ref_count);
if (object->ref_count > 1) {
VM_OBJECT_WUNLOCK(object);
return;
@ -558,7 +558,7 @@ vm_object_deallocate(vm_object_t object)
/*
* Avoid a potential deadlock.
*/
object->ref_count++;
refcount_acquire(&object->ref_count);
VM_OBJECT_WUNLOCK(object);
/*
* More likely than not the thread
@ -580,7 +580,7 @@ vm_object_deallocate(vm_object_t object)
(robject->type == OBJT_DEFAULT ||
robject->type == OBJT_SWAP)) {
robject->ref_count++;
refcount_acquire(&robject->ref_count);
retry:
if (REFCOUNT_COUNT(robject->paging_in_progress) > 0) {
VM_OBJECT_WUNLOCK(object);
@ -1223,15 +1223,15 @@ vm_object_shadow(
* Don't create the new object if the old object isn't shared.
*/
if (source != NULL) {
VM_OBJECT_WLOCK(source);
VM_OBJECT_RLOCK(source);
if (source->ref_count == 1 &&
source->handle == NULL &&
(source->type == OBJT_DEFAULT ||
source->type == OBJT_SWAP)) {
VM_OBJECT_WUNLOCK(source);
VM_OBJECT_RUNLOCK(source);
return;
}
VM_OBJECT_WUNLOCK(source);
VM_OBJECT_RUNLOCK(source);
}
/*
@ -1822,7 +1822,7 @@ vm_object_collapse(vm_object_t object)
* Drop the reference count on backing_object. Since
* its ref_count was at least 2, it will not vanish.
*/
backing_object->ref_count--;
refcount_release(&backing_object->ref_count);
VM_OBJECT_WUNLOCK(backing_object);
counter_u64_add(object_bypasses, 1);
}

View file

@ -106,7 +106,7 @@ struct vm_object {
vm_pindex_t size; /* Object size */
struct domainset_ref domain; /* NUMA policy. */
int generation; /* generation ID */
int ref_count; /* How many refs?? */
volatile u_int ref_count; /* How many refs?? */
int shadow_count; /* how many objects that this is a shadow for */
vm_memattr_t memattr; /* default memory attribute for pages */
objtype_t type; /* type of pager */

View file

@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$");
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>
@ -172,9 +173,9 @@ vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
*/
VM_OBJECT_WLOCK(object);
object->ref_count--;
VM_OBJECT_WUNLOCK(object);
VM_OBJECT_RLOCK(object);
refcount_release(&object->ref_count);
VM_OBJECT_RUNLOCK(object);
vrele(vp);
KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));
@ -285,7 +286,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
KASSERT(object->ref_count == 1,
("leaked ref %p %d", object, object->ref_count));
object->type = OBJT_DEAD;
object->ref_count = 0;
refcount_init(&object->ref_count, 0);
VM_OBJECT_WUNLOCK(object);
vm_object_destroy(object);
goto retry;
@ -294,7 +295,7 @@ vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
VI_UNLOCK(vp);
} else {
VM_OBJECT_WLOCK(object);
object->ref_count++;
refcount_acquire(&object->ref_count);
#if VM_NRESERVLEVEL > 0
vm_object_color(object, 0);
#endif