Drop the kqueue global mutex as soon as we are finished with it rather
than keeping it locked until we exit the function.  Holding the lock
was an optimization for the case where it would otherwise be dropped
and later reacquired, but that optimization was broken when kevents
were moved from UFS to VFS and the knote list lock for a vnode kevent
became the lockmgr vnode lock.  If one tried to use a kqueue that
contained events for a kqueue fd followed by a vnode, the kq global
lock would end up being held when the vnode lock was acquired, which
could result in sleeping with a mutex held (and subsequent panics) if
the vnode lock was contested.

Reviewed by:	jmg
Tested by:	ps (on 6.x)
MFC after:	3 days
Author: John Baldwin
Date:   2006-04-14 14:27:28 +00:00
Commit: a29b4f6eec (parent 62463f6768)
Notes:  svn2git 2020-12-20 02:59:44 +00:00
        svn path=/head/; revision=157754
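
For reference, the scenario described in the commit message can be set up
from userland roughly as follows.  This is only an illustrative sketch, not
a test case from the original report; the file path, timer values, and
filter flags are arbitrary choices.  It registers a kevent for a kqueue fd
followed by one for a vnode, activates both, and then scans the outer
kqueue:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev[2], out[2];
	struct timespec ts = { 1, 0 };
	int kq, inner, fd, n;

	if ((kq = kqueue()) == -1 || (inner = kqueue()) == -1)
		err(1, "kqueue");
	/* Any file on a local filesystem gives the second knote a vnode. */
	if ((fd = open("/tmp/kq-demo", O_RDWR | O_CREAT, 0644)) == -1)
		err(1, "open");

	/* A kevent for a kqueue fd followed by one for a vnode. */
	EV_SET(&kev[0], inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
	EV_SET(&kev[1], fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_WRITE | NOTE_ATTRIB, 0, NULL);
	if (kevent(kq, kev, 2, NULL, 0, NULL) == -1)
		err(1, "kevent(register)");

	/*
	 * Activate the kqueue-fd knote first (a pending event on the inner
	 * kqueue makes it readable), then the vnode knote, so a scan of kq
	 * processes the kqueue-fd knote before the vnode knote.
	 */
	EV_SET(&kev[0], 1, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 1, NULL);
	if (kevent(inner, kev, 1, NULL, 0, NULL) == -1)
		err(1, "kevent(timer)");
	usleep(20000);
	if (write(fd, "x", 1) != 1)
		err(1, "write");

	/*
	 * Scanning kq walks both knotes in kqueue_scan(); with the old code
	 * the kq global lock taken for the kqueue-fd knote could still be
	 * held when the vnode knote's list lock (the vnode lock) was taken.
	 */
	if ((n = kevent(kq, NULL, 0, out, 2, &ts)) == -1)
		err(1, "kevent(scan)");
	printf("%d event(s) returned\n", n);

	close(fd);
	close(inner);
	close(kq);
	return (0);
}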


@@ -1181,7 +1181,6 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 		if (timeout < 0) {
 			error = EWOULDBLOCK;
 		} else {
-			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 			kq->kq_state |= KQ_SLEEP;
 			error = msleep(kq, &kq->kq_lock, PSOCK | PCATCH,
 			    "kqread", timeout);
@@ -1203,7 +1202,6 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 		if ((kn->kn_status == KN_MARKER && kn != marker) ||
 		    (kn->kn_status & KN_INFLUX) == KN_INFLUX) {
-			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 			kq->kq_state |= KQ_FLUXWAIT;
 			error = msleep(kq, &kq->kq_lock, PSOCK,
 			    "kqflxwt", 0);
@@ -1248,6 +1246,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 			KN_LIST_LOCK(kn);
 			if (kn->kn_fop->f_event(kn, 0) == 0) {
 				KQ_LOCK(kq);
+				KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 				kn->kn_status &=
 				    ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX);
 				kq->kq_count--;
@@ -1256,6 +1255,7 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 			}
 			*kevp = kn->kn_kevent;
 			KQ_LOCK(kq);
+			KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 			if (kn->kn_flags & EV_CLEAR) {
 				kn->kn_data = 0;
 				kn->kn_fflags = 0;
@@ -1266,7 +1266,6 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 			kn->kn_status &= ~(KN_INFLUX);
 			KN_LIST_UNLOCK(kn);
 		}
 		/* we are returning a copy to the user */
@@ -1288,7 +1287,6 @@ kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
 done:
 	KQ_OWNED(kq);
 	KQ_UNLOCK_FLUX(kq);
-	KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal);
 	knote_free(marker);
 done_nl:
 	KQ_NOTOWNED(kq);