rpc: use new macros to lock socket buffers

Fixes:	d80a97def9
This commit is contained in:
Gleb Smirnoff 2024-04-09 09:17:19 -07:00
parent cb20a74ca0
commit e205fd318a
5 changed files with 44 additions and 44 deletions

View file

@ -318,9 +318,9 @@ if (error != 0) printf("sosend=%d\n", error);
mreq = NULL;
if (error == EMSGSIZE) {
printf("emsgsize\n");
SOCKBUF_LOCK(&xprt->xp_socket->so_snd);
SOCK_SENDBUF_LOCK(xprt->xp_socket);
sbwait(xprt->xp_socket, SO_SND);
SOCKBUF_UNLOCK(&xprt->xp_socket->so_snd);
SOCK_SENDBUF_UNLOCK(xprt->xp_socket);
sx_xunlock(&xprt->xp_lock);
AUTH_VALIDATE(auth, xid, NULL, NULL);
mtx_lock(&ct->ct_lock);

View file

@ -112,7 +112,7 @@ TAILQ_HEAD(cu_request_list, cu_request);
* member. It is separate from the client private data to facilitate
* multiple clients sharing the same socket. The cs_lock mutex is used
* to protect all fields of this structure, the socket's receive
* buffer SOCKBUF_LOCK is used to ensure that exactly one of these
* buffer lock is used to ensure that exactly one of these
* structures is installed on the socket.
*/
struct cu_socket {
@ -270,11 +270,11 @@ clnt_dg_create(
}
sb = &so->so_rcv;
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
recheck_socket:
if (sb->sb_upcall) {
if (sb->sb_upcall != clnt_dg_soupcall) {
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
printf("clnt_dg_create(): socket already has an incompatible upcall\n");
goto err2;
}
@ -287,9 +287,9 @@ clnt_dg_create(
* We are the first on this socket - allocate the
* structure and install it in the socket.
*/
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
cs = mem_alloc(sizeof(*cs));
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (sb->sb_upcall) {
/*
* We have lost a race with some other client.
@ -303,7 +303,7 @@ clnt_dg_create(
TAILQ_INIT(&cs->cs_pending);
soupcall_set(so, SO_RCV, clnt_dg_soupcall, cs);
}
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
cl->cl_refs = 1;
cl->cl_ops = &clnt_dg_ops;
@ -993,7 +993,7 @@ clnt_dg_destroy(CLIENT *cl)
cs = cu->cu_socket->so_rcv.sb_upcallarg;
clnt_dg_close(cl);
SOCKBUF_LOCK(&cu->cu_socket->so_rcv);
SOCK_RECVBUF_LOCK(cu->cu_socket);
mtx_lock(&cs->cs_lock);
cs->cs_refs--;
@ -1001,13 +1001,13 @@ clnt_dg_destroy(CLIENT *cl)
mtx_unlock(&cs->cs_lock);
soupcall_clear(cu->cu_socket, SO_RCV);
clnt_dg_upcallsdone(cu->cu_socket, cs);
SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(cu->cu_socket);
mtx_destroy(&cs->cs_lock);
mem_free(cs, sizeof(*cs));
lastsocketref = TRUE;
} else {
mtx_unlock(&cs->cs_lock);
SOCKBUF_UNLOCK(&cu->cu_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(cu->cu_socket);
lastsocketref = FALSE;
}
@ -1052,14 +1052,14 @@ clnt_dg_soupcall(struct socket *so, void *arg, int waitflag)
uio.uio_resid = 1000000000;
uio.uio_td = curthread;
do {
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
m = NULL;
control = NULL;
rcvflag = MSG_DONTWAIT;
error = soreceive(so, NULL, &uio, &m, &control, &rcvflag);
if (control)
m_freem(control);
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (error == EWOULDBLOCK)
break;
@ -1140,7 +1140,7 @@ static void
clnt_dg_upcallsdone(struct socket *so, struct cu_socket *cs)
{
SOCKBUF_LOCK_ASSERT(&so->so_rcv);
SOCK_RECVBUF_LOCK_ASSERT(so);
while (cs->cs_upcallrefs > 0)
(void) msleep(&cs->cs_upcallrefs, SOCKBUF_MTX(&so->so_rcv), 0,

View file

@ -263,9 +263,9 @@ clnt_vc_create(
cl->cl_private = ct;
cl->cl_auth = authnone_create();
SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_LOCK(ct->ct_socket);
soupcall_set(ct->ct_socket, SO_RCV, clnt_vc_soupcall, ct);
SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(ct->ct_socket);
ct->ct_raw = NULL;
ct->ct_record = NULL;
@ -437,9 +437,9 @@ clnt_vc_call(
mreq = NULL;
if (error == EMSGSIZE || (error == ERESTART &&
(ct->ct_waitflag & PCATCH) == 0 && trycnt-- > 0)) {
SOCKBUF_LOCK(&ct->ct_socket->so_snd);
SOCK_SENDBUF_LOCK(ct->ct_socket);
sbwait(ct->ct_socket, SO_SND);
SOCKBUF_UNLOCK(&ct->ct_socket->so_snd);
SOCK_SENDBUF_UNLOCK(ct->ct_socket);
AUTH_VALIDATE(auth, xid, NULL, NULL);
mtx_lock(&ct->ct_lock);
TAILQ_REMOVE(&ct->ct_pending, cr, cr_link);
@ -821,12 +821,12 @@ clnt_vc_close(CLIENT *cl)
ct->ct_closing = TRUE;
mtx_unlock(&ct->ct_lock);
SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_LOCK(ct->ct_socket);
if (ct->ct_socket->so_rcv.sb_upcall != NULL) {
soupcall_clear(ct->ct_socket, SO_RCV);
clnt_vc_upcallsdone(ct);
}
SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(ct->ct_socket);
/*
* Abort any pending requests and wait until everyone
@ -967,7 +967,7 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
/*
* If another thread is already here, it must be in
* soreceive(), so just return to avoid races with it.
* ct_upcallrefs is protected by the SOCKBUF_LOCK(),
* ct_upcallrefs is protected by the socket receive buffer lock,
* which is held in this function, except when
* soreceive() is called.
*/
@ -987,9 +987,9 @@ clnt_vc_soupcall(struct socket *so, void *arg, int waitflag)
if (ct->ct_sslrefno != 0 && (ct->ct_rcvstate &
RPCRCVSTATE_NORMAL) != 0)
rcvflag |= MSG_TLSAPPDATA;
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
error = soreceive(so, NULL, &uio, &m, &m2, &rcvflag);
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (error == EWOULDBLOCK) {
/*
@ -1255,7 +1255,7 @@ static void
clnt_vc_upcallsdone(struct ct_data *ct)
{
SOCKBUF_LOCK_ASSERT(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_LOCK_ASSERT(ct->ct_socket);
while (ct->ct_upcallrefs > 0)
(void) msleep(&ct->ct_upcallrefs,
@ -1296,9 +1296,9 @@ clnt_vc_dotlsupcall(void *data)
if ((ct->ct_rcvstate & RPCRCVSTATE_SOUPCALLNEEDED) != 0) {
ct->ct_rcvstate &= ~RPCRCVSTATE_SOUPCALLNEEDED;
mtx_unlock(&ct->ct_lock);
SOCKBUF_LOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_LOCK(ct->ct_socket);
clnt_vc_soupcall(ct->ct_socket, ct, M_NOWAIT);
SOCKBUF_UNLOCK(&ct->ct_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(ct->ct_socket);
mtx_lock(&ct->ct_lock);
}
msleep(&ct->ct_sslrefno, &ct->ct_lock, 0, "clntvcdu", hz);

View file

@ -130,9 +130,9 @@ svc_dg_create(SVCPOOL *pool, struct socket *so, size_t sendsize,
xprt_register(xprt);
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
soupcall_set(so, SO_RCV, svc_dg_soupcall, xprt);
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
return (xprt);
freedata:
@ -190,18 +190,18 @@ svc_dg_recv(SVCXPRT *xprt, struct rpc_msg *msg,
* from racing the upcall after our soreadable() call
* returns false.
*/
SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_LOCK(xprt->xp_socket);
if (!soreadable(xprt->xp_socket))
xprt_inactive_self(xprt);
SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(xprt->xp_socket);
sx_xunlock(&xprt->xp_lock);
return (FALSE);
}
if (error) {
SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_LOCK(xprt->xp_socket);
soupcall_clear(xprt->xp_socket, SO_RCV);
SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(xprt->xp_socket);
xprt_inactive_self(xprt);
sx_xunlock(&xprt->xp_lock);
return (FALSE);
@ -266,9 +266,9 @@ static void
svc_dg_destroy(SVCXPRT *xprt)
{
SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_LOCK(xprt->xp_socket);
soupcall_clear(xprt->xp_socket, SO_RCV);
SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(xprt->xp_socket);
sx_destroy(&xprt->xp_lock);
if (xprt->xp_socket)

View file

@ -315,10 +315,10 @@ svc_vc_create_conn(SVCPOOL *pool, struct socket *so, struct sockaddr *raddr)
xprt_register(xprt);
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
xprt->xp_upcallset = 1;
soupcall_set(so, SO_RCV, svc_vc_soupcall, xprt);
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
/*
* Throw the transport into the active list in case it already
@ -537,13 +537,13 @@ svc_vc_destroy(SVCXPRT *xprt)
struct cf_conn *cd = (struct cf_conn *)xprt->xp_p1;
CLIENT *cl = (CLIENT *)xprt->xp_p2;
SOCKBUF_LOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_LOCK(xprt->xp_socket);
if (xprt->xp_upcallset) {
xprt->xp_upcallset = 0;
if (xprt->xp_socket->so_rcv.sb_upcall != NULL)
soupcall_clear(xprt->xp_socket, SO_RCV);
}
SOCKBUF_UNLOCK(&xprt->xp_socket->so_rcv);
SOCK_RECVBUF_UNLOCK(xprt->xp_socket);
if (cl != NULL)
CLNT_RELEASE(cl);
@ -780,10 +780,10 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
/* Check for next request in a pending queue. */
svc_vc_process_pending(xprt);
if (cd->mreq == NULL || cd->resid != 0) {
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (!soreadable(so))
xprt_inactive_self(xprt);
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
}
sx_xunlock(&xprt->xp_lock);
@ -834,10 +834,10 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
* after our call to soreceive fails with
* EWOULDBLOCK.
*/
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (!soreadable(so))
xprt_inactive_self(xprt);
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
sx_xunlock(&xprt->xp_lock);
return (FALSE);
}
@ -877,12 +877,12 @@ svc_vc_recv(SVCXPRT *xprt, struct rpc_msg *msg,
if (error) {
KRPC_CURVNET_RESTORE();
SOCKBUF_LOCK(&so->so_rcv);
SOCK_RECVBUF_LOCK(so);
if (xprt->xp_upcallset) {
xprt->xp_upcallset = 0;
soupcall_clear(so, SO_RCV);
}
SOCKBUF_UNLOCK(&so->so_rcv);
SOCK_RECVBUF_UNLOCK(so);
xprt_inactive_self(xprt);
cd->strm_stat = XPRT_DIED;
sx_xunlock(&xprt->xp_lock);