SUNRPC: change the back-channel queue to lwq

This removes the need to store and update back-links in the list.
It also removes the need for the _bh version of spin_lock().

Signed-off-by: NeilBrown <neilb@suse.de>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Author: NeilBrown, 2023-09-11 10:40:22 -04:00; committed by Chuck Lever
parent 580a25756a
commit 15d39883ee
6 changed files with 9 additions and 21 deletions
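For context: lwq is the light-weight queue added in the same patch series (include/linux/lwq.h). Entries embed an lwq_node; enqueue is lock-free and safe from any context, while dequeue does its own locking internally. A minimal sketch of the pattern, using only the lwq calls that appear in the hunks below; the my_* names are hypothetical:

	#include <linux/lwq.h>

	struct my_item {
		struct lwq_node	link;		/* embedded node, as rq_bc_list becomes */
		int		payload;
	};

	static struct lwq my_queue;		/* as sv_cb_list becomes; initialised
						 * once with lwq_init(), see below */

	static void my_produce(struct my_item *item)
	{
		/* Lock-free enqueue: no spin_lock_bh() around it. */
		lwq_enqueue(&item->link, &my_queue);
	}

	static struct my_item *my_consume(void)
	{
		/* Removes and returns the oldest entry, or NULL if empty;
		 * the type/member arguments recover the container. */
		return lwq_dequeue(&my_queue, struct my_item, link);
	}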

--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h

@@ -90,10 +90,9 @@ struct svc_serv {
 	int			(*sv_threadfn)(void *data);
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
-	struct list_head	sv_cb_list;	/* queue for callback requests
+	struct lwq		sv_cb_list;	/* queue for callback requests
 						 * that arrive over the same
 						 * connection */
-	spinlock_t		sv_cb_lock;	/* protects the svc_cb_list */
 	bool			sv_bc_enabled;	/* service uses backchannel */
 #endif /* CONFIG_SUNRPC_BACKCHANNEL */
 };
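Note that sv_cb_lock is not replaced by anything: struct lwq carries its own synchronisation, so the queue head shrinks from a list_head/spinlock_t pair to a single field needing one initialisation call (performed in the net/sunrpc/svc.c hunk below). A sketch:

	static struct lwq cb_list;	/* one field, no companion lock */

	static void cb_setup(void)
	{
		lwq_init(&cb_list);	/* replaces INIT_LIST_HEAD() + spin_lock_init() */
	}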

--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h

@@ -57,6 +57,7 @@ struct xprt_class;
 struct seq_file;
 struct svc_serv;
 struct net;
+#include <linux/lwq.h>
 
 /*
  * This describes a complete RPC request
@@ -121,7 +122,7 @@ struct rpc_rqst {
 	int			rq_ntrans;
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
-	struct list_head	rq_bc_list;	/* Callback service list */
+	struct lwq_node		rq_bc_list;	/* Callback service list */
 	unsigned long		rq_bc_pa_state;	/* Backchannel prealloc state */
 	struct list_head	rq_bc_pa_list;	/* Backchannel prealloc list */
 #endif /* CONFIG_SUNRPC_BACKCHANEL */
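This hunk is where the commit message's back-link saving shows up: list_head is doubly linked, while lwq_node wraps the singly linked llist_node, so each queued request drops a prev pointer and enqueue/dequeue never have to update one. For comparison, the two kernel definitions:

	/* include/linux/types.h */
	struct list_head {
		struct list_head *next, *prev;	/* prev is the back-link */
	};

	/* include/linux/llist.h */
	struct llist_node {
		struct llist_node *next;	/* singly linked: no back-link */
	};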

--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c

@@ -83,7 +83,6 @@ static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
 		return NULL;
 
 	req->rq_xprt = xprt;
-	INIT_LIST_HEAD(&req->rq_bc_list);
 
 	/* Preallocate one XDR receive buffer */
 	if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) {
@@ -367,8 +366,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
 	dprintk("RPC: add callback request to list\n");
 	xprt_get(xprt);
-	spin_lock(&bc_serv->sv_cb_lock);
-	list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
-	spin_unlock(&bc_serv->sv_cb_lock);
+	lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
 	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);
 }
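Assembled from the context and + lines, the tail of xprt_complete_bc_request() now reads as follows. The enqueue needs neither the lock pair nor the INIT_LIST_HEAD() dropped earlier in this file; an lwq_node evidently needs no initialisation before its first enqueue:

	dprintk("RPC: add callback request to list\n");
	xprt_get(xprt);
	/* lock-free; replaces spin_lock()/list_add()/spin_unlock() */
	lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list);
	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);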

--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c

@@ -438,8 +438,7 @@ EXPORT_SYMBOL_GPL(svc_bind);
 static void
 __svc_init_bc(struct svc_serv *serv)
 {
-	INIT_LIST_HEAD(&serv->sv_cb_list);
-	spin_lock_init(&serv->sv_cb_lock);
+	lwq_init(&serv->sv_cb_list);
 }
 #else
 static void

--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c

@@ -705,7 +705,7 @@ svc_thread_should_sleep(struct svc_rqst *rqstp)
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 	if (svc_is_backchannel(rqstp)) {
-		if (!list_empty(&rqstp->rq_server->sv_cb_list))
+		if (!lwq_empty(&rqstp->rq_server->sv_cb_list))
 			return false;
 	}
 #endif
@@ -878,18 +878,12 @@ void svc_recv(struct svc_rqst *rqstp)
 		struct svc_serv *serv = rqstp->rq_server;
 		struct rpc_rqst *req;
 
-		spin_lock_bh(&serv->sv_cb_lock);
-		req = list_first_entry_or_null(&serv->sv_cb_list,
-					       struct rpc_rqst, rq_bc_list);
+		req = lwq_dequeue(&serv->sv_cb_list,
+				  struct rpc_rqst, rq_bc_list);
 		if (req) {
-			list_del(&req->rq_bc_list);
-			spin_unlock_bh(&serv->sv_cb_lock);
 			svc_thread_wake_next(rqstp);
 			svc_process_bc(req, rqstp);
 			return;
 		}
-		spin_unlock_bh(&serv->sv_cb_lock);
 	}
 #endif
 }
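Likewise assembled from the hunk above, the back-channel branch of svc_recv() after the patch: lwq_dequeue() collapses the lock / list_first_entry_or_null() / list_del() / unlock sequence into one call, and there is no lock to drop on either exit path. The matching svc_thread_should_sleep() check uses lwq_empty(), which is similarly safe without a lock:

	if (svc_is_backchannel(rqstp)) {
		struct svc_serv *serv = rqstp->rq_server;
		struct rpc_rqst *req;

		req = lwq_dequeue(&serv->sv_cb_list,
				  struct rpc_rqst, rq_bc_list);
		if (req) {
			svc_thread_wake_next(rqstp);
			svc_process_bc(req, rqstp);
			return;
		}
	}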

--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c

@@ -263,9 +263,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 	/* Queue rqst for ULP's callback service */
 	bc_serv = xprt->bc_serv;
 	xprt_get(xprt);
-	spin_lock(&bc_serv->sv_cb_lock);
-	list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
-	spin_unlock(&bc_serv->sv_cb_lock);
+	lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
 	svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]);