SUNRPC: Allow dynamic allocation of back channel slots
Now that the reads happen in a process context rather than a softirq, it is safe to allocate back channel slots using a reclaiming allocation.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent 067c469671
commit 0d1bf3407c

1 changed file with 24 additions and 15 deletions
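In kernel terms, the "reclaiming allocation" the commit message refers to is GFP_KERNEL: it may sleep and enter direct reclaim, so it is legal only in process context, whereas the old softirq read path had to use GFP_ATOMIC. A minimal illustration of the distinction (not taken from the patch; req stands for any allocation target):

	/* Atomic context (e.g. softirq): must not sleep, so it cannot
	 * reclaim memory and fails more readily under pressure. */
	req = kmalloc(sizeof(*req), GFP_ATOMIC);

	/* Process context: may sleep and reclaim memory, so it is far
	 * less likely to fail. This is what the patch switches to. */
	req = kmalloc(sizeof(*req), GFP_KERNEL);

The four hunks below implement that switch by moving the allocation out from under xprt->bc_pa_lock.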
@@ -235,7 +235,8 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
 		list_empty(&xprt->bc_pa_list) ? "true" : "false");
 }
 
-static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
+static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
+		struct rpc_rqst *new)
 {
 	struct rpc_rqst *req = NULL;
 
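The hunk above renames the helper and inverts its contract: xprt_alloc_bc_request() used to allocate a slot itself, while xprt_get_bc_request() only consumes a slot the caller has already allocated. The two signatures side by side (comments added for this summary, not part of the patch):

	/* Before: could allocate internally, necessarily with GFP_ATOMIC,
	 * because the caller holds the bc_pa_lock spinlock. */
	static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid);

	/* After: at most links in the caller's preallocated slot 'new'. */
	static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
			struct rpc_rqst *new);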
@@ -243,10 +244,9 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 	if (atomic_read(&xprt->bc_free_slots) <= 0)
 		goto not_found;
 	if (list_empty(&xprt->bc_pa_list)) {
-		req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
-		if (!req)
+		if (!new)
 			goto not_found;
-		list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
+		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
 		xprt->bc_alloc_count++;
 	}
 	req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
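With the hunk above applied, the empty-list slow path no longer performs a GFP_ATOMIC allocation under the lock; assembled, it reads:

	if (list_empty(&xprt->bc_pa_list)) {
		/* No free slots: fall back to the caller's spare, if any. */
		if (!new)
			goto not_found;
		list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
		xprt->bc_alloc_count++;
	}

(The comment is added here; it is not part of the patch.)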
@@ -256,8 +256,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
 			sizeof(req->rq_private_buf));
 	req->rq_xid = xid;
 	req->rq_connect_cookie = xprt->connect_cookie;
-not_found:
 	dprintk("RPC:       backchannel req=%p\n", req);
+not_found:
 	return req;
 }
 
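This hunk moves the not_found label below the dprintk(), a small behavioral cleanup: the debug line now fires only when a request was actually set up, and the early-exit paths return NULL without logging. Assembled, the function tail reads:

	req->rq_xid = xid;
	req->rq_connect_cookie = xprt->connect_cookie;
	dprintk("RPC:       backchannel req=%p\n", req);
not_found:
	return req;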
@@ -320,18 +320,27 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
  */
 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
 {
-	struct rpc_rqst *req;
+	struct rpc_rqst *req, *new = NULL;
 
-	spin_lock(&xprt->bc_pa_lock);
-	list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
-		if (req->rq_connect_cookie != xprt->connect_cookie)
-			continue;
-		if (req->rq_xid == xid)
-			goto found;
-	}
-	req = xprt_alloc_bc_request(xprt, xid);
+	do {
+		spin_lock(&xprt->bc_pa_lock);
+		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
+			if (req->rq_connect_cookie != xprt->connect_cookie)
+				continue;
+			if (req->rq_xid == xid)
+				goto found;
+		}
+		req = xprt_get_bc_request(xprt, xid, new);
 found:
-	spin_unlock(&xprt->bc_pa_lock);
+		spin_unlock(&xprt->bc_pa_lock);
+		if (new) {
+			if (req != new)
+				xprt_free_bc_rqst(new);
+			break;
+		} else if (req)
+			break;
+		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
+	} while (new);
 	return req;
 }
 
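Assembled, the post-patch lookup is an optimistic retry loop: search under the lock, and if nothing turns up, drop the lock, do the reclaiming allocation, then retry, discarding the spare slot if the second pass was satisfied some other way. The comments below are added for this summary and are not in the source:

struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *req, *new = NULL;

	do {
		/* Look for a slot already bound to this xid. */
		spin_lock(&xprt->bc_pa_lock);
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;
			if (req->rq_xid == xid)
				goto found;
		}
		/* None bound: claim a free slot, possibly the spare. */
		req = xprt_get_bc_request(xprt, xid, new);
found:
		spin_unlock(&xprt->bc_pa_lock);
		if (new) {
			/* Second pass: free the spare if it went unused. */
			if (req != new)
				xprt_free_bc_rqst(new);
			break;
		} else if (req)
			break;
		/* First pass failed; no locks are held here, so a
		 * sleeping GFP_KERNEL allocation is safe. Then retry. */
		new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
	} while (new);
	return req;
}

Keeping all list manipulation inside xprt_get_bc_request() under bc_pa_lock, while confining the sleeping allocation to the unlocked window, is what lets the backchannel slot table grow with GFP_KERNEL instead of GFP_ATOMIC.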