
Merge tag 'nfs-for-4.15-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "Stable bugfixes:
   - Revalidate "." and ".." correctly on open
   - Avoid RCU usage in tracepoints
   - Fix ugly referral attributes
   - Fix a typo in nomigration mount option
   - Revert "NFS: Move the flock open mode check into nfs_flock()"

  Features:
   - Implement a stronger send queue accounting system for NFS over RDMA
   - Switch some atomics to the new refcount_t type

  Other bugfixes and cleanups:
   - Clean up access mode bits
   - Remove special-case revalidations in nfs_opendir()
   - Improve invalidating NFS over RDMA memory for async operations that
     time out
   - Handle NFS over RDMA replies with a workqueue
   - Handle NFS over RDMA sends with a workqueue
   - Fix up replaying interrupted requests
   - Remove dead NFS over RDMA definitions
   - Update NFS over RDMA copyright information
   - Be more consistent with bool initialization and comparisons
   - Mark expected switch fall-throughs
   - Various sunrpc tracepoint cleanups
   - Fix various OPEN races
   - Fix a typo in nfs_rename()
   - Use common error handling code in nfs_lock_and_join_request()
   - Check that some structures are properly cleaned up during
     net_exit()
   - Remove net pointer from dprintk()s"
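
The refcount_t switch called out under "Features" above is the most repeated change in the diffs below: reference counters move from atomic_t to refcount_t, and the matching atomic_*() calls become refcount_*() calls, which saturate instead of wrapping on overflow. A minimal sketch of the conversion pattern in kernel-style C (illustrative only; "nfs_thing" is a made-up stand-in for the real structures such as nfs_client, nfs_lock_context and nfs4_ff_layout_mirror):

#include <linux/refcount.h>
#include <linux/slab.h>

struct nfs_thing {
	refcount_t count;			/* was: atomic_t count; */
};

static struct nfs_thing *nfs_thing_alloc(void)
{
	struct nfs_thing *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		refcount_set(&p->count, 1);	/* was: atomic_set() */
	return p;
}

static void nfs_thing_get(struct nfs_thing *p)
{
	refcount_inc(&p->count);		/* was: atomic_inc() */
}

static void nfs_thing_put(struct nfs_thing *p)
{
	/* was: atomic_dec_and_test(); frees on the last reference */
	if (refcount_dec_and_test(&p->count))
		kfree(p);
}

Lookup paths likewise use refcount_inc_not_zero() in place of atomic_inc_not_zero(), so an object already headed for the free path is never resurrected.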

* tag 'nfs-for-4.15-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (62 commits)
  NFS: Revert "NFS: Move the flock open mode check into nfs_flock()"
  NFS: Fix typo in nomigration mount option
  nfs: Fix ugly referral attributes
  NFS: super: mark expected switch fall-throughs
  sunrpc: remove net pointer from messages
  nfs: remove net pointer from messages
  sunrpc: exit_net cleanup check added
  nfs client: exit_net cleanup check added
  nfs/write: Use common error handling code in nfs_lock_and_join_requests()
  NFSv4: Replace closed stateids with the "invalid special stateid"
  NFSv4: nfs_set_open_stateid must not trigger state recovery for closed state
  NFSv4: Check the open stateid when searching for expired state
  NFSv4: Clean up nfs4_delegreturn_done
  NFSv4: cleanup nfs4_close_done
  NFSv4: Retry NFS4ERR_OLD_STATEID errors in layoutreturn
  pNFS: Retry NFS4ERR_OLD_STATEID errors in layoutreturn-on-close
  NFSv4: Don't try to CLOSE if the stateid 'other' field has changed
  NFSv4: Retry CLOSE and DELEGRETURN on NFS4ERR_OLD_STATEID.
  NFS: Fix a typo in nfs_rename()
  NFSv4: Fix open create exclusive when the server reboots
  ...
Linus Torvalds 2017-11-17 14:18:00 -08:00
commit c3e9c04b89
43 changed files with 1194 additions and 719 deletions
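
Several of the OPEN-race and NFS4ERR_OLD_STATEID fixes in the nfs4proc.c hunks below lean on one rule that the in-diff comment cites from RFC 7530 Section 9.1.4.2 and RFC 5661 Section 8.2.2: open stateid seqids start at 1 and are incremented on every state transition, wrapping from 0xffffffff back to 1 (0 is reserved). The in-order test added as nfs_state_log_out_of_order_open_stateid() therefore reduces to the following check, restated here as self-contained userspace C for illustration (not the kernel code itself):

#include <stdbool.h>
#include <stdint.h>

/* An incoming open stateid is "next in sequence" relative to the cached
 * one if its seqid is cached+1, or if it wrapped from 0xffffffff to 1. */
static bool seqid_in_sequence(uint32_t cached, uint32_t incoming)
{
	return incoming == cached + 1u ||
	       (incoming == 1u && cached == UINT32_MAX);
}

Anything out of sequence means a reply for an earlier transition is still in flight, so the client sets NFS_STATE_CHANGE_WAIT and delays applying the update until it is in order.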

View file

@@ -67,7 +67,7 @@ int nfs_cache_upcall(struct cache_detail *cd, char *entry_name)
*/
void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
{
-if (atomic_dec_and_test(&dreq->count))
+if (refcount_dec_and_test(&dreq->count))
kfree(dreq);
}
@@ -87,7 +87,7 @@ static struct cache_deferred_req *nfs_dns_cache_defer(struct cache_req *req)
dreq = container_of(req, struct nfs_cache_defer_req, req);
dreq->deferred_req.revisit = nfs_dns_cache_revisit;
-atomic_inc(&dreq->count);
+refcount_inc(&dreq->count);
return &dreq->deferred_req;
}
@@ -99,7 +99,7 @@ struct nfs_cache_defer_req *nfs_cache_defer_req_alloc(void)
dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
if (dreq) {
init_completion(&dreq->completion);
-atomic_set(&dreq->count, 1);
+refcount_set(&dreq->count, 1);
dreq->req.defer = nfs_dns_cache_defer;
}
return dreq;

View file

@@ -16,7 +16,7 @@ struct nfs_cache_defer_req {
struct cache_req req;
struct cache_deferred_req deferred_req;
struct completion completion;
-atomic_t count;
+refcount_t count;
};
extern int nfs_cache_upcall(struct cache_detail *cd, char *entry_name);

View file

@@ -49,15 +49,15 @@ static int nfs4_callback_up_net(struct svc_serv *serv, struct net *net)
if (ret <= 0)
goto out_err;
nn->nfs_callback_tcpport = ret;
-dprintk("NFS: Callback listener port = %u (af %u, net %p)\n",
-nn->nfs_callback_tcpport, PF_INET, net);
+dprintk("NFS: Callback listener port = %u (af %u, net %x)\n",
+nn->nfs_callback_tcpport, PF_INET, net->ns.inum);
ret = svc_create_xprt(serv, "tcp", net, PF_INET6,
nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
if (ret > 0) {
nn->nfs_callback_tcpport6 = ret;
-dprintk("NFS: Callback listener port = %u (af %u, net %p)\n",
-nn->nfs_callback_tcpport6, PF_INET6, net);
+dprintk("NFS: Callback listener port = %u (af %u, net %x\n",
+nn->nfs_callback_tcpport6, PF_INET6, net->ns.inum);
} else if (ret != -EAFNOSUPPORT)
goto out_err;
return 0;
@@ -185,7 +185,7 @@ static void nfs_callback_down_net(u32 minorversion, struct svc_serv *serv, struc
if (--nn->cb_users[minorversion])
return;
-dprintk("NFS: destroy per-net callback data; net=%p\n", net);
+dprintk("NFS: destroy per-net callback data; net=%x\n", net->ns.inum);
svc_shutdown_net(serv, net);
}
@@ -198,7 +198,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
if (nn->cb_users[minorversion]++)
return 0;
-dprintk("NFS: create per-net callback data; net=%p\n", net);
+dprintk("NFS: create per-net callback data; net=%x\n", net->ns.inum);
ret = svc_bind(serv, net);
if (ret < 0) {
@@ -223,7 +223,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
err_bind:
nn->cb_users[minorversion]--;
dprintk("NFS: Couldn't create callback socket: err = %d; "
-"net = %p\n", ret, net);
+"net = %x\n", ret, net->ns.inum);
return ret;
}

View file

@@ -440,7 +440,7 @@ static bool referring_call_exists(struct nfs_client *clp,
uint32_t nrclists,
struct referring_call_list *rclists)
{
-bool status = 0;
+bool status = false;
int i, j;
struct nfs4_session *session;
struct nfs4_slot_table *tbl;

View file

@@ -163,7 +163,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
clp->rpc_ops = clp->cl_nfs_mod->rpc_ops;
-atomic_set(&clp->cl_count, 1);
+refcount_set(&clp->cl_count, 1);
clp->cl_cons_state = NFS_CS_INITING;
memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen);
@@ -269,7 +269,7 @@ void nfs_put_client(struct nfs_client *clp)
nn = net_generic(clp->cl_net, nfs_net_id);
-if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
+if (refcount_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
list_del(&clp->cl_share_link);
nfs_cb_idr_remove_locked(clp);
spin_unlock(&nn->nfs_client_lock);
@@ -314,7 +314,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
sap))
continue;
-atomic_inc(&clp->cl_count);
+refcount_inc(&clp->cl_count);
return clp;
}
return NULL;
@@ -1006,7 +1006,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source,
/* Copy data from the source */
server->nfs_client = source->nfs_client;
server->destroy = source->destroy;
-atomic_inc(&server->nfs_client->cl_count);
+refcount_inc(&server->nfs_client->cl_count);
nfs_server_copy_userdata(server, source);
server->fsid = fattr->fsid;
@@ -1166,7 +1166,7 @@ static int nfs_server_list_show(struct seq_file *m, void *v)
clp->rpc_ops->version,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
-atomic_read(&clp->cl_count),
+refcount_read(&clp->cl_count),
clp->cl_hostname);
rcu_read_unlock();

View file

@@ -1040,6 +1040,33 @@ int nfs_delegations_present(struct nfs_client *clp)
return ret;
}
+/**
+* nfs4_refresh_delegation_stateid - Update delegation stateid seqid
+* @dst: stateid to refresh
+* @inode: inode to check
+*
+* Returns "true" and updates "dst->seqid" * if inode had a delegation
+* that matches our delegation stateid. Otherwise "false" is returned.
+*/
+bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
+{
+struct nfs_delegation *delegation;
+bool ret = false;
+if (!inode)
+goto out;
+rcu_read_lock();
+delegation = rcu_dereference(NFS_I(inode)->delegation);
+if (delegation != NULL &&
+nfs4_stateid_match_other(dst, &delegation->stateid)) {
+dst->seqid = delegation->stateid.seqid;
+return ret;
+}
+rcu_read_unlock();
+out:
+return ret;
+}
/**
* nfs4_copy_delegation_stateid - Copy inode's state ID information
* @inode: inode to check

View file

@@ -62,6 +62,7 @@ int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, struct rpc_cred **cred);
+bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
int nfs4_have_delegation(struct inode *inode, fmode_t flags);

View file

@@ -118,13 +118,6 @@ nfs_opendir(struct inode *inode, struct file *filp)
goto out;
}
filp->private_data = ctx;
-if (filp->f_path.dentry == filp->f_path.mnt->mnt_root) {
-/* This is a mountpoint, so d_revalidate will never
-* have been called, so we need to refresh the
-* inode (for close-open consistency) ourselves.
-*/
-__nfs_revalidate_inode(NFS_SERVER(inode), inode);
-}
out:
put_rpccred(cred);
return res;
@@ -253,7 +246,7 @@ int nfs_readdir_search_for_pos(struct nfs_cache_array *array, nfs_readdir_descri
desc->cache_entry_index = index;
return 0;
out_eof:
-desc->eof = 1;
+desc->eof = true;
return -EBADCOOKIE;
}
@@ -307,7 +300,7 @@ int nfs_readdir_search_for_cookie(struct nfs_cache_array *array, nfs_readdir_des
if (array->eof_index >= 0) {
status = -EBADCOOKIE;
if (*desc->dir_cookie == array->last_cookie)
-desc->eof = 1;
+desc->eof = true;
}
out:
return status;
@@ -761,7 +754,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->string.name, ent->string.len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
-desc->eof = 1;
+desc->eof = true;
break;
}
desc->ctx->pos++;
@@ -773,7 +766,7 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc)
ctx->duped = 1;
}
if (array->eof_index >= 0)
-desc->eof = 1;
+desc->eof = true;
kunmap(desc->page);
cache_page_release(desc);
@@ -873,7 +866,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
if (res == -EBADCOOKIE) {
res = 0;
/* This means either end of directory */
-if (*desc->dir_cookie && desc->eof == 0) {
+if (*desc->dir_cookie && !desc->eof) {
/* Or that the server has 'lost' a cookie */
res = uncached_readdir(desc);
if (res == 0)
@@ -1241,8 +1234,7 @@ static int nfs_weak_revalidate(struct dentry *dentry, unsigned int flags)
return 0;
}
-if (nfs_mapping_need_revalidate_inode(inode))
-error = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
+error = nfs_lookup_verify_inode(inode, flags);
dfprintk(LOOKUPCACHE, "NFS: %s: inode %lu is %s\n",
__func__, inode->i_ino, error ? "invalid" : "valid");
return !error;
@@ -1393,6 +1385,7 @@ static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
const struct dentry_operations nfs4_dentry_operations = {
.d_revalidate = nfs4_lookup_revalidate,
.d_weak_revalidate = nfs_weak_revalidate,
+.d_delete = nfs_dentry_delete,
.d_iput = nfs_dentry_iput,
.d_automount = nfs_d_automount,
@@ -2064,7 +2057,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* should mark the directories for revalidation.
*/
d_move(old_dentry, new_dentry);
-nfs_set_verifier(new_dentry,
+nfs_set_verifier(old_dentry,
nfs_save_change_attribute(new_dir));
} else if (error == -ENOENT)
nfs_dentry_handle_enoent(old_dentry);
@@ -2369,15 +2362,15 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
}
EXPORT_SYMBOL_GPL(nfs_access_add_cache);
-#define NFS_MAY_READ (NFS4_ACCESS_READ)
-#define NFS_MAY_WRITE (NFS4_ACCESS_MODIFY | \
-NFS4_ACCESS_EXTEND | \
-NFS4_ACCESS_DELETE)
-#define NFS_FILE_MAY_WRITE (NFS4_ACCESS_MODIFY | \
-NFS4_ACCESS_EXTEND)
+#define NFS_MAY_READ (NFS_ACCESS_READ)
+#define NFS_MAY_WRITE (NFS_ACCESS_MODIFY | \
+NFS_ACCESS_EXTEND | \
+NFS_ACCESS_DELETE)
+#define NFS_FILE_MAY_WRITE (NFS_ACCESS_MODIFY | \
+NFS_ACCESS_EXTEND)
#define NFS_DIR_MAY_WRITE NFS_MAY_WRITE
-#define NFS_MAY_LOOKUP (NFS4_ACCESS_LOOKUP)
-#define NFS_MAY_EXECUTE (NFS4_ACCESS_EXECUTE)
+#define NFS_MAY_LOOKUP (NFS_ACCESS_LOOKUP)
+#define NFS_MAY_EXECUTE (NFS_ACCESS_EXECUTE)
static int
nfs_access_calc_mask(u32 access_result, umode_t umode)
{
@@ -2425,9 +2418,14 @@ static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
if (!may_block)
goto out;
-/* Be clever: ask server to check for all possible rights */
-cache.mask = NFS_MAY_LOOKUP | NFS_MAY_EXECUTE
-| NFS_MAY_WRITE | NFS_MAY_READ;
+/*
+* Determine which access bits we want to ask for...
+*/
+cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND;
+if (S_ISDIR(inode->i_mode))
+cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
+else
+cache.mask |= NFS_ACCESS_EXECUTE;
cache.cred = cred;
status = NFS_PROTO(inode)->access(inode, &cache);
if (status != 0) {

View file

@@ -829,23 +829,9 @@ int nfs_flock(struct file *filp, int cmd, struct file_lock *fl)
if (NFS_SERVER(inode)->flags & NFS_MOUNT_LOCAL_FLOCK)
is_local = 1;
-/*
-* VFS doesn't require the open mode to match a flock() lock's type.
-* NFS, however, may simulate flock() locking with posix locking which
-* requires the open mode to match the lock type.
-*/
-switch (fl->fl_type) {
-case F_UNLCK:
+/* We're simulating flock() locks using posix locks on the server */
+if (fl->fl_type == F_UNLCK)
return do_unlk(filp, cmd, fl, is_local);
-case F_RDLCK:
-if (!(filp->f_mode & FMODE_READ))
-return -EBADF;
-break;
-case F_WRLCK:
-if (!(filp->f_mode & FMODE_WRITE))
-return -EBADF;
-}
return do_setlk(filp, cmd, fl, is_local);
}
EXPORT_SYMBOL_GPL(nfs_flock);

View file

@@ -471,10 +471,10 @@ filelayout_read_pagelist(struct nfs_pgio_header *hdr)
return PNFS_NOT_ATTEMPTED;
dprintk("%s USE DS: %s cl_count %d\n", __func__,
-ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
+ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count));
/* No multipath support. Use first DS */
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
@@ -515,10 +515,10 @@ filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
-offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
+offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count));
hdr->pgio_done_cb = filelayout_write_done_cb;
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
hdr->ds_commit_idx = idx;
fh = nfs4_fl_select_ds_fh(lseg, j);
@@ -1064,9 +1064,9 @@ static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
goto out_err;
dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
-data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
+data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count));
data->commit_done_cb = filelayout_commit_done_cb;
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)

View file

@@ -187,7 +187,7 @@ ff_layout_add_mirror(struct pnfs_layout_hdr *lo,
continue;
if (!ff_mirror_match_fh(mirror, pos))
continue;
-if (atomic_inc_not_zero(&pos->ref)) {
+if (refcount_inc_not_zero(&pos->ref)) {
spin_unlock(&inode->i_lock);
return pos;
}
@@ -218,7 +218,7 @@ static struct nfs4_ff_layout_mirror *ff_layout_alloc_mirror(gfp_t gfp_flags)
mirror = kzalloc(sizeof(*mirror), gfp_flags);
if (mirror != NULL) {
spin_lock_init(&mirror->lock);
-atomic_set(&mirror->ref, 1);
+refcount_set(&mirror->ref, 1);
INIT_LIST_HEAD(&mirror->mirrors);
}
return mirror;
@@ -242,7 +242,7 @@ static void ff_layout_free_mirror(struct nfs4_ff_layout_mirror *mirror)
static void ff_layout_put_mirror(struct nfs4_ff_layout_mirror *mirror)
{
-if (mirror != NULL && atomic_dec_and_test(&mirror->ref))
+if (mirror != NULL && refcount_dec_and_test(&mirror->ref))
ff_layout_free_mirror(mirror);
}
@@ -1726,10 +1726,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
vers = nfs4_ff_layout_ds_version(lseg, idx);
dprintk("%s USE DS: %s cl_count %d vers %d\n", __func__,
-ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count), vers);
+ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count), vers);
hdr->pgio_done_cb = ff_layout_read_done_cb;
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
if (fh)
@@ -1785,11 +1785,11 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d vers %d\n",
__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
-offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count),
+offset, ds->ds_remotestr, refcount_read(&ds->ds_clp->cl_count),
vers);
hdr->pgio_done_cb = ff_layout_write_done_cb;
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
hdr->ds_clp = ds->ds_clp;
hdr->ds_commit_idx = idx;
fh = nfs4_ff_layout_select_ds_fh(lseg, idx);
@@ -1863,11 +1863,11 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
vers = nfs4_ff_layout_ds_version(lseg, idx);
dprintk("%s ino %lu, how %d cl_count %d vers %d\n", __func__,
-data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count),
+data->inode->i_ino, how, refcount_read(&ds->ds_clp->cl_count),
vers);
data->commit_done_cb = ff_layout_commit_done_cb;
data->cred = ds_cred;
-atomic_inc(&ds->ds_clp->cl_count);
+refcount_inc(&ds->ds_clp->cl_count);
data->ds_clp = ds->ds_clp;
fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
if (fh)
@@ -2286,7 +2286,7 @@ ff_layout_mirror_prepare_stats(struct pnfs_layout_hdr *lo,
if (!test_and_clear_bit(NFS4_FF_MIRROR_STAT_AVAIL, &mirror->flags))
continue;
/* mirror refcount put in cleanup_layoutstats */
-if (!atomic_inc_not_zero(&mirror->ref))
+if (!refcount_inc_not_zero(&mirror->ref))
continue;
dev = &mirror->mirror_ds->id_node;
memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE);

View file

@@ -14,6 +14,7 @@
#define FF_FLAGS_NO_IO_THRU_MDS 2
#define FF_FLAGS_NO_READ_IO 4
+#include <linux/refcount.h>
#include "../pnfs.h"
/* XXX: Let's filter out insanely large mirror count for now to avoid oom
@@ -82,7 +83,7 @@ struct nfs4_ff_layout_mirror {
nfs4_stateid stateid;
struct rpc_cred __rcu *ro_cred;
struct rpc_cred __rcu *rw_cred;
-atomic_t ref;
+refcount_t ref;
spinlock_t lock;
unsigned long flags;
struct nfs4_ff_layoutstat read_stat;

View file

@@ -783,7 +783,7 @@ EXPORT_SYMBOL_GPL(nfs_getattr);
static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
{
-atomic_set(&l_ctx->count, 1);
+refcount_set(&l_ctx->count, 1);
l_ctx->lockowner = current->files;
INIT_LIST_HEAD(&l_ctx->list);
atomic_set(&l_ctx->io_count, 0);
@@ -797,7 +797,7 @@ static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context
do {
if (pos->lockowner != current->files)
continue;
-atomic_inc(&pos->count);
+refcount_inc(&pos->count);
return pos;
} while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
return NULL;
@@ -836,7 +836,7 @@ void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
struct nfs_open_context *ctx = l_ctx->open_context;
struct inode *inode = d_inode(ctx->dentry);
-if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
+if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
return;
list_del(&l_ctx->list);
spin_unlock(&inode->i_lock);
@@ -913,7 +913,7 @@ EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
if (ctx != NULL)
-atomic_inc(&ctx->lock_context.count);
+refcount_inc(&ctx->lock_context.count);
return ctx;
}
EXPORT_SYMBOL_GPL(get_nfs_open_context);
@@ -924,11 +924,11 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
struct super_block *sb = ctx->dentry->d_sb;
if (!list_empty(&ctx->list)) {
-if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
+if (!refcount_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
return;
list_del(&ctx->list);
spin_unlock(&inode->i_lock);
-} else if (!atomic_dec_and_test(&ctx->lock_context.count))
+} else if (!refcount_dec_and_test(&ctx->lock_context.count))
return;
if (inode != NULL)
NFS_PROTO(inode)->close_context(ctx, is_sync);
@@ -2084,8 +2084,12 @@ static int nfs_net_init(struct net *net)
static void nfs_net_exit(struct net *net)
{
+struct nfs_net *nn = net_generic(net, nfs_net_id);
nfs_fs_proc_net_exit(net);
nfs_cleanup_cb_ident_idr(net);
+WARN_ON_ONCE(!list_empty(&nn->nfs_client_list));
+WARN_ON_ONCE(!list_empty(&nn->nfs_volume_list));
}
static struct pernet_operations nfs_net_ops = {

View file

@@ -188,6 +188,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
{
struct nfs3_accessargs arg = {
.fh = NFS_FH(inode),
+.access = entry->mask,
};
struct nfs3_accessres res;
struct rpc_message msg = {
@@ -196,25 +197,9 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
.rpc_resp = &res,
.rpc_cred = entry->cred,
};
-int mode = entry->mask;
int status = -ENOMEM;
dprintk("NFS call access\n");
-if (mode & MAY_READ)
-arg.access |= NFS3_ACCESS_READ;
-if (S_ISDIR(inode->i_mode)) {
-if (mode & MAY_WRITE)
-arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE;
-if (mode & MAY_EXEC)
-arg.access |= NFS3_ACCESS_LOOKUP;
-} else {
-if (mode & MAY_WRITE)
-arg.access |= NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND;
-if (mode & MAY_EXEC)
-arg.access |= NFS3_ACCESS_EXECUTE;
-}
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
goto out;

View file

@@ -145,7 +145,7 @@ struct nfs4_lock_state {
unsigned long ls_flags;
struct nfs_seqid_counter ls_seqid;
nfs4_stateid ls_stateid;
-atomic_t ls_count;
+refcount_t ls_count;
fl_owner_t ls_owner;
};
@@ -162,6 +162,7 @@ enum {
NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
NFS_STATE_RECOVERY_FAILED, /* OPEN stateid state recovery failed */
NFS_STATE_MAY_NOTIFY_LOCK, /* server may CB_NOTIFY_LOCK */
+NFS_STATE_CHANGE_WAIT, /* A state changing operation is outstanding */
};
struct nfs4_state {
@@ -185,6 +186,8 @@ struct nfs4_state {
unsigned int n_rdwr; /* Number of read/write references */
fmode_t state; /* State on the server (R,W, or RW) */
atomic_t count;
+wait_queue_head_t waitq;
};
@@ -458,6 +461,10 @@ extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
extern int nfs4_select_rw_stateid(struct nfs4_state *, fmode_t,
const struct nfs_lock_context *, nfs4_stateid *,
struct rpc_cred **);
+extern bool nfs4_refresh_open_stateid(nfs4_stateid *dst,
+struct nfs4_state *state);
+extern bool nfs4_copy_open_stateid(nfs4_stateid *dst,
+struct nfs4_state *state);
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
@@ -465,7 +472,7 @@ extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
extern void nfs_release_seqid(struct nfs_seqid *seqid);
extern void nfs_free_seqid(struct nfs_seqid *seqid);
-extern int nfs4_setup_sequence(const struct nfs_client *client,
+extern int nfs4_setup_sequence(struct nfs_client *client,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
struct rpc_task *task);
@@ -475,6 +482,7 @@ extern int nfs4_sequence_done(struct rpc_task *task,
extern void nfs4_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp);
extern const nfs4_stateid zero_stateid;
+extern const nfs4_stateid invalid_stateid;
/* nfs4super.c */
struct nfs_mount_info;

View file

@@ -483,7 +483,7 @@ static int nfs4_match_client(struct nfs_client *pos, struct nfs_client *new,
* ID and serverowner fields. Wait for CREATE_SESSION
* to finish. */
if (pos->cl_cons_state > NFS_CS_READY) {
-atomic_inc(&pos->cl_count);
+refcount_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
nfs_put_client(*prev);
@@ -559,7 +559,7 @@ int nfs40_walk_client_list(struct nfs_client *new,
* way that a SETCLIENTID_CONFIRM to pos can succeed is
* if new and pos point to the same server:
*/
-atomic_inc(&pos->cl_count);
+refcount_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
nfs_put_client(prev);
@@ -715,7 +715,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
continue;
found:
-atomic_inc(&pos->cl_count);
+refcount_inc(&pos->cl_count);
*result = pos;
status = 0;
break;
@@ -749,7 +749,7 @@ nfs4_find_client_ident(struct net *net, int cb_ident)
spin_lock(&nn->nfs_client_lock);
clp = idr_find(&nn->cb_ident_idr, cb_ident);
if (clp)
-atomic_inc(&clp->cl_count);
+refcount_inc(&clp->cl_count);
spin_unlock(&nn->nfs_client_lock);
return clp;
}
@@ -793,7 +793,7 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
spin_lock(&nn->nfs_client_lock);
list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
-if (nfs4_cb_match_client(addr, clp, minorversion) == false)
+if (!nfs4_cb_match_client(addr, clp, minorversion))
continue;
if (!nfs4_has_session(clp))
@ -804,7 +804,7 @@ nfs4_find_client_sessionid(struct net *net, const struct sockaddr *addr,
sid->data, NFS4_MAX_SESSIONID_LEN) != 0)
continue;
-atomic_inc(&clp->cl_count);
+refcount_inc(&clp->cl_count);
spin_unlock(&nn->nfs_client_lock);
return clp;
}

View file

@@ -96,6 +96,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
struct nfs_open_context *ctx, struct nfs4_label *ilabel,
struct nfs4_label *olabel);
#ifdef CONFIG_NFS_V4_1
+static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
+struct rpc_cred *cred,
+struct nfs4_slot *slot,
+bool is_privileged);
static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
struct rpc_cred *);
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
@@ -254,15 +258,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
};
const u32 nfs4_fs_locations_bitmap[3] = {
-FATTR4_WORD0_TYPE
-| FATTR4_WORD0_CHANGE
+FATTR4_WORD0_CHANGE
| FATTR4_WORD0_SIZE
| FATTR4_WORD0_FSID
| FATTR4_WORD0_FILEID
| FATTR4_WORD0_FS_LOCATIONS,
-FATTR4_WORD1_MODE
-| FATTR4_WORD1_NUMLINKS
-| FATTR4_WORD1_OWNER
+FATTR4_WORD1_OWNER
| FATTR4_WORD1_OWNER_GROUP
| FATTR4_WORD1_RAWDEV
| FATTR4_WORD1_SPACE_USED
@@ -644,13 +645,14 @@ static int nfs40_sequence_done(struct rpc_task *task,
#if defined(CONFIG_NFS_V4_1)
-static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
+static void nfs41_release_slot(struct nfs4_slot *slot)
{
struct nfs4_session *session;
struct nfs4_slot_table *tbl;
-struct nfs4_slot *slot = res->sr_slot;
bool send_new_highest_used_slotid = false;
+if (!slot)
+return;
tbl = slot->table;
session = tbl->session;
@@ -676,13 +678,18 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
send_new_highest_used_slotid = false;
out_unlock:
spin_unlock(&tbl->slot_tbl_lock);
-res->sr_slot = NULL;
if (send_new_highest_used_slotid)
nfs41_notify_server(session->clp);
+if (waitqueue_active(&tbl->slot_waitq))
+wake_up_all(&tbl->slot_waitq);
}
+static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
+{
+nfs41_release_slot(res->sr_slot);
+res->sr_slot = NULL;
+}
static int nfs41_sequence_process(struct rpc_task *task,
struct nfs4_sequence_res *res)
{
@@ -710,13 +717,6 @@ static int nfs41_sequence_process(struct rpc_task *task,
/* Check the SEQUENCE operation status */
switch (res->sr_status) {
case 0:
-/* If previous op on slot was interrupted and we reused
-* the seq# and got a reply from the cache, then retry
-*/
-if (task->tk_status == -EREMOTEIO && interrupted) {
-++slot->seq_nr;
-goto retry_nowait;
-}
/* Update the slot's sequence and clientid lease timer */
slot->seq_done = 1;
clp = session->clp;
@@ -750,16 +750,16 @@ static int nfs41_sequence_process(struct rpc_task *task,
* The slot id we used was probably retired. Try again
* using a different slot id.
*/
+if (slot->seq_nr < slot->table->target_highest_slotid)
+goto session_recover;
goto retry_nowait;
case -NFS4ERR_SEQ_MISORDERED:
/*
* Was the last operation on this sequence interrupted?
* If so, retry after bumping the sequence number.
*/
-if (interrupted) {
-++slot->seq_nr;
-goto retry_nowait;
-}
+if (interrupted)
+goto retry_new_seq;
/*
* Could this slot have been previously retired?
* If so, then the server may be expecting seq_nr = 1!
@@ -768,10 +768,11 @@ static int nfs41_sequence_process(struct rpc_task *task,
slot->seq_nr = 1;
goto retry_nowait;
}
-break;
+goto session_recover;
case -NFS4ERR_SEQ_FALSE_RETRY:
-++slot->seq_nr;
-goto retry_nowait;
+if (interrupted)
+goto retry_new_seq;
+goto session_recover;
default:
/* Just update the slot sequence no. */
slot->seq_done = 1;
@@ -781,6 +782,11 @@ static int nfs41_sequence_process(struct rpc_task *task,
dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
out_noaction:
return ret;
+session_recover:
+nfs4_schedule_session_recovery(session, res->sr_status);
+goto retry_nowait;
+retry_new_seq:
+++slot->seq_nr;
retry_nowait:
if (rpc_restart_call_prepare(task)) {
nfs41_sequence_free_slot(res);
@@ -857,6 +863,17 @@ static const struct rpc_call_ops nfs41_call_sync_ops = {
.rpc_call_done = nfs41_call_sync_done,
};
+static void
+nfs4_sequence_process_interrupted(struct nfs_client *client,
+struct nfs4_slot *slot, struct rpc_cred *cred)
+{
+struct rpc_task *task;
+task = _nfs41_proc_sequence(client, cred, slot, true);
+if (!IS_ERR(task))
+rpc_put_task_async(task);
+}
#else /* !CONFIG_NFS_V4_1 */
static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
@@ -877,9 +894,34 @@ int nfs4_sequence_done(struct rpc_task *task,
}
EXPORT_SYMBOL_GPL(nfs4_sequence_done);
+static void
+nfs4_sequence_process_interrupted(struct nfs_client *client,
+struct nfs4_slot *slot, struct rpc_cred *cred)
+{
+WARN_ON_ONCE(1);
+slot->interrupted = 0;
+}
#endif /* !CONFIG_NFS_V4_1 */
-int nfs4_setup_sequence(const struct nfs_client *client,
+static
+void nfs4_sequence_attach_slot(struct nfs4_sequence_args *args,
+struct nfs4_sequence_res *res,
+struct nfs4_slot *slot)
+{
+if (!slot)
+return;
+slot->privileged = args->sa_privileged ? 1 : 0;
+args->sa_slot = slot;
+res->sr_slot = slot;
+res->sr_timestamp = jiffies;
+res->sr_status_flags = 0;
+res->sr_status = 1;
+}
+int nfs4_setup_sequence(struct nfs_client *client,
struct nfs4_sequence_args *args,
struct nfs4_sequence_res *res,
struct rpc_task *task)
@@ -897,29 +939,28 @@ int nfs4_setup_sequence(const struct nfs_client *client,
task->tk_timeout = 0;
}
-spin_lock(&tbl->slot_tbl_lock);
-/* The state manager will wait until the slot table is empty */
-if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
-goto out_sleep;
+for (;;) {
+spin_lock(&tbl->slot_tbl_lock);
+/* The state manager will wait until the slot table is empty */
+if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
+goto out_sleep;
-slot = nfs4_alloc_slot(tbl);
-if (IS_ERR(slot)) {
-/* Try again in 1/4 second */
-if (slot == ERR_PTR(-ENOMEM))
-task->tk_timeout = HZ >> 2;
-goto out_sleep;
+slot = nfs4_alloc_slot(tbl);
+if (IS_ERR(slot)) {
+/* Try again in 1/4 second */
+if (slot == ERR_PTR(-ENOMEM))
+task->tk_timeout = HZ >> 2;
+goto out_sleep;
+}
+spin_unlock(&tbl->slot_tbl_lock);
+if (likely(!slot->interrupted))
+break;
+nfs4_sequence_process_interrupted(client,
+slot, task->tk_msg.rpc_cred);
+}
-spin_unlock(&tbl->slot_tbl_lock);
-slot->privileged = args->sa_privileged ? 1 : 0;
-args->sa_slot = slot;
-res->sr_slot = slot;
-if (session) {
-res->sr_timestamp = jiffies;
-res->sr_status_flags = 0;
-res->sr_status = 1;
-}
+nfs4_sequence_attach_slot(args, res, slot);
trace_nfs4_setup_sequence(session, args);
out_start:
@@ -1044,6 +1085,12 @@ struct nfs4_opendata {
int rpc_status;
};
+struct nfs4_open_createattrs {
+struct nfs4_label *label;
+struct iattr *sattr;
+const __u32 verf[2];
+};
static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
int err, struct nfs4_exception *exception)
{
@@ -1113,8 +1160,7 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct nfs4_state_owner *sp, fmode_t fmode, int flags,
-const struct iattr *attrs,
-struct nfs4_label *label,
+const struct nfs4_open_createattrs *c,
enum open_claim_type4 claim,
gfp_t gfp_mask)
{
@@ -1122,6 +1168,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
struct inode *dir = d_inode(parent);
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
+struct nfs4_label *label = (c != NULL) ? c->label : NULL;
struct nfs4_opendata *p;
p = kzalloc(sizeof(*p), gfp_mask);
@@ -1187,15 +1234,11 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
p->o_arg.fh = NFS_FH(d_inode(dentry));
}
-if (attrs != NULL && attrs->ia_valid != 0) {
-__u32 verf[2];
+if (c != NULL && c->sattr != NULL && c->sattr->ia_valid != 0) {
p->o_arg.u.attrs = &p->attrs;
-memcpy(&p->attrs, attrs, sizeof(p->attrs));
+memcpy(&p->attrs, c->sattr, sizeof(p->attrs));
-verf[0] = jiffies;
-verf[1] = current->pid;
-memcpy(p->o_arg.u.verifier.data, verf,
+memcpy(p->o_arg.u.verifier.data, c->verf,
sizeof(p->o_arg.u.verifier.data));
}
p->c_arg.fh = &p->o_res.fh;
@@ -1334,6 +1377,25 @@ static bool nfs_open_stateid_recover_openmode(struct nfs4_state *state)
}
#endif /* CONFIG_NFS_V4_1 */
+static void nfs_state_log_update_open_stateid(struct nfs4_state *state)
+{
+if (test_and_clear_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
+wake_up_all(&state->waitq);
+}
+static void nfs_state_log_out_of_order_open_stateid(struct nfs4_state *state,
+const nfs4_stateid *stateid)
+{
+u32 state_seqid = be32_to_cpu(state->open_stateid.seqid);
+u32 stateid_seqid = be32_to_cpu(stateid->seqid);
+if (stateid_seqid == state_seqid + 1U ||
+(stateid_seqid == 1U && state_seqid == 0xffffffffU))
+nfs_state_log_update_open_stateid(state);
+else
+set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
+}
static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
{
struct nfs_client *clp = state->owner->so_server->nfs_client;
@@ -1349,18 +1411,32 @@ static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
nfs4_state_mark_reclaim_nograce(clp, state);
}
+/*
+* Check for whether or not the caller may update the open stateid
+* to the value passed in by stateid.
+*
+* Note: This function relies heavily on the server implementing
+* RFC7530 Section 9.1.4.2, and RFC5661 Section 8.2.2
+* correctly.
+* i.e. The stateid seqids have to be initialised to 1, and
+* are then incremented on every state transition.
+*/
static bool nfs_need_update_open_stateid(struct nfs4_state *state,
-const nfs4_stateid *stateid, nfs4_stateid *freeme)
+const nfs4_stateid *stateid)
{
-if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
-return true;
-if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
-nfs4_stateid_copy(freeme, &state->open_stateid);
-nfs_test_and_clear_all_open_stateid(state);
+if (test_bit(NFS_OPEN_STATE, &state->flags) == 0 ||
+!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
+if (stateid->seqid == cpu_to_be32(1))
+nfs_state_log_update_open_stateid(state);
+else
+set_bit(NFS_STATE_CHANGE_WAIT, &state->flags);
return true;
}
-if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
+if (nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+nfs_state_log_out_of_order_open_stateid(state, stateid);
return true;
+}
return false;
}
@@ -1399,11 +1475,14 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
!nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
nfs_resync_open_stateid_locked(state);
-return;
+goto out;
}
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, stateid);
nfs4_stateid_copy(&state->open_stateid, stateid);
+trace_nfs4_open_stateid_update(state->inode, stateid, 0);
+out:
+nfs_state_log_update_open_stateid(state);
}
static void nfs_clear_open_stateid(struct nfs4_state *state,
@@ -1420,29 +1499,60 @@ static void nfs_clear_open_stateid(struct nfs4_state *state,
}
static void nfs_set_open_stateid_locked(struct nfs4_state *state,
-const nfs4_stateid *stateid, fmode_t fmode,
-nfs4_stateid *freeme)
+const nfs4_stateid *stateid, nfs4_stateid *freeme)
{
-switch (fmode) {
-case FMODE_READ:
-set_bit(NFS_O_RDONLY_STATE, &state->flags);
+DEFINE_WAIT(wait);
+int status = 0;
+for (;;) {
+if (!nfs_need_update_open_stateid(state, stateid))
+return;
+if (!test_bit(NFS_STATE_CHANGE_WAIT, &state->flags))
break;
-case FMODE_WRITE:
-set_bit(NFS_O_WRONLY_STATE, &state->flags);
+if (status)
break;
-case FMODE_READ|FMODE_WRITE:
-set_bit(NFS_O_RDWR_STATE, &state->flags);
+/* Rely on seqids for serialisation with NFSv4.0 */
+if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
+break;
+prepare_to_wait(&state->waitq, &wait, TASK_KILLABLE);
+/*
+* Ensure we process the state changes in the same order
+* in which the server processed them by delaying the
+* update of the stateid until we are in sequence.
+*/
+write_sequnlock(&state->seqlock);
+spin_unlock(&state->owner->so_lock);
+rcu_read_unlock();
+trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
+if (!signal_pending(current)) {
+if (schedule_timeout(5*HZ) == 0)
+status = -EAGAIN;
+else
+status = 0;
+} else
+status = -EINTR;
+finish_wait(&state->waitq, &wait);
+rcu_read_lock();
+spin_lock(&state->owner->so_lock);
+write_seqlock(&state->seqlock);
+}
-if (!nfs_need_update_open_stateid(state, stateid, freeme))
-return;
+if (test_bit(NFS_OPEN_STATE, &state->flags) &&
+!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
+nfs4_stateid_copy(freeme, &state->open_stateid);
+nfs_test_and_clear_all_open_stateid(state);
+}
if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
nfs4_stateid_copy(&state->stateid, stateid);
nfs4_stateid_copy(&state->open_stateid, stateid);
+trace_nfs4_open_stateid_update(state->inode, stateid, status);
+nfs_state_log_update_open_stateid(state);
}
-static void __update_open_stateid(struct nfs4_state *state,
+static void nfs_state_set_open_stateid(struct nfs4_state *state,
const nfs4_stateid *open_stateid,
-const nfs4_stateid *deleg_stateid,
fmode_t fmode,
nfs4_stateid *freeme)
{
@@ -1450,17 +1560,34 @@ static void __update_open_stateid(struct nfs4_state *state,
* Protect the call to nfs4_state_set_mode_locked and
* serialise the stateid update
*/
-spin_lock(&state->owner->so_lock);
write_seqlock(&state->seqlock);
-if (deleg_stateid != NULL) {
-nfs4_stateid_copy(&state->stateid, deleg_stateid);
-set_bit(NFS_DELEGATED_STATE, &state->flags);
+nfs_set_open_stateid_locked(state, open_stateid, freeme);
+switch (fmode) {
+case FMODE_READ:
+set_bit(NFS_O_RDONLY_STATE, &state->flags);
+break;
+case FMODE_WRITE:
+set_bit(NFS_O_WRONLY_STATE, &state->flags);
+break;
+case FMODE_READ|FMODE_WRITE:
+set_bit(NFS_O_RDWR_STATE, &state->flags);
+}
-if (open_stateid != NULL)
-nfs_set_open_stateid_locked(state, open_stateid, fmode, freeme);
+set_bit(NFS_OPEN_STATE, &state->flags);
write_sequnlock(&state->seqlock);
}
+static void nfs_state_set_delegation(struct nfs4_state *state,
+const nfs4_stateid *deleg_stateid,
+fmode_t fmode)
+{
+/*
+* Protect the call to nfs4_state_set_mode_locked and
+* serialise the stateid update
+*/
+write_seqlock(&state->seqlock);
+nfs4_stateid_copy(&state->stateid, deleg_stateid);
+set_bit(NFS_DELEGATED_STATE, &state->flags);
+write_sequnlock(&state->seqlock);
-update_open_stateflags(state, fmode);
-spin_unlock(&state->owner->so_lock);
}
static int update_open_stateid(struct nfs4_state *state,
@@ -1478,6 +1605,12 @@ static int update_open_stateid(struct nfs4_state *state,
fmode &= (FMODE_READ|FMODE_WRITE);
rcu_read_lock();
spin_lock(&state->owner->so_lock);
+if (open_stateid != NULL) {
+nfs_state_set_open_stateid(state, open_stateid, fmode, &freeme);
+ret = 1;
+}
deleg_cur = rcu_dereference(nfsi->delegation);
if (deleg_cur == NULL)
goto no_delegation;
@@ -1494,18 +1627,16 @@ static int update_open_stateid(struct nfs4_state *state,
goto no_delegation_unlock;
nfs_mark_delegation_referenced(deleg_cur);
-__update_open_stateid(state, open_stateid, &deleg_cur->stateid,
-fmode, &freeme);
+nfs_state_set_delegation(state, &deleg_cur->stateid, fmode);
ret = 1;
no_delegation_unlock:
spin_unlock(&deleg_cur->lock);
no_delegation:
-if (ret)
-update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
rcu_read_unlock();
-if (!ret && open_stateid != NULL) {
-__update_open_stateid(state, open_stateid, NULL, fmode, &freeme);
-ret = 1;
-}
if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
nfs4_schedule_state_manager(clp);
if (freeme.type != 0)
@@ -1761,7 +1892,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
struct nfs4_opendata *opendata;
opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
-NULL, NULL, claim, GFP_NOFS);
+NULL, claim, GFP_NOFS);
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
@@ -2518,7 +2649,7 @@ static int nfs41_check_expired_locks(struct nfs4_state *state)
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
-atomic_inc(&lsp->ls_count);
+refcount_inc(&lsp->ls_count);
spin_unlock(&state->state_lock);
nfs4_put_lock_state(prev);
@@ -2692,8 +2823,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
static int _nfs4_do_open(struct inode *dir,
struct nfs_open_context *ctx,
int flags,
-struct iattr *sattr,
-struct nfs4_label *label,
+const struct nfs4_open_createattrs *c,
int *opened)
{
struct nfs4_state_owner *sp;
@@ -2705,6 +2835,8 @@ static int _nfs4_do_open(struct inode *dir,
struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
+struct iattr *sattr = c->sattr;
+struct nfs4_label *label = c->label;
struct nfs4_label *olabel = NULL;
int status;
@@ -2723,8 +2855,8 @@ static int _nfs4_do_open(struct inode *dir,
status = -ENOMEM;
if (d_really_is_positive(dentry))
claim = NFS4_OPEN_CLAIM_FH;
-opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
-label, claim, GFP_KERNEL);
+opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags,
+c, claim, GFP_KERNEL);
if (opendata == NULL)
goto err_put_state_owner;
@@ -2805,10 +2937,18 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
struct nfs_server *server = NFS_SERVER(dir);
struct nfs4_exception exception = { };
struct nfs4_state *res;
+struct nfs4_open_createattrs c = {
+.label = label,
+.sattr = sattr,
+.verf = {
+[0] = (__u32)jiffies,
+[1] = (__u32)current->pid,
+},
+};
int status;
do {
-status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
+status = _nfs4_do_open(dir, ctx, flags, &c, opened);
res = ctx->state;
trace_nfs4_open_file(ctx, flags, status);
if (status == 0)
@@ -3024,18 +3164,20 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
break;
+case -NFS4ERR_OLD_STATEID:
+if (nfs4_refresh_layout_stateid(&calldata->arg.lr_args->stateid,
+calldata->inode))
+goto lr_restart;
+/* Fallthrough */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
-case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
calldata->arg.lr_args = NULL;
calldata->res.lr_res = NULL;
calldata->res.lr_ret = 0;
-rpc_restart_call_prepare(task);
-return;
+goto lr_restart;
}
}
@@ -3051,39 +3193,43 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
if (calldata->arg.bitmask != NULL) {
calldata->arg.bitmask = NULL;
calldata->res.fattr = NULL;
-task->tk_status = 0;
-rpc_restart_call_prepare(task);
-goto out_release;
+goto out_restart;
}
break;
+case -NFS4ERR_OLD_STATEID:
+/* Did we race with OPEN? */
+if (nfs4_refresh_open_stateid(&calldata->arg.stateid,
+state))
+goto out_restart;
+goto out_release;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
nfs4_free_revoked_stateid(server,
&calldata->arg.stateid,
task->tk_msg.rpc_cred);
-case -NFS4ERR_OLD_STATEID:
+/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
-if (!nfs4_stateid_match(&calldata->arg.stateid,
-&state->open_stateid)) {
-rpc_restart_call_prepare(task);
-goto out_release;
-}
-if (calldata->arg.fmode == 0)
-break;
+break;
default:
-if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
-rpc_restart_call_prepare(task);
-goto out_release;
-}
+if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN)
+goto out_restart;
}
nfs_clear_open_stateid(state, &calldata->arg.stateid,
res_stateid, calldata->arg.fmode);
out_release:
+task->tk_status = 0;
nfs_release_seqid(calldata->arg.seqid);
nfs_refresh_inode(calldata->inode, &calldata->fattr);
dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
+return;
+lr_restart:
+calldata->res.lr_ret = 0;
+out_restart:
+task->tk_status = 0;
+rpc_restart_call_prepare(task);
+goto out_release;
}
static void nfs4_close_prepare(struct rpc_task *task, void *data)
@@ -3103,7 +3249,6 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
-nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
/* Calculate the change in open mode */
calldata->arg.fmode = 0;
if (state->n_rdwr == 0) {
@@ -3121,7 +3266,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
if (!nfs4_valid_open_stateid(state) ||
-test_bit(NFS_OPEN_STATE, &state->flags) == 0)
+!nfs4_refresh_open_stateid(&calldata->arg.stateid, state))
call_close = 0;
spin_unlock(&state->owner->so_lock);
@@ -3215,6 +3360,8 @@ int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
calldata->inode = state->inode;
calldata->state = state;
calldata->arg.fh = NFS_FH(state->inode);
+if (!nfs4_copy_open_stateid(&calldata->arg.stateid, state))
+goto out_free_calldata;
/* Serialization for the sequence id */
alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
@@ -3889,6 +4036,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
struct nfs4_accessargs args = {
.fh = NFS_FH(inode),
.bitmask = server->cache_consistency_bitmask,
+.access = entry->mask,
};
struct nfs4_accessres res = {
.server = server,
@@ -3899,26 +4047,8 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
.rpc_resp = &res,
.rpc_cred = entry->cred,
};
-int mode = entry->mask;
int status = 0;
-/*
-* Determine which access bits we want to ask for...
-*/
-if (mode & MAY_READ)
-args.access |= NFS4_ACCESS_READ;
-if (S_ISDIR(inode->i_mode)) {
-if (mode & MAY_WRITE)
-args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
-if (mode & MAY_EXEC)
-args.access |= NFS4_ACCESS_LOOKUP;
-} else {
-if (mode & MAY_WRITE)
-args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
-if (mode & MAY_EXEC)
-args.access |= NFS4_ACCESS_EXECUTE;
-}
res.fattr = nfs_alloc_fattr();
if (res.fattr == NULL)
return -ENOMEM;
@@ -4843,7 +4973,7 @@ static void nfs4_renew_release(void *calldata)
struct nfs4_renewdata *data = calldata;
struct nfs_client *clp = data->client;
-if (atomic_read(&clp->cl_count) > 1)
+if (refcount_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(data);
@@ -4891,7 +5021,7 @@ static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred,
if (renew_flags == 0)
return 0;
-if (!atomic_inc_not_zero(&clp->cl_count))
+if (!refcount_inc_not_zero(&clp->cl_count))
return -EIO;
data = kmalloc(sizeof(*data), GFP_NOFS);
if (data == NULL) {
@@ -5643,18 +5773,20 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
data->args.lr_args = NULL;
data->res.lr_res = NULL;
break;
+case -NFS4ERR_OLD_STATEID:
+if (nfs4_refresh_layout_stateid(&data->args.lr_args->stateid,
+data->inode))
+goto lr_restart;
+/* Fallthrough */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
-case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_UNKNOWN_LAYOUTTYPE:
case -NFS4ERR_WRONG_CRED:
data->args.lr_args = NULL;
data->res.lr_res = NULL;
data->res.lr_ret = 0;
-rpc_restart_call_prepare(task);
-return;
+goto lr_restart;
}
}
@@ -5668,27 +5800,36 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
nfs4_free_revoked_stateid(data->res.server,
data->args.stateid,
task->tk_msg.rpc_cred);
+/* Fallthrough */
case -NFS4ERR_BAD_STATEID:
-case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
task->tk_status = 0;
break;
+case -NFS4ERR_OLD_STATEID:
+if (nfs4_refresh_delegation_stateid(&data->stateid, data->inode))
+goto out_restart;
+task->tk_status = 0;
+break;
case -NFS4ERR_ACCESS:
if (data->args.bitmask) {
data->args.bitmask = NULL;
data->res.fattr = NULL;
-task->tk_status = 0;
-rpc_restart_call_prepare(task);
-return;
+goto out_restart;
}
+/* Fallthrough */
default:
if (nfs4_async_handle_error(task, data->res.server,
NULL, NULL) == -EAGAIN) {
-rpc_restart_call_prepare(task);
-return;
+goto out_restart;
}
}
data->rpc_status = task->tk_status;
+return;
+lr_restart:
+data->res.lr_ret = 0;
+out_restart:
+task->tk_status = 0;
+rpc_restart_call_prepare(task);
}
static void nfs4_delegreturn_release(void *calldata)
@@ -5896,7 +6037,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->lsp = lsp;
-atomic_inc(&lsp->ls_count);
+refcount_inc(&lsp->ls_count);
/* Ensure we don't close file until we're done freeing locks! */
p->ctx = get_nfs_open_context(ctx);
p->l_ctx = nfs_get_lock_context(ctx);
@@ -6112,7 +6253,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
p->server = server;
-atomic_inc(&lsp->ls_count);
+refcount_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
@@ -6568,6 +6709,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
!test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
return -ENOLCK;
+/*
+* Don't rely on the VFS having checked the file open mode,
+* since it won't do this for flock() locks.
+*/
+switch (request->fl_type) {
+case F_RDLCK:
+if (!(filp->f_mode & FMODE_READ))
+return -EBADF;
+break;
+case F_WRLCK:
+if (!(filp->f_mode & FMODE_WRITE))
+return -EBADF;
+}
status = nfs4_set_lock_state(state, request);
if (status != 0)
return status;
@@ -6763,9 +6918,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
struct page *page)
{
struct nfs_server *server = NFS_SERVER(dir);
-u32 bitmask[3] = {
-[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
-};
+u32 bitmask[3];
struct nfs4_fs_locations_arg args = {
.dir_fh = NFS_FH(dir),
.name = name,
@@ -6784,12 +6937,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
dprintk("%s: start\n", __func__);
+bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
+bitmask[1] = nfs4_fattr_bitmap[1];
/* Ask for the fileid of the absent filesystem if mounted_on_fileid
* is not supported */
if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
-bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
+bitmask[0] &= ~FATTR4_WORD0_FILEID;
else
-bitmask[0] |= FATTR4_WORD0_FILEID;
+bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
@@ -7472,7 +7628,7 @@ nfs4_run_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
struct nfs41_exchange_id_data *calldata;
int status;
-if (!atomic_inc_not_zero(&clp->cl_count))
+if (!refcount_inc_not_zero(&clp->cl_count))
return ERR_PTR(-EIO);
status = -ENOMEM;
@@ -8072,7 +8228,7 @@ static void nfs41_sequence_release(void *data)
struct nfs4_sequence_data *calldata = data;
struct nfs_client *clp = calldata->clp;
-if (atomic_read(&clp->cl_count) > 1)
+if (refcount_read(&clp->cl_count) > 1)
nfs4_schedule_state_renewal(clp);
nfs_put_client(clp);
kfree(calldata);
@@ -8101,7 +8257,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
trace_nfs4_sequence(clp, task->tk_status);
if (task->tk_status < 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
-if (atomic_read(&clp->cl_count) == 1)
+if (refcount_read(&clp->cl_count) == 1)
goto out;
if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
@@ -8135,6 +8291,7 @@ static const struct rpc_call_ops nfs41_sequence_ops = {
static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
struct rpc_cred *cred,
+struct nfs4_slot *slot,
bool is_privileged)
{
struct nfs4_sequence_data *calldata;
@@ -8148,15 +8305,18 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
.callback_ops = &nfs41_sequence_ops,
.flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
};
+struct rpc_task *ret;
-if (!atomic_inc_not_zero(&clp->cl_count))
-return ERR_PTR(-EIO);
+ret = ERR_PTR(-EIO);
+if (!refcount_inc_not_zero(&clp->cl_count))
+goto out_err;
+ret = ERR_PTR(-ENOMEM);
calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
-if (calldata == NULL) {
-nfs_put_client(clp);
-return ERR_PTR(-ENOMEM);
-}
+if (calldata == NULL)
+goto out_put_clp;
nfs4_init_sequence(&calldata->args, &calldata->res, 0);
+nfs4_sequence_attach_slot(&calldata->args, &calldata->res, slot);
if (is_privileged)
nfs4_set_sequence_privileged(&calldata->args);
msg.rpc_argp = &calldata->args;
@@ -8164,7 +8324,15 @@ static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
calldata->clp = clp;
task_setup_data.callback_data = calldata;
-return rpc_run_task(&task_setup_data);
+ret = rpc_run_task(&task_setup_data);
+if (IS_ERR(ret))
+goto out_err;
+return ret;
+out_put_clp:
+nfs_put_client(clp);
+out_err:
+nfs41_release_slot(slot);
+return ret;
}
static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
@@ -8174,7 +8342,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
return -EAGAIN;
-task = _nfs41_proc_sequence(clp, cred, false);
+task = _nfs41_proc_sequence(clp, cred, NULL, false);
if (IS_ERR(task))
ret = PTR_ERR(task);
else
@@ -8188,7 +8356,7 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
struct rpc_task *task;
int ret;
-task = _nfs41_proc_sequence(clp, cred, true);
+task = _nfs41_proc_sequence(clp, cred, NULL, true);
if (IS_ERR(task)) {
ret = PTR_ERR(task);
goto out;
@@ -8588,18 +8756,27 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
server = NFS_SERVER(lrp->args.inode);
switch (task->tk_status) {
+case -NFS4ERR_OLD_STATEID:
+if (nfs4_refresh_layout_stateid(&lrp->args.stateid,
+lrp->args.inode))
+goto out_restart;
+/* Fallthrough */
default:
task->tk_status = 0;
+/* Fallthrough */
case 0:
break;
case -NFS4ERR_DELAY:
if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
break;
-nfs4_sequence_free_slot(&lrp->res.seq_res);
-rpc_restart_call_prepare(task);
-return;
+goto out_restart;
}
dprintk("<-- %s\n", __func__);
+return;
+out_restart:
+task->tk_status = 0;
+nfs4_sequence_free_slot(&lrp->res.seq_res);
+rpc_restart_call_prepare(task);
}
static void nfs4_layoutreturn_release(void *calldata)
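Taken together, the cl_count changes in this file replace open-coded atomic_t
reference counting with refcount_t, which saturates rather than wrapping and
warns on increment-from-zero or underflow. A minimal sketch of the conversion
pattern, using a hypothetical struct foo rather than struct nfs_client:

#include <linux/refcount.h>
#include <linux/slab.h>

struct foo {
	refcount_t ref;				/* was: atomic_t ref; */
};

static struct foo *foo_get(struct foo *f)
{
	/* was: atomic_inc_not_zero(); fails once the count has hit zero */
	if (!refcount_inc_not_zero(&f->ref))
		return NULL;
	return f;
}

static void foo_put(struct foo *f)
{
	/* was: atomic_dec_and_test(); refcount_t splats on underflow */
	if (refcount_dec_and_test(&f->ref))
		kfree(f);
}

The same mapping covers the read sites: refcount_read() stands in for
atomic_read() wherever the raw count is inspected.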


@ -69,6 +69,14 @@ const nfs4_stateid zero_stateid = {
{ .data = { 0 } },
.type = NFS4_SPECIAL_STATEID_TYPE,
};
const nfs4_stateid invalid_stateid = {
{
.seqid = cpu_to_be32(0xffffffffU),
.other = { 0 },
},
.type = NFS4_INVALID_STATEID_TYPE,
};
static DEFINE_MUTEX(nfs_clid_init_mutex);
int nfs4_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
@ -645,6 +653,7 @@ nfs4_alloc_open_state(void)
INIT_LIST_HEAD(&state->lock_states);
spin_lock_init(&state->state_lock);
seqlock_init(&state->seqlock);
init_waitqueue_head(&state->waitq);
return state;
}
@ -825,7 +834,7 @@ __nfs4_find_lock_state(struct nfs4_state *state,
ret = pos;
}
if (ret)
atomic_inc(&ret->ls_count);
refcount_inc(&ret->ls_count);
return ret;
}
@ -843,7 +852,7 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f
if (lsp == NULL)
return NULL;
nfs4_init_seqid_counter(&lsp->ls_seqid);
atomic_set(&lsp->ls_count, 1);
refcount_set(&lsp->ls_count, 1);
lsp->ls_state = state;
lsp->ls_owner = fl_owner;
lsp->ls_seqid.owner_id = ida_simple_get(&server->lockowner_id, 0, 0, GFP_NOFS);
@ -907,7 +916,7 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
if (lsp == NULL)
return;
state = lsp->ls_state;
if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
if (!refcount_dec_and_lock(&lsp->ls_count, &state->state_lock))
return;
list_del(&lsp->ls_locks);
if (list_empty(&state->lock_states))
@ -927,7 +936,7 @@ static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;
dst->fl_u.nfs4_fl.owner = lsp;
atomic_inc(&lsp->ls_count);
refcount_inc(&lsp->ls_count);
}
static void nfs4_fl_release_lock(struct file_lock *fl)
@ -985,18 +994,39 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
return ret;
}
static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
bool nfs4_refresh_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
int seq;
do {
ret = false;
seq = read_seqbegin(&state->seqlock);
if (nfs4_state_match_open_stateid_other(state, dst)) {
dst->seqid = state->open_stateid.seqid;
ret = true;
}
} while (read_seqretry(&state->seqlock, seq));
return ret;
}
bool nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
bool ret;
const nfs4_stateid *src;
int seq;
do {
ret = false;
src = &zero_stateid;
seq = read_seqbegin(&state->seqlock);
if (test_bit(NFS_OPEN_STATE, &state->flags))
if (test_bit(NFS_OPEN_STATE, &state->flags)) {
src = &state->open_stateid;
ret = true;
}
nfs4_stateid_copy(dst, src);
} while (read_seqretry(&state->seqlock, seq));
return ret;
}
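Both helpers above rely on the standard seqlock read loop: sample the sequence
counter, perform the unlocked read, and retry if read_seqretry() reports that a
writer ran in between. A standalone sketch of that pattern (the names are
illustrative, not taken from the NFS code):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(sl);
static u32 shared_seqid;

static u32 read_seqid(void)
{
	unsigned int seq;
	u32 val;

	do {
		seq = read_seqbegin(&sl);	/* snapshot the sequence */
		val = shared_seqid;		/* unlocked read         */
	} while (read_seqretry(&sl, seq));	/* retry if a writer ran */

	return val;
}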
/*
@ -1177,7 +1207,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
return;
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
refcount_inc(&clp->cl_count);
/* The rcu_read_lock() is not strictly necessary, as the state
* manager is the only thread that ever changes the rpc_xprt
@ -1269,7 +1299,7 @@ int nfs4_wait_clnt_recover(struct nfs_client *clp)
might_sleep();
atomic_inc(&clp->cl_count);
refcount_inc(&clp->cl_count);
res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
nfs_wait_bit_killable, TASK_KILLABLE);
if (res)
@ -1409,6 +1439,11 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
found = true;
continue;
}
if (nfs4_stateid_match_other(&state->open_stateid, stateid) &&
nfs4_state_mark_reclaim_nograce(clp, state)) {
found = true;
continue;
}
if (nfs_state_lock_state_matches_stateid(state, stateid) &&
nfs4_state_mark_reclaim_nograce(clp, state))
found = true;
@ -2510,7 +2545,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
break;
if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
break;
} while (atomic_read(&clp->cl_count) > 1);
} while (refcount_read(&clp->cl_count) > 1);
return;
out_error:
if (strlen(section))


@ -202,17 +202,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
TP_ARGS(clp, error),
TP_STRUCT__entry(
__string(dstaddr,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR))
__string(dstaddr, clp->cl_hostname)
__field(int, error)
),
TP_fast_assign(
__entry->error = error;
__assign_str(dstaddr,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR));
__assign_str(dstaddr, clp->cl_hostname);
),
TP_printk(
@ -1066,6 +1062,8 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_event,
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
DECLARE_EVENT_CLASS(nfs4_getattr_event,
TP_PROTO(
@ -1133,9 +1131,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
__string(dstaddr, clp ?
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR) : "unknown")
__string(dstaddr, clp ? clp->cl_hostname : "unknown")
),
TP_fast_assign(
@ -1148,9 +1144,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
__assign_str(dstaddr, clp ?
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR) : "unknown")
__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
),
TP_printk(
@ -1192,9 +1186,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
__field(dev_t, dev)
__field(u32, fhandle)
__field(u64, fileid)
__string(dstaddr, clp ?
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR) : "unknown")
__string(dstaddr, clp ? clp->cl_hostname : "unknown")
__field(int, stateid_seq)
__field(u32, stateid_hash)
),
@ -1209,9 +1201,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
__entry->fileid = 0;
__entry->dev = 0;
}
__assign_str(dstaddr, clp ?
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR) : "unknown")
__assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
__entry->stateid_seq =
be32_to_cpu(stateid->seqid);
__entry->stateid_hash =


@ -4385,6 +4385,14 @@ static int decode_delegation_stateid(struct xdr_stream *xdr, nfs4_stateid *state
return decode_stateid(xdr, stateid);
}
static int decode_invalid_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
{
nfs4_stateid dummy;
nfs4_stateid_copy(stateid, &invalid_stateid);
return decode_stateid(xdr, &dummy);
}
static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
{
int status;
@ -4393,7 +4401,7 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (!status)
status = decode_open_stateid(xdr, &res->stateid);
status = decode_invalid_stateid(xdr, &res->stateid);
return status;
}
@ -6108,6 +6116,8 @@ static int decode_layoutreturn(struct xdr_stream *xdr,
res->lrs_present = be32_to_cpup(p);
if (res->lrs_present)
status = decode_layout_stateid(xdr, &res->stateid);
else
nfs4_stateid_copy(&res->stateid, &invalid_stateid);
return status;
out_overflow:
print_overflow_msg(__func__, xdr);


@ -251,7 +251,7 @@ EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
atomic_inc(&lo->plh_refcount);
refcount_inc(&lo->plh_refcount);
}
static struct pnfs_layout_hdr *
@ -296,7 +296,7 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
pnfs_layoutreturn_before_put_layout_hdr(lo);
if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
if (refcount_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
if (!list_empty(&lo->plh_segs))
WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
pnfs_detach_layout_hdr(lo);
@ -354,6 +354,24 @@ pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}
/*
* Update the seqid of a layout stateid
*/
bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode)
{
struct pnfs_layout_hdr *lo;
bool ret = false;
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout;
if (lo && nfs4_stateid_match_other(dst, &lo->plh_stateid)) {
dst->seqid = lo->plh_stateid.seqid;
ret = true;
}
spin_unlock(&inode->i_lock);
return ret;
}
/*
* Mark a pnfs_layout_hdr and all associated layout segments as invalid
*
@ -395,14 +413,14 @@ pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
lo->plh_retry_timestamp = jiffies;
if (!test_and_set_bit(fail_bit, &lo->plh_flags))
atomic_inc(&lo->plh_refcount);
refcount_inc(&lo->plh_refcount);
}
static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
if (test_and_clear_bit(fail_bit, &lo->plh_flags))
atomic_dec(&lo->plh_refcount);
refcount_dec(&lo->plh_refcount);
}
static void
@ -450,7 +468,7 @@ pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
{
INIT_LIST_HEAD(&lseg->pls_list);
INIT_LIST_HEAD(&lseg->pls_lc_list);
atomic_set(&lseg->pls_refcount, 1);
refcount_set(&lseg->pls_refcount, 1);
set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
lseg->pls_layout = lo;
lseg->pls_range = *range;
@ -472,7 +490,7 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
list_del_init(&lseg->pls_list);
/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
atomic_dec(&lo->plh_refcount);
refcount_dec(&lo->plh_refcount);
if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
return;
if (list_empty(&lo->plh_segs) &&
@ -507,13 +525,13 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
return;
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
refcount_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
lo = lseg->pls_layout;
inode = lo->plh_inode;
if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
if (refcount_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
spin_unlock(&inode->i_lock);
return;
@ -551,7 +569,7 @@ pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
struct list_head *tmp_list)
{
if (!atomic_dec_and_test(&lseg->pls_refcount))
if (!refcount_dec_and_test(&lseg->pls_refcount))
return false;
pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
list_add(&lseg->pls_list, tmp_list);
@ -570,7 +588,7 @@ static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
* outstanding io is finished.
*/
dprintk("%s: lseg %p ref %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount));
refcount_read(&lseg->pls_refcount));
if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
rv = 1;
}
@ -1451,7 +1469,7 @@ alloc_init_layout_hdr(struct inode *ino,
lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
if (!lo)
return NULL;
atomic_set(&lo->plh_refcount, 1);
refcount_set(&lo->plh_refcount, 1);
INIT_LIST_HEAD(&lo->plh_layouts);
INIT_LIST_HEAD(&lo->plh_segs);
INIT_LIST_HEAD(&lo->plh_return_segs);
@ -1513,7 +1531,7 @@ pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
if ((range->iomode == IOMODE_RW &&
ls_range->iomode != IOMODE_RW) ||
(range->iomode != ls_range->iomode &&
strict_iomode == true) ||
strict_iomode) ||
!pnfs_lseg_range_intersecting(ls_range, range))
return 0;
@ -1546,7 +1564,7 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
}
dprintk("%s:Return lseg %p ref %d\n",
__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
__func__, ret, ret ? refcount_read(&ret->pls_refcount) : 0);
return ret;
}


@ -30,6 +30,7 @@
#ifndef FS_NFS_PNFS_H
#define FS_NFS_PNFS_H
#include <linux/refcount.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/workqueue.h>
@ -54,7 +55,7 @@ struct nfs4_pnfs_ds {
char *ds_remotestr; /* comma sep list of addrs */
struct list_head ds_addrs;
struct nfs_client *ds_clp;
atomic_t ds_count;
refcount_t ds_count;
unsigned long ds_state;
#define NFS4DS_CONNECTING 0 /* ds is establishing connection */
};
@ -63,7 +64,7 @@ struct pnfs_layout_segment {
struct list_head pls_list;
struct list_head pls_lc_list;
struct pnfs_layout_range pls_range;
atomic_t pls_refcount;
refcount_t pls_refcount;
u32 pls_seq;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
@ -179,7 +180,7 @@ struct pnfs_layoutdriver_type {
};
struct pnfs_layout_hdr {
atomic_t plh_refcount;
refcount_t plh_refcount;
atomic_t plh_outstanding; /* number of RPCs out */
struct list_head plh_layouts; /* other client layouts */
struct list_head plh_bulk_destroy;
@ -251,6 +252,7 @@ int pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
bool is_recall);
int pnfs_destroy_layouts_byclid(struct nfs_client *clp,
bool is_recall);
bool nfs4_refresh_layout_stateid(nfs4_stateid *dst, struct inode *inode);
void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
const nfs4_stateid *new,
@ -393,7 +395,7 @@ static inline struct pnfs_layout_segment *
pnfs_get_lseg(struct pnfs_layout_segment *lseg)
{
if (lseg) {
atomic_inc(&lseg->pls_refcount);
refcount_inc(&lseg->pls_refcount);
smp_mb__after_atomic();
}
return lseg;
@ -764,6 +766,11 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void)
{
}
static inline bool nfs4_refresh_layout_stateid(nfs4_stateid *dst,
struct inode *inode)
{
return false;
}
#endif /* CONFIG_NFS_V4_1 */
#if IS_ENABLED(CONFIG_NFS_V4_2)


@ -338,7 +338,7 @@ print_ds(struct nfs4_pnfs_ds *ds)
" client %p\n"
" cl_exchange_flags %x\n",
ds->ds_remotestr,
atomic_read(&ds->ds_count), ds->ds_clp,
refcount_read(&ds->ds_count), ds->ds_clp,
ds->ds_clp ? ds->ds_clp->cl_exchange_flags : 0);
}
@ -451,7 +451,7 @@ static void destroy_ds(struct nfs4_pnfs_ds *ds)
void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds)
{
if (atomic_dec_and_lock(&ds->ds_count,
if (refcount_dec_and_lock(&ds->ds_count,
&nfs4_ds_cache_lock)) {
list_del_init(&ds->ds_node);
spin_unlock(&nfs4_ds_cache_lock);
@ -537,7 +537,7 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
INIT_LIST_HEAD(&ds->ds_addrs);
list_splice_init(dsaddrs, &ds->ds_addrs);
ds->ds_remotestr = remotestr;
atomic_set(&ds->ds_count, 1);
refcount_set(&ds->ds_count, 1);
INIT_LIST_HEAD(&ds->ds_node);
ds->ds_clp = NULL;
list_add(&ds->ds_node, &nfs4_data_server_cache);
@ -546,10 +546,10 @@ nfs4_pnfs_ds_add(struct list_head *dsaddrs, gfp_t gfp_flags)
} else {
kfree(remotestr);
kfree(ds);
atomic_inc(&tmp_ds->ds_count);
refcount_inc(&tmp_ds->ds_count);
dprintk("%s data server %s found, inc'ed ds_count to %d\n",
__func__, tmp_ds->ds_remotestr,
atomic_read(&tmp_ds->ds_count));
refcount_read(&tmp_ds->ds_count));
ds = tmp_ds;
}
spin_unlock(&nfs4_ds_cache_lock);


@ -1332,7 +1332,7 @@ static int nfs_parse_mount_options(char *raw,
mnt->options |= NFS_OPTION_MIGRATION;
break;
case Opt_nomigration:
mnt->options &= NFS_OPTION_MIGRATION;
mnt->options &= ~NFS_OPTION_MIGRATION;
break;
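The one-character fix above is easy to misread: options &= FLAG keeps only that
bit, while options &= ~FLAG clears it. A tiny illustration (the flag value here
is made up for the example):

#define OPT_MIGRATION	0x02		/* illustrative value only */

static void demo_clear_flag(void)
{
	unsigned int options = OPT_MIGRATION | 0x04;	/* two bits set */

	options &= OPT_MIGRATION;	/* bug: result 0x02, other bit lost */

	options = OPT_MIGRATION | 0x04;
	options &= ~OPT_MIGRATION;	/* fix: result 0x04, flag cleared   */
}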
/*
@ -1456,18 +1456,21 @@ static int nfs_parse_mount_options(char *raw,
switch (token) {
case Opt_xprt_udp6:
protofamily = AF_INET6;
/* fall through */
case Opt_xprt_udp:
mnt->flags &= ~NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_UDP;
break;
case Opt_xprt_tcp6:
protofamily = AF_INET6;
/* fall through */
case Opt_xprt_tcp:
mnt->flags |= NFS_MOUNT_TCP;
mnt->nfs_server.protocol = XPRT_TRANSPORT_TCP;
break;
case Opt_xprt_rdma6:
protofamily = AF_INET6;
/* fall through */
case Opt_xprt_rdma:
/* vector side protocols to TCP */
mnt->flags |= NFS_MOUNT_TCP;
@ -1494,11 +1497,13 @@ static int nfs_parse_mount_options(char *raw,
switch (token) {
case Opt_xprt_udp6:
mountfamily = AF_INET6;
/* fall through */
case Opt_xprt_udp:
mnt->mount_server.protocol = XPRT_TRANSPORT_UDP;
break;
case Opt_xprt_tcp6:
mountfamily = AF_INET6;
/* fall through */
case Opt_xprt_tcp:
mnt->mount_server.protocol = XPRT_TRANSPORT_TCP;
break;
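The /* fall through */ comments added in these switches document that the
missing break is deliberate, so compilers that warn on implicit fall-through
(for example gcc with -Wimplicit-fallthrough) stay quiet. A minimal sketch of
the idiom, with hypothetical token values:

static int set_family_and_proto(int token, int *family, int *proto)
{
	switch (token) {
	case OPT_UDP6:			/* hypothetical tokens */
		*family = AF_INET6;
		/* fall through */	/* UDP6 also needs the UDP setup */
	case OPT_UDP:
		*proto = XPRT_TRANSPORT_UDP;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}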
@ -1988,9 +1993,9 @@ static int nfs23_validate_mount_data(void *options,
args->version = NFS_DEFAULT_VERSION;
switch (data->version) {
case 1:
data->namlen = 0;
data->namlen = 0; /* fall through */
case 2:
data->bsize = 0;
data->bsize = 0; /* fall through */
case 3:
if (data->flags & NFS_MOUNT_VER3)
goto out_no_v3;
@ -1998,11 +2003,14 @@ static int nfs23_validate_mount_data(void *options,
memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE);
/* Turn off security negotiation */
extra_flags |= NFS_MOUNT_SECFLAVOUR;
/* fall through */
case 4:
if (data->flags & NFS_MOUNT_SECFLAVOUR)
goto out_no_sec;
/* fall through */
case 5:
memset(data->context, 0, sizeof(data->context));
/* fall through */
case 6:
if (data->flags & NFS_MOUNT_VER3) {
if (data->root.size > NFS3_FHSIZE || data->root.size == 0)


@ -487,10 +487,8 @@ nfs_lock_and_join_requests(struct page *page)
}
ret = nfs_page_group_lock(head);
if (ret < 0) {
nfs_unlock_and_release_request(head);
return ERR_PTR(ret);
}
if (ret < 0)
goto release_request;
/* lock each request in the page group */
total_bytes = head->wb_bytes;
@ -515,8 +513,7 @@ nfs_lock_and_join_requests(struct page *page)
if (ret < 0) {
nfs_unroll_locks(inode, head, subreq);
nfs_release_request(subreq);
nfs_unlock_and_release_request(head);
return ERR_PTR(ret);
goto release_request;
}
}
/*
@ -532,8 +529,8 @@ nfs_lock_and_join_requests(struct page *page)
nfs_page_group_unlock(head);
nfs_unroll_locks(inode, head, subreq);
nfs_unlock_and_release_request(subreq);
nfs_unlock_and_release_request(head);
return ERR_PTR(-EIO);
ret = -EIO;
goto release_request;
}
}
@ -576,6 +573,10 @@ nfs_lock_and_join_requests(struct page *page)
/* still holds ref on head from nfs_page_find_head_request
* and still has lock on head from lock loop */
return head;
release_request:
nfs_unlock_and_release_request(head);
return ERR_PTR(ret);
}
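The rewrite above funnels every failure through one labelled exit instead of
repeating the unlock-and-return sequence at each error site, the usual kernel
shape for shared cleanup. A reduced sketch of that shape, with hypothetical
helpers:

static struct resource *acquire_and_lock(void)
{
	struct resource *r = get_resource();	/* hypothetical helpers */
	int ret;

	ret = lock_resource(r);
	if (ret < 0)
		goto release;	/* single cleanup path for all errors */

	ret = validate_resource(r);
	if (ret < 0)
		goto release;

	return r;		/* success: caller holds the lock */

release:
	unlock_and_release(r);
	return ERR_PTR(ret);
}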
static void nfs_write_error_remove_page(struct nfs_page *req)


@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rbtree.h>
#include <linux/refcount.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
@ -56,7 +57,7 @@ struct nfs_access_entry {
};
struct nfs_lock_context {
atomic_t count;
refcount_t count;
struct list_head list;
struct nfs_open_context *open_context;
fl_owner_t lockowner;
@ -184,6 +185,16 @@ struct nfs_inode {
struct inode vfs_inode;
};
/*
* Access bit flags
*/
#define NFS_ACCESS_READ 0x0001
#define NFS_ACCESS_LOOKUP 0x0002
#define NFS_ACCESS_MODIFY 0x0004
#define NFS_ACCESS_EXTEND 0x0008
#define NFS_ACCESS_DELETE 0x0010
#define NFS_ACCESS_EXECUTE 0x0020
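These values mirror the ACCESS operation's protocol mask bits, so a caller can
OR them together to ask which operations the server would permit. A hedged
usage sketch (check_access() stands in for the client's real ACCESS request
and is not an actual function here):

static bool may_read_and_append(struct inode *inode)
{
	u32 mask = NFS_ACCESS_READ | NFS_ACCESS_EXTEND;

	return check_access(inode, mask) == 0;	/* 0: server grants both */
}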
/*
* Cache validity bit flags
*/


@ -10,6 +10,7 @@
#include <linux/sunrpc/xprt.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
struct nfs4_session;
struct nfs_iostats;
@ -25,7 +26,7 @@ struct nfs41_impl_id;
* The nfs_client identifies our client state to the server.
*/
struct nfs_client {
atomic_t cl_count;
refcount_t cl_count;
atomic_t cl_mds_count;
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */


@ -1,4 +1,5 @@
/*
* Copyright (c) 2015-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -50,65 +51,6 @@ enum {
RPCRDMA_V1_DEF_INLINE_SIZE = 1024,
};
struct rpcrdma_segment {
__be32 rs_handle; /* Registered memory handle */
__be32 rs_length; /* Length of the chunk in bytes */
__be64 rs_offset; /* Chunk virtual address or offset */
};
/*
* read chunk(s), encoded as a linked list.
*/
struct rpcrdma_read_chunk {
__be32 rc_discrim; /* 1 indicates presence */
__be32 rc_position; /* Position in XDR stream */
struct rpcrdma_segment rc_target;
};
/*
* write chunk, and reply chunk.
*/
struct rpcrdma_write_chunk {
struct rpcrdma_segment wc_target;
};
/*
* write chunk(s), encoded as a counted array.
*/
struct rpcrdma_write_array {
__be32 wc_discrim; /* 1 indicates presence */
__be32 wc_nchunks; /* Array count */
struct rpcrdma_write_chunk wc_array[0];
};
struct rpcrdma_msg {
__be32 rm_xid; /* Mirrors the RPC header xid */
__be32 rm_vers; /* Version of this protocol */
__be32 rm_credit; /* Buffers requested/granted */
__be32 rm_type; /* Type of message (enum rpcrdma_proc) */
union {
struct { /* no chunks */
__be32 rm_empty[3]; /* 3 empty chunk lists */
} rm_nochunks;
struct { /* no chunks and padded */
__be32 rm_align; /* Padding alignment */
__be32 rm_thresh; /* Padding threshold */
__be32 rm_pempty[3]; /* 3 empty chunk lists */
} rm_padded;
struct {
__be32 rm_err;
__be32 rm_vers_low;
__be32 rm_vers_high;
} rm_error;
__be32 rm_chunks[0]; /* read, write and reply chunks */
} rm_body;
};
/*
* XDR sizes, in quads
*/


@ -71,6 +71,36 @@ TRACE_EVENT(rpc_connect_status,
__entry->status)
);
TRACE_EVENT(rpc_request,
TP_PROTO(const struct rpc_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, version)
__field(bool, async)
__string(progname, task->tk_client->cl_program->name)
__string(procname, rpc_proc_name(task))
),
TP_fast_assign(
__entry->task_id = task->tk_pid;
__entry->client_id = task->tk_client->cl_clid;
__entry->version = task->tk_client->cl_vers;
__entry->async = RPC_IS_ASYNC(task);
__assign_str(progname, task->tk_client->cl_program->name)
__assign_str(procname, rpc_proc_name(task))
),
TP_printk("task:%u@%u %sv%d %s (%ssync)",
__entry->task_id, __entry->client_id,
__get_str(progname), __entry->version,
__get_str(procname), __entry->async ? "a": ""
)
);
DECLARE_EVENT_CLASS(rpc_task_running,
TP_PROTO(const struct rpc_clnt *clnt, const struct rpc_task *task, const void *action),
@ -342,21 +372,21 @@ DECLARE_EVENT_CLASS(rpc_xprt_event,
TP_ARGS(xprt, xid, status),
TP_STRUCT__entry(
__field(__be32, xid)
__field(u32, xid)
__field(int, status)
__string(addr, xprt->address_strings[RPC_DISPLAY_ADDR])
__string(port, xprt->address_strings[RPC_DISPLAY_PORT])
),
TP_fast_assign(
__entry->xid = xid;
__entry->xid = be32_to_cpu(xid);
__entry->status = status;
__assign_str(addr, xprt->address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk("peer=[%s]:%s xid=0x%x status=%d", __get_str(addr),
__get_str(port), be32_to_cpu(__entry->xid),
TP_printk("peer=[%s]:%s xid=0x%08x status=%d", __get_str(addr),
__get_str(port), __entry->xid,
__entry->status)
);
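The recurring change in these tracepoints is to byte-swap the XID once, in
TP_fast_assign, and store a host-order u32, so TP_printk can emit it directly
with a fixed 0x%08x width instead of converting at print time. A standalone
sketch of that conversion step (record_xid() is illustrative):

#include <linux/types.h>
#include <linux/printk.h>

static void record_xid(__be32 wire_xid)
{
	u32 xid = be32_to_cpu(wire_xid);	/* swap once, at record time */

	pr_debug("xid=0x%08x\n", xid);		/* print the stored value */
}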
@ -417,7 +447,7 @@ TRACE_EVENT(xs_tcp_data_recv,
TP_STRUCT__entry(
__string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
__string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
__field(__be32, xid)
__field(u32, xid)
__field(unsigned long, flags)
__field(unsigned long, copied)
__field(unsigned int, reclen)
@ -427,15 +457,15 @@ TRACE_EVENT(xs_tcp_data_recv,
TP_fast_assign(
__assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
__entry->xid = xs->tcp_xid;
__entry->xid = be32_to_cpu(xs->tcp_xid);
__entry->flags = xs->tcp_flags;
__entry->copied = xs->tcp_copied;
__entry->reclen = xs->tcp_reclen;
__entry->offset = xs->tcp_offset;
),
TP_printk("peer=[%s]:%s xid=0x%x flags=%s copied=%lu reclen=%u offset=%lu",
__get_str(addr), __get_str(port), be32_to_cpu(__entry->xid),
TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu",
__get_str(addr), __get_str(port), __entry->xid,
rpc_show_sock_xprt_flags(__entry->flags),
__entry->copied, __entry->reclen, __entry->offset)
);
@ -457,20 +487,20 @@ TRACE_EVENT(svc_recv,
TP_STRUCT__entry(
__field(struct sockaddr *, addr)
__field(__be32, xid)
__field(u32, xid)
__field(int, status)
__field(unsigned long, flags)
),
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = status > 0 ? rqst->rq_xid : 0;
__entry->xid = status > 0 ? be32_to_cpu(rqst->rq_xid) : 0;
__entry->status = status;
__entry->flags = rqst->rq_flags;
),
TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
be32_to_cpu(__entry->xid), __entry->status,
TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s", __entry->addr,
__entry->xid, __entry->status,
show_rqstp_flags(__entry->flags))
);
@ -481,21 +511,21 @@ DECLARE_EVENT_CLASS(svc_rqst_event,
TP_ARGS(rqst),
TP_STRUCT__entry(
__field(__be32, xid)
__field(u32, xid)
__field(unsigned long, flags)
__dynamic_array(unsigned char, addr, rqst->rq_addrlen)
),
TP_fast_assign(
__entry->xid = rqst->rq_xid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->flags = rqst->rq_flags;
memcpy(__get_dynamic_array(addr),
&rqst->rq_addr, rqst->rq_addrlen);
),
TP_printk("addr=%pIScp rq_xid=0x%x flags=%s",
TP_printk("addr=%pIScp rq_xid=0x%08x flags=%s",
(struct sockaddr *)__get_dynamic_array(addr),
be32_to_cpu(__entry->xid),
__entry->xid,
show_rqstp_flags(__entry->flags))
);
@ -515,7 +545,7 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
TP_STRUCT__entry(
__field(struct sockaddr *, addr)
__field(__be32, xid)
__field(u32, xid)
__field(int, dropme)
__field(int, status)
__field(unsigned long, flags)
@ -523,13 +553,13 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
TP_fast_assign(
__entry->addr = (struct sockaddr *)&rqst->rq_addr;
__entry->xid = rqst->rq_xid;
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->status = status;
__entry->flags = rqst->rq_flags;
),
TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
__entry->addr, be32_to_cpu(__entry->xid),
TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s",
__entry->addr, __entry->xid,
__entry->status, show_rqstp_flags(__entry->flags))
);
@ -678,18 +708,19 @@ DECLARE_EVENT_CLASS(svc_deferred_event,
TP_ARGS(dr),
TP_STRUCT__entry(
__field(__be32, xid)
__field(u32, xid)
__dynamic_array(unsigned char, addr, dr->addrlen)
),
TP_fast_assign(
__entry->xid = *(__be32 *)(dr->args + (dr->xprt_hlen>>2));
__entry->xid = be32_to_cpu(*(__be32 *)(dr->args +
(dr->xprt_hlen>>2)));
memcpy(__get_dynamic_array(addr), &dr->addr, dr->addrlen);
),
TP_printk("addr=%pIScp xid=0x%x",
TP_printk("addr=%pIScp xid=0x%08x",
(struct sockaddr *)__get_dynamic_array(addr),
be32_to_cpu(__entry->xid))
__entry->xid)
);
DEFINE_EVENT(svc_deferred_event, svc_drop_deferred,


@ -1491,7 +1491,6 @@ rpc_restart_call(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(rpc_restart_call);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
const char
*rpc_proc_name(const struct rpc_task *task)
{
@ -1505,7 +1504,6 @@ const char
} else
return "no proc";
}
#endif
/*
* 0. Initial state
@ -1519,6 +1517,7 @@ call_start(struct rpc_task *task)
struct rpc_clnt *clnt = task->tk_client;
int idx = task->tk_msg.rpc_proc->p_statidx;
trace_rpc_request(task);
dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
clnt->cl_program->name, clnt->cl_vers,
rpc_proc_name(task),
@ -1586,6 +1585,7 @@ call_reserveresult(struct rpc_task *task)
switch (status) {
case -ENOMEM:
rpc_delay(task, HZ >> 2);
/* fall through */
case -EAGAIN: /* woken up; retry */
task->tk_action = call_retry_reserve;
return;
@ -1647,10 +1647,13 @@ call_refreshresult(struct rpc_task *task)
/* Use rate-limiting and a max number of retries if refresh
* had status 0 but failed to update the cred.
*/
/* fall through */
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
/* fall through */
case -EAGAIN:
status = -EACCES;
/* fall through */
case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
@ -1911,6 +1914,7 @@ call_connect_status(struct rpc_task *task)
task->tk_action = call_bind;
return;
}
/* fall through */
case -ECONNRESET:
case -ECONNABORTED:
case -ENETUNREACH:
@ -1924,6 +1928,7 @@ call_connect_status(struct rpc_task *task)
break;
/* retry with existing socket, after a delay */
rpc_delay(task, 3*HZ);
/* fall through */
case -EAGAIN:
/* Check for timeouts before looping back to call_bind */
case -ETIMEDOUT:
@ -2025,6 +2030,7 @@ call_transmit_status(struct rpc_task *task)
rpc_exit(task, task->tk_status);
break;
}
/* fall through */
case -ECONNRESET:
case -ECONNABORTED:
case -EADDRINUSE:
@ -2145,6 +2151,7 @@ call_status(struct rpc_task *task)
* were a timeout.
*/
rpc_delay(task, 3*HZ);
/* fall through */
case -ETIMEDOUT:
task->tk_action = call_timeout;
break;
@ -2152,14 +2159,17 @@ call_status(struct rpc_task *task)
case -ECONNRESET:
case -ECONNABORTED:
rpc_force_rebind(clnt);
/* fall through */
case -EADDRINUSE:
rpc_delay(task, 3*HZ);
/* fall through */
case -EPIPE:
case -ENOTCONN:
task->tk_action = call_bind;
break;
case -ENOBUFS:
rpc_delay(task, HZ>>2);
/* fall through */
case -EAGAIN:
task->tk_action = call_transmit;
break;


@ -1410,8 +1410,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
return PTR_ERR(gssd_dentry);
}
dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n",
net, NET_NAME(net));
dprintk("RPC: sending pipefs MOUNT notification for net %x%s\n",
net->ns.inum, NET_NAME(net));
mutex_lock(&sn->pipefs_sb_lock);
sn->pipefs_sb = sb;
err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
@ -1462,8 +1462,8 @@ static void rpc_kill_sb(struct super_block *sb)
goto out;
}
sn->pipefs_sb = NULL;
dprintk("RPC: sending pipefs UMOUNT notification for net %p%s\n",
net, NET_NAME(net));
dprintk("RPC: sending pipefs UMOUNT notification for net %x%s\n",
net->ns.inum, NET_NAME(net));
blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
RPC_PIPEFS_UMOUNT,
sb);


@ -216,9 +216,9 @@ static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
smp_wmb();
sn->rpcb_users = 1;
dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
"%p, rpcb_local_clnt4: %p) for net %p%s\n",
sn->rpcb_local_clnt, sn->rpcb_local_clnt4,
net, (net == &init_net) ? " (init_net)" : "");
"%p, rpcb_local_clnt4: %p) for net %x%s\n",
sn->rpcb_local_clnt, sn->rpcb_local_clnt4,
net->ns.inum, (net == &init_net) ? " (init_net)" : "");
}
/*


@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
static void rpc_set_active(struct rpc_task *task)
{
trace_rpc_task_begin(task->tk_client, task, NULL);
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
trace_rpc_task_begin(task->tk_client, task, NULL);
}
/*


@ -65,10 +65,13 @@ static __net_init int sunrpc_init_net(struct net *net)
static __net_exit void sunrpc_exit_net(struct net *net)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
rpc_pipefs_exit_net(net);
unix_gid_cache_destroy(net);
ip_map_cache_destroy(net);
rpc_proc_exit(net);
WARN_ON_ONCE(!list_empty(&sn->all_clients));
}
static struct pernet_operations sunrpc_net_ops = {


@ -1139,6 +1139,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
case -EAGAIN:
xprt_add_backlog(xprt, task);
dprintk("RPC: waiting for request slot\n");
/* fall through */
default:
task->tk_status = -EAGAIN;
}


@ -43,7 +43,7 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
req = rpcrdma_create_req(r_xprt);
if (IS_ERR(req))
return PTR_ERR(req);
req->rl_backchannel = true;
__set_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags);
rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
DMA_TO_DEVICE, GFP_KERNEL);
@ -223,8 +223,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
*p++ = xdr_zero;
*p = xdr_zero;
if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
&rqst->rq_snd_buf, rpcrdma_noch))
if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
&rqst->rq_snd_buf, rpcrdma_noch))
return -EIO;
return 0;
}


@ -306,28 +306,9 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
}
}
/* Use a slow, safe mechanism to invalidate all memory regions
* that were registered for "req".
*/
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
bool sync)
{
struct rpcrdma_mw *mw;
while (!list_empty(&req->rl_registered)) {
mw = rpcrdma_pop_mw(&req->rl_registered);
if (sync)
fmr_op_recover_mr(mw);
else
rpcrdma_defer_mr_recovery(mw);
}
}
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
.ro_unmap_sync = fmr_op_unmap_sync,
.ro_unmap_safe = fmr_op_unmap_safe,
.ro_recover_mr = fmr_op_recover_mr,
.ro_open = fmr_op_open,
.ro_maxpages = fmr_op_maxpages,


@ -420,7 +420,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
IB_ACCESS_REMOTE_READ;
rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
if (rc)
goto out_senderr;
@ -508,12 +507,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
f->fr_cqe.done = frwr_wc_localinv_wake;
reinit_completion(&f->fr_linv_done);
/* Initialize CQ count, since there is always a signaled
* WR being posted here. The new cqcount depends on how
* many SQEs are about to be consumed.
*/
rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
/* Transport disconnect drains the receive CQ before it
* replaces the QP. The RPC reply handler won't call us
* unless ri_id->qp is a valid pointer.
@ -546,7 +539,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
/* Find and reset the MRs in the LOCAL_INV WRs that did not
* get posted.
*/
rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
while (bad_wr) {
f = container_of(bad_wr, struct rpcrdma_frmr,
fr_invwr);
@ -559,28 +551,9 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
goto unmap;
}
/* Use a slow, safe mechanism to invalidate all memory regions
* that were registered for "req".
*/
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
bool sync)
{
struct rpcrdma_mw *mw;
while (!list_empty(&req->rl_registered)) {
mw = rpcrdma_pop_mw(&req->rl_registered);
if (sync)
frwr_op_recover_mr(mw);
else
rpcrdma_defer_mr_recovery(mw);
}
}
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_map = frwr_op_map,
.ro_unmap_sync = frwr_op_unmap_sync,
.ro_unmap_safe = frwr_op_unmap_safe,
.ro_recover_mr = frwr_op_recover_mr,
.ro_open = frwr_op_open,
.ro_maxpages = frwr_op_maxpages,


@ -1,4 +1,5 @@
/*
* Copyright (c) 2014-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -75,11 +76,11 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
/* Maximum Read list size */
maxsegs += 2; /* segment for head and tail buffers */
size = maxsegs * sizeof(struct rpcrdma_read_chunk);
size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
/* Minimal Read chunk size */
size += sizeof(__be32); /* segment count */
size += sizeof(struct rpcrdma_segment);
size += rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */
dprintk("RPC: %s: max call header size = %u\n",
@ -102,7 +103,7 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
/* Maximum Write list size */
maxsegs += 2; /* segment for head and tail buffers */
size = sizeof(__be32); /* segment count */
size += maxsegs * sizeof(struct rpcrdma_segment);
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */
dprintk("RPC: %s: max reply header size = %u\n",
@ -511,27 +512,60 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
return 0;
}
/* Prepare the RPC-over-RDMA header SGE.
/**
* rpcrdma_unmap_sendctx - DMA-unmap Send buffers
* @sc: sendctx containing SGEs to unmap
*
*/
void
rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc)
{
struct rpcrdma_ia *ia = &sc->sc_xprt->rx_ia;
struct ib_sge *sge;
unsigned int count;
dprintk("RPC: %s: unmapping %u sges for sc=%p\n",
__func__, sc->sc_unmap_count, sc);
/* The first two SGEs contain the transport header and
* the inline buffer. These are always left mapped so
* they can be cheaply re-used.
*/
sge = &sc->sc_sges[2];
for (count = sc->sc_unmap_count; count; ++sge, --count)
ib_dma_unmap_page(ia->ri_device,
sge->addr, sge->length, DMA_TO_DEVICE);
if (test_and_clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &sc->sc_req->rl_flags)) {
smp_mb__after_atomic();
wake_up_bit(&sc->sc_req->rl_flags, RPCRDMA_REQ_F_TX_RESOURCES);
}
}
/* Prepare an SGE for the RPC-over-RDMA transport header.
*/
static bool
rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
u32 len)
{
struct rpcrdma_sendctx *sc = req->rl_sendctx;
struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
struct ib_sge *sge = &req->rl_send_sge[0];
struct ib_sge *sge = sc->sc_sges;
if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
if (!__rpcrdma_dma_map_regbuf(ia, rb))
return false;
sge->addr = rdmab_addr(rb);
sge->lkey = rdmab_lkey(rb);
}
if (!rpcrdma_dma_map_regbuf(ia, rb))
goto out_regbuf;
sge->addr = rdmab_addr(rb);
sge->length = len;
sge->lkey = rdmab_lkey(rb);
ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr,
sge->length, DMA_TO_DEVICE);
req->rl_send_wr.num_sge++;
sc->sc_wr.num_sge++;
return true;
out_regbuf:
pr_err("rpcrdma: failed to DMA map a Send buffer\n");
return false;
}
/* Prepare the Send SGEs. The head and tail iovec, and each entry
@ -541,10 +575,11 @@ static bool
rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
struct rpcrdma_sendctx *sc = req->rl_sendctx;
unsigned int sge_no, page_base, len, remaining;
struct rpcrdma_regbuf *rb = req->rl_sendbuf;
struct ib_device *device = ia->ri_device;
struct ib_sge *sge = req->rl_send_sge;
struct ib_sge *sge = sc->sc_sges;
u32 lkey = ia->ri_pd->local_dma_lkey;
struct page *page, **ppages;
@ -552,7 +587,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
* DMA-mapped. Sync the content that has changed.
*/
if (!rpcrdma_dma_map_regbuf(ia, rb))
return false;
goto out_regbuf;
sge_no = 1;
sge[sge_no].addr = rdmab_addr(rb);
sge[sge_no].length = xdr->head[0].iov_len;
@ -607,7 +642,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
sge[sge_no].length = len;
sge[sge_no].lkey = lkey;
req->rl_mapped_sges++;
sc->sc_unmap_count++;
ppages++;
remaining -= len;
page_base = 0;
@ -633,56 +668,61 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
goto out_mapping_err;
sge[sge_no].length = len;
sge[sge_no].lkey = lkey;
req->rl_mapped_sges++;
sc->sc_unmap_count++;
}
out:
req->rl_send_wr.num_sge = sge_no + 1;
sc->sc_wr.num_sge += sge_no;
if (sc->sc_unmap_count)
__set_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
return true;
out_regbuf:
pr_err("rpcrdma: failed to DMA map a Send buffer\n");
return false;
out_mapping_overflow:
rpcrdma_unmap_sendctx(sc);
pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
return false;
out_mapping_err:
rpcrdma_unmap_sendctx(sc);
pr_err("rpcrdma: Send mapping error\n");
return false;
}
bool
rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
u32 hdrlen, struct xdr_buf *xdr,
enum rpcrdma_chunktype rtype)
/**
* rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
* @r_xprt: controlling transport
* @req: context of RPC Call being marshalled
* @hdrlen: size of transport header, in bytes
* @xdr: xdr_buf containing RPC Call
* @rtype: chunk type being encoded
*
* Returns 0 on success; otherwise a negative errno is returned.
*/
int
rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req, u32 hdrlen,
struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
{
req->rl_send_wr.num_sge = 0;
req->rl_mapped_sges = 0;
req->rl_sendctx = rpcrdma_sendctx_get_locked(&r_xprt->rx_buf);
if (!req->rl_sendctx)
return -ENOBUFS;
req->rl_sendctx->sc_wr.num_sge = 0;
req->rl_sendctx->sc_unmap_count = 0;
req->rl_sendctx->sc_req = req;
__clear_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags);
if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
goto out_map;
if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
return -EIO;
if (rtype != rpcrdma_areadch)
if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
goto out_map;
if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
return -EIO;
return true;
out_map:
pr_err("rpcrdma: failed to DMA map a Send buffer\n");
return false;
}
void
rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
struct ib_device *device = ia->ri_device;
struct ib_sge *sge;
int count;
sge = &req->rl_send_sge[2];
for (count = req->rl_mapped_sges; count--; sge++)
ib_dma_unmap_page(device, sge->addr, sge->length,
DMA_TO_DEVICE);
req->rl_mapped_sges = 0;
return 0;
}
/**
@ -833,12 +873,10 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
transfertypes[rtype], transfertypes[wtype],
xdr_stream_pos(xdr));
if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
xdr_stream_pos(xdr),
&rqst->rq_snd_buf, rtype)) {
ret = -EIO;
ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
&rqst->rq_snd_buf, rtype);
if (ret)
goto out_err;
}
return 0;
out_err:
@ -970,14 +1008,13 @@ rpcrdma_mark_remote_invalidation(struct list_head *mws,
* straightforward to check the RPC header's direction field.
*/
static bool
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
__be32 xid, __be32 proc)
rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
{
struct xdr_stream *xdr = &rep->rr_stream;
__be32 *p;
if (proc != rdma_msg)
if (rep->rr_proc != rdma_msg)
return false;
/* Peek at stream contents without advancing. */
@ -992,7 +1029,7 @@ rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
return false;
/* RPC header */
if (*p++ != xid)
if (*p++ != rep->rr_xid)
return false;
if (*p != cpu_to_be32(RPC_CALL))
return false;
@ -1212,78 +1249,21 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
return -EREMOTEIO;
}
/* Process received RPC/RDMA messages.
*
* Errors must result in the RPC task either being awakened, or
* allowed to timeout, to discover the errors at that time.
/* Perform XID lookup, reconstruction of the RPC reply, and
* RPC completion while holding the transport lock to ensure
* the rep, rqst, and rq_task pointers remain stable.
*/
void
rpcrdma_reply_handler(struct work_struct *work)
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
{
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct xdr_stream *xdr = &rep->rr_stream;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
__be32 *p, xid, vers, proc;
struct rpc_rqst *rqst = rep->rr_rqst;
unsigned long cwnd;
int status;
dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
if (rep->rr_hdrbuf.head[0].iov_len == 0)
goto out_badstatus;
xdr_init_decode(xdr, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base);
/* Fixed transport header fields */
p = xdr_inline_decode(xdr, 4 * sizeof(*p));
if (unlikely(!p))
goto out_shortreply;
xid = *p++;
vers = *p++;
p++; /* credits */
proc = *p++;
if (rpcrdma_is_bcall(r_xprt, rep, xid, proc))
return;
/* Match incoming rpcrdma_rep to an rpcrdma_req to
* get context for handling any incoming chunks.
*/
spin_lock(&xprt->recv_lock);
rqst = xprt_lookup_rqst(xprt, xid);
if (!rqst)
goto out_norqst;
xprt_pin_rqst(rqst);
spin_unlock(&xprt->recv_lock);
req = rpcr_to_rdmar(rqst);
req->rl_reply = rep;
dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
__func__, rep, req, be32_to_cpu(xid));
/* Invalidate and unmap the data payloads before waking the
* waiting application. This guarantees the memory regions
* are properly fenced from the server before the application
* accesses the data. It also ensures proper send flow control:
* waking the next RPC waits until this RPC has relinquished
* all its Send Queue entries.
*/
if (!list_empty(&req->rl_registered)) {
rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
&req->rl_registered);
}
xprt->reestablish_timeout = 0;
if (vers != rpcrdma_version)
goto out_badversion;
switch (proc) {
switch (rep->rr_proc) {
case rdma_msg:
status = rpcrdma_decode_msg(r_xprt, rep, rqst);
break;
@ -1302,15 +1282,137 @@ rpcrdma_reply_handler(struct work_struct *work)
out:
spin_lock(&xprt->recv_lock);
cwnd = xprt->cwnd;
xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
if (xprt->cwnd > cwnd)
xprt_release_rqst_cong(rqst->rq_task);
xprt_complete_rqst(rqst->rq_task, status);
xprt_unpin_rqst(rqst);
spin_unlock(&xprt->recv_lock);
dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
__func__, xprt, rqst, status);
return;
/* If the incoming reply terminated a pending RPC, the next
* RPC call will post a replacement receive buffer as it is
* being marshaled.
*/
out_badheader:
dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
rqst->rq_task->tk_pid, __func__, be32_to_cpu(rep->rr_proc));
r_xprt->rx_stats.bad_reply_count++;
status = -EIO;
goto out;
}
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
/* Invalidate and unmap the data payloads before waking
* the waiting application. This guarantees the memory
* regions are properly fenced from the server before the
* application accesses the data. It also ensures proper
* send flow control: waking the next RPC waits until this
* RPC has relinquished all its Send Queue entries.
*/
if (!list_empty(&req->rl_registered))
r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
&req->rl_registered);
/* Ensure that any DMA mapped pages associated with
* the Send of the RPC Call have been unmapped before
* allowing the RPC to complete. This protects argument
* memory not controlled by the RPC client from being
* re-used before we're done with it.
*/
if (test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
r_xprt->rx_stats.reply_waits_for_send++;
out_of_line_wait_on_bit(&req->rl_flags,
RPCRDMA_REQ_F_TX_RESOURCES,
bit_wait,
TASK_UNINTERRUPTIBLE);
}
}
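The wait here pairs with the wake in rpcrdma_unmap_sendctx(): the completion
path clears RPCRDMA_REQ_F_TX_RESOURCES and calls wake_up_bit(), while this
path sleeps until the bit drops. A minimal sketch of the generic wait-bit
handshake (FLAG_BUSY and my_flags are illustrative):

#include <linux/wait_bit.h>

#define FLAG_BUSY	0		/* illustrative bit number */
static unsigned long my_flags;

static void wait_for_idle(void)		/* waiter side */
{
	wait_on_bit(&my_flags, FLAG_BUSY, TASK_UNINTERRUPTIBLE);
}

static void mark_idle(void)		/* completion side */
{
	clear_bit(FLAG_BUSY, &my_flags);
	smp_mb__after_atomic();		/* order the clear before the wakeup */
	wake_up_bit(&my_flags, FLAG_BUSY);
}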
/* Reply handling runs in the poll worker thread. Anything that
* might wait is deferred to a separate workqueue.
*/
void rpcrdma_deferred_completion(struct work_struct *work)
{
struct rpcrdma_rep *rep =
container_of(work, struct rpcrdma_rep, rr_work);
struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
rpcrdma_release_rqst(rep->rr_rxprt, req);
rpcrdma_complete_rqst(rep);
}
/* Process received RPC/RDMA messages.
*
* Errors must result in the RPC task either being awakened, or
* allowed to timeout, to discover the errors at that time.
*/
void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
u32 credits;
__be32 *p;
dprintk("RPC: %s: incoming rep %p\n", __func__, rep);
if (rep->rr_hdrbuf.head[0].iov_len == 0)
goto out_badstatus;
xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
rep->rr_hdrbuf.head[0].iov_base);
/* Fixed transport header fields */
p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
if (unlikely(!p))
goto out_shortreply;
rep->rr_xid = *p++;
rep->rr_vers = *p++;
credits = be32_to_cpu(*p++);
rep->rr_proc = *p++;
if (rep->rr_vers != rpcrdma_version)
goto out_badversion;
if (rpcrdma_is_bcall(r_xprt, rep))
return;
/* Match incoming rpcrdma_rep to an rpcrdma_req to
* get context for handling any incoming chunks.
*/
spin_lock(&xprt->recv_lock);
rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
if (!rqst)
goto out_norqst;
xprt_pin_rqst(rqst);
if (credits == 0)
credits = 1; /* don't deadlock */
else if (credits > buf->rb_max_requests)
credits = buf->rb_max_requests;
buf->rb_credits = credits;
spin_unlock(&xprt->recv_lock);
req = rpcr_to_rdmar(rqst);
req->rl_reply = rep;
rep->rr_rqst = rqst;
clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
__func__, rep, req, be32_to_cpu(rep->rr_xid));
if (list_empty(&req->rl_registered) &&
!test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags))
rpcrdma_complete_rqst(rep);
else
queue_work(rpcrdma_receive_wq, &rep->rr_work);
return;
out_badstatus:
@ -1321,37 +1423,22 @@ rpcrdma_reply_handler(struct work_struct *work)
}
return;
/* If the incoming reply terminated a pending RPC, the next
* RPC call will post a replacement receive buffer as it is
* being marshaled.
*/
out_badversion:
dprintk("RPC: %s: invalid version %d\n",
__func__, be32_to_cpu(vers));
status = -EIO;
r_xprt->rx_stats.bad_reply_count++;
goto out;
__func__, be32_to_cpu(rep->rr_vers));
goto repost;
out_badheader:
dprintk("RPC: %5u %s: invalid rpcrdma reply (type %u)\n",
rqst->rq_task->tk_pid, __func__, be32_to_cpu(proc));
r_xprt->rx_stats.bad_reply_count++;
status = -EIO;
goto out;
/* The req was still available, but by the time the recv_lock
* was acquired, the rqst and task had been released. Thus the RPC
* has already been terminated.
/* The RPC transaction has already been terminated, or the header
* is corrupt.
*/
out_norqst:
spin_unlock(&xprt->recv_lock);
dprintk("RPC: %s: no match for incoming xid 0x%08x\n",
__func__, be32_to_cpu(xid));
__func__, be32_to_cpu(rep->rr_xid));
goto repost;
out_shortreply:
dprintk("RPC: %s: short/invalid reply\n", __func__);
goto repost;
/* If no pending RPC transaction was matched, post a replacement
* receive buffer before returning.


@ -1,4 +1,5 @@
/*
* Copyright (c) 2014-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -678,16 +679,14 @@ xprt_rdma_free(struct rpc_task *task)
struct rpc_rqst *rqst = task->tk_rqstp;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
if (req->rl_backchannel)
if (test_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags))
return;
dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
if (!list_empty(&req->rl_registered))
ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
rpcrdma_unmap_sges(ia, req);
if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
rpcrdma_release_rqst(r_xprt, req);
rpcrdma_buffer_put(req);
}
@ -728,7 +727,8 @@ xprt_rdma_send_request(struct rpc_task *task)
/* On retransmit, remove any previously registered chunks */
if (unlikely(!list_empty(&req->rl_registered)))
r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
&req->rl_registered);
rc = rpcrdma_marshal_req(r_xprt, rqst);
if (rc < 0)
@ -742,6 +742,7 @@ xprt_rdma_send_request(struct rpc_task *task)
goto drop_connection;
req->rl_connect_cookie = xprt->connect_cookie;
set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
goto drop_connection;
@ -789,11 +790,13 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
r_xprt->rx_stats.failed_marshal_count,
r_xprt->rx_stats.bad_reply_count,
r_xprt->rx_stats.nomsg_call_count);
seq_printf(seq, "%lu %lu %lu %lu\n",
seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
r_xprt->rx_stats.mrs_recovered,
r_xprt->rx_stats.mrs_orphaned,
r_xprt->rx_stats.mrs_allocated,
r_xprt->rx_stats.local_inv_needed);
r_xprt->rx_stats.local_inv_needed,
r_xprt->rx_stats.empty_sendctx_q,
r_xprt->rx_stats.reply_waits_for_send);
}
static int


@ -1,4 +1,5 @@
/*
* Copyright (c) 2014-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -49,9 +50,10 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <asm-generic/barrier.h>
#include <asm/bitops.h>
#include <rdma/ib_cm.h>
@ -73,7 +75,7 @@ static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);
static struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
int
rpcrdma_alloc_wq(void)
@ -126,30 +128,17 @@ rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_cqe *cqe = wc->wr_cqe;
struct rpcrdma_sendctx *sc =
container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
/* WARNING: Only wr_cqe and status are reliable at this point */
if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
}
/* Perform basic sanity checking to avoid using garbage
* to update the credit grant value.
*/
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
__be32 *p = rep->rr_rdmabuf->rg_base;
u32 credits;
credits = be32_to_cpup(p + 2);
if (credits == 0)
credits = 1; /* don't deadlock */
else if (credits > buffer->rb_max_requests)
credits = buffer->rb_max_requests;
atomic_set(&buffer->rb_credits, credits);
rpcrdma_sendctx_put_locked(sc);
}
/**
@ -181,11 +170,8 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
rdmab_addr(rep->rr_rdmabuf),
wc->byte_len, DMA_FROM_DEVICE);
if (wc->byte_len >= RPCRDMA_HDRLEN_ERR)
rpcrdma_update_granted_credits(rep);
out_schedule:
queue_work(rpcrdma_receive_wq, &rep->rr_work);
rpcrdma_reply_handler(rep);
return;
out_fail:
@ -295,7 +281,7 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_DISCONNECTED:
connstate = -ECONNABORTED;
connected:
atomic_set(&xprt->rx_buf.rb_credits, 1);
xprt->rx_buf.rb_credits = 1;
ep->rep_connected = connstate;
rpcrdma_conn_func(ep);
wake_up_all(&ep->rep_connect_wait);
@ -564,16 +550,15 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
ep->rep_attr.cap.max_recv_sge);
/* set trigger for requesting send completion */
ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
if (ep->rep_cqinit <= 2)
ep->rep_cqinit = 0; /* always signal? */
rpcrdma_init_cqcount(ep, 0);
ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
cdata->max_requests >> 2);
ep->rep_send_count = ep->rep_send_batch;
init_waitqueue_head(&ep->rep_connect_wait);
INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
sendcq = ib_alloc_cq(ia->ri_device, NULL,
ep->rep_attr.cap.max_send_wr + 1,
0, IB_POLL_SOFTIRQ);
1, IB_POLL_WORKQUEUE);
if (IS_ERR(sendcq)) {
rc = PTR_ERR(sendcq);
dprintk("RPC: %s: failed to create send CQ: %i\n",
@ -583,7 +568,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
recvcq = ib_alloc_cq(ia->ri_device, NULL,
ep->rep_attr.cap.max_recv_wr + 1,
0, IB_POLL_SOFTIRQ);
0, IB_POLL_WORKQUEUE);
if (IS_ERR(recvcq)) {
rc = PTR_ERR(recvcq);
dprintk("RPC: %s: failed to create recv CQ: %i\n",
@ -846,6 +831,168 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
ib_drain_qp(ia->ri_id->qp);
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
* lock-free.
*
* Consumer is the code path that posts Sends. This path dequeues a
* sendctx for use by a Send operation. Multiple consumer threads
* are serialized by the RPC transport lock, which allows only one
* ->send_request call at a time.
*
* Producer is the code path that handles Send completions. This path
* enqueues a sendctx that has been completed. Multiple producer
* threads are serialized by the ib_poll_cq() function.
*/
/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
* queue activity, and ib_drain_qp has flushed all remaining Send
* requests.
*/
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
unsigned long i;
for (i = 0; i <= buf->rb_sc_last; i++)
kfree(buf->rb_sc_ctxs[i]);
kfree(buf->rb_sc_ctxs);
}
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
struct rpcrdma_sendctx *sc;
sc = kzalloc(sizeof(*sc) +
ia->ri_max_send_sges * sizeof(struct ib_sge),
GFP_KERNEL);
if (!sc)
return NULL;
sc->sc_wr.wr_cqe = &sc->sc_cqe;
sc->sc_wr.sg_list = sc->sc_sges;
sc->sc_wr.opcode = IB_WR_SEND;
sc->sc_cqe.done = rpcrdma_wc_send;
return sc;
}
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_sendctx *sc;
unsigned long i;
/* Maximum number of concurrent outstanding Send WRs. Capping
* the circular queue size stops Send Queue overflow by causing
* the ->send_request call to fail temporarily before too many
* Sends are posted.
*/
i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
if (!buf->rb_sc_ctxs)
return -ENOMEM;
buf->rb_sc_last = i - 1;
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
if (!sc)
goto out_destroy;
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
}
return 0;
out_destroy:
rpcrdma_sendctxs_destroy(buf);
return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
* power of two, thus the helpers in circ_buf.h cannot be used.
* The other option is to use modulus (%), which can be expensive.
*/
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
unsigned long item)
{
return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
/**
* rpcrdma_sendctx_get_locked - Acquire a send context
* @buf: transport buffers from which to acquire an unused context
*
* Returns pointer to a free send completion context; or NULL if
* the queue is empty.
*
* Usage: Called to acquire an SGE array before preparing a Send WR.
*
* The caller serializes calls to this function (per rpcrdma_buffer),
* and provides an effective memory barrier that flushes the new value
* of rb_sc_head.
*/
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
struct rpcrdma_xprt *r_xprt;
struct rpcrdma_sendctx *sc;
unsigned long next_head;
next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);
if (next_head == READ_ONCE(buf->rb_sc_tail))
goto out_emptyq;
/* ORDER: item must be accessed _before_ head is updated */
sc = buf->rb_sc_ctxs[next_head];
/* Releasing the lock in the caller acts as a memory
* barrier that flushes rb_sc_head.
*/
buf->rb_sc_head = next_head;
return sc;
out_emptyq:
/* The queue is "empty" if there have not been enough Send
* completions recently. This is a sign the Send Queue is
* backing up. Cause the caller to pause and try again.
*/
dprintk("RPC: %s: empty sendctx queue\n", __func__);
r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
r_xprt->rx_stats.empty_sendctx_q++;
return NULL;
}
/**
* rpcrdma_sendctx_put_locked - Release a send context
* @sc: send context to release
*
* Usage: Called from Send completion to return a sendctx
* to the queue.
*
* The caller serializes calls to this function (per rpcrdma_buffer).
*/
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
unsigned long next_tail;
/* Unmap SGEs of previously completed but unsignaled
* Sends by walking up the queue until @sc is found.
*/
next_tail = buf->rb_sc_tail;
do {
next_tail = rpcrdma_sendctx_next(buf, next_tail);
/* ORDER: item must be accessed _before_ tail is updated */
rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);
} while (buf->rb_sc_ctxs[next_tail] != sc);
/* Paired with READ_ONCE */
smp_store_release(&buf->rb_sc_tail, next_tail);
}
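To make the head/tail pairing concrete, here is a minimal userspace sketch of the same single-producer/single-consumer ring, using C11 atomics where the kernel code relies on the transport lock, READ_ONCE(), and smp_store_release(). None of these names are kernel or verbs APIs; the sketch also assumes the producer only returns items the consumer previously took, the same invariant that keeps the sendctx queue from overflowing.

    #include <stdatomic.h>
    #include <stddef.h>

    #define RING_SLOTS 8    /* arbitrary; need not be a power of two */

    struct ring {
            _Atomic unsigned long head;     /* advanced only by the consumer */
            _Atomic unsigned long tail;     /* advanced only by the producer */
            void *slots[RING_SLOTS];
    };

    static unsigned long ring_next(unsigned long item)
    {
            return item < RING_SLOTS - 1 ? item + 1 : 0;
    }

    /* Consumer: the analogue of rpcrdma_sendctx_get_locked() */
    static void *ring_get(struct ring *r)
    {
            unsigned long next = ring_next(atomic_load_explicit(&r->head,
                                                    memory_order_relaxed));
            void *item;

            /* Queue is "empty" when the head would catch up with the tail */
            if (next == atomic_load_explicit(&r->tail, memory_order_acquire))
                    return NULL;
            item = r->slots[next];
            /* Release store stands in for the transport lock's barrier */
            atomic_store_explicit(&r->head, next, memory_order_release);
            return item;
    }

    /* Producer: the analogue of rpcrdma_sendctx_put_locked() */
    static void ring_put(struct ring *r, void *item)
    {
            unsigned long next = ring_next(atomic_load_explicit(&r->tail,
                                                    memory_order_relaxed));

            r->slots[next] = item;
            /* Pairs with the acquire load of tail in ring_get() */
            atomic_store_explicit(&r->tail, next, memory_order_release);
    }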
static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
@@ -941,13 +1088,8 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
spin_lock(&buffer->rb_reqslock);
list_add(&req->rl_all, &buffer->rb_allreqs);
spin_unlock(&buffer->rb_reqslock);
req->rl_cqe.done = rpcrdma_wc_send;
req->rl_buffer = &r_xprt->rx_buf;
INIT_LIST_HEAD(&req->rl_registered);
req->rl_send_wr.next = NULL;
req->rl_send_wr.wr_cqe = &req->rl_cqe;
req->rl_send_wr.sg_list = req->rl_send_sge;
req->rl_send_wr.opcode = IB_WR_SEND;
return req;
}
@@ -974,7 +1116,7 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
rep->rr_cqe.done = rpcrdma_wc_receive;
rep->rr_rxprt = r_xprt;
INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
rep->rr_recv_wr.next = NULL;
rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
@@ -995,7 +1137,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
buf->rb_max_requests = r_xprt->rx_data.max_requests;
buf->rb_bc_srv_max_requests = 0;
atomic_set(&buf->rb_credits, 1);
spin_lock_init(&buf->rb_mwlock);
spin_lock_init(&buf->rb_lock);
spin_lock_init(&buf->rb_recovery_lock);
@@ -1022,7 +1163,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
rc = PTR_ERR(req);
goto out;
}
req->rl_backchannel = false;
list_add(&req->rl_list, &buf->rb_send_bufs);
}
@@ -1040,6 +1180,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
list_add(&rep->rr_list, &buf->rb_recv_bufs);
}
rc = rpcrdma_sendctxs_create(r_xprt);
if (rc)
goto out;
return 0;
out:
rpcrdma_buffer_destroy(buf);
@@ -1116,6 +1260,8 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
cancel_delayed_work_sync(&buf->rb_recovery_worker);
cancel_delayed_work_sync(&buf->rb_refresh_worker);
rpcrdma_sendctxs_destroy(buf);
while (!list_empty(&buf->rb_recv_bufs)) {
struct rpcrdma_rep *rep;
@@ -1231,7 +1377,6 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
struct rpcrdma_buffer *buffers = req->rl_buffer;
struct rpcrdma_rep *rep = req->rl_reply;
req->rl_send_wr.num_sge = 0;
req->rl_reply = NULL;
spin_lock(&buffers->rb_lock);
@@ -1363,7 +1508,7 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
struct rpcrdma_ep *ep,
struct rpcrdma_req *req)
{
struct ib_send_wr *send_wr = &req->rl_send_wr;
struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
struct ib_send_wr *send_wr_fail;
int rc;
@@ -1377,7 +1522,14 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
dprintk("RPC: %s: posting %d s/g entries\n",
__func__, send_wr->num_sge);
rpcrdma_set_signaled(ep, send_wr);
if (!ep->rep_send_count ||
test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
send_wr->send_flags |= IB_SEND_SIGNALED;
ep->rep_send_count = ep->rep_send_batch;
} else {
send_wr->send_flags &= ~IB_SEND_SIGNALED;
--ep->rep_send_count;
}
rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
if (rc)
goto out_postsend_err;
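For illustration, with rep_send_batch at its RPCRDMA_MAX_SEND_BATCH cap of 7: seven consecutive Sends are posted unsignaled, the eighth is posted with IB_SEND_SIGNALED, and that single completion returns (and unmaps) the whole run of sendctxs via rpcrdma_sendctx_put_locked(). A request with RPCRDMA_REQ_F_TX_RESOURCES set is always signaled.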


@@ -1,4 +1,5 @@
/*
* Copyright (c) 2014-2017 Oracle. All rights reserved.
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -93,8 +94,8 @@ enum {
*/
struct rpcrdma_ep {
atomic_t rep_cqcount;
int rep_cqinit;
unsigned int rep_send_count;
unsigned int rep_send_batch;
int rep_connected;
struct ib_qp_init_attr rep_attr;
wait_queue_head_t rep_connect_wait;
@@ -104,25 +105,6 @@ struct rpcrdma_ep {
struct delayed_work rep_connect_worker;
};
static inline void
rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
{
atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
}
/* To update send queue accounting, provider must take a
* send completion every now and then.
*/
static inline void
rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
{
send_wr->send_flags = 0;
if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
rpcrdma_init_cqcount(ep, 0);
send_wr->send_flags = IB_SEND_SIGNALED;
}
}
/* Pre-allocate extra Work Requests for handling backward receives
* and sends. This is a fixed value because the Work Queues are
* allocated when the forward channel is set up.
@@ -164,12 +146,6 @@ rdmab_lkey(struct rpcrdma_regbuf *rb)
return rb->rg_iov.lkey;
}
static inline struct rpcrdma_msg *
rdmab_to_msg(struct rpcrdma_regbuf *rb)
{
return (struct rpcrdma_msg *)rb->rg_base;
}
static inline struct ib_device *
rdmab_device(struct rpcrdma_regbuf *rb)
{
@@ -202,22 +178,24 @@ enum {
};
/*
* struct rpcrdma_rep -- this structure encapsulates state required to recv
* and complete a reply, asynchronously. It needs several pieces of
* state:
* o recv buffer (posted to provider)
* o ib_sge (also donated to provider)
* o status of reply (length, success or not)
* o bookkeeping state to get run by reply handler (list, etc)
* struct rpcrdma_rep -- this structure encapsulates state required
* to receive and complete an RPC Reply, asynchronously. It needs
* several pieces of state:
*
* These are allocated during initialization, per-transport instance.
* o receive buffer and ib_sge (donated to provider)
* o status of receive (success or not, length, inv rkey)
* o bookkeeping state to get run by reply handler (XDR stream)
*
* N of these are associated with a transport instance, and stored in
* struct rpcrdma_buffer. N is the max number of outstanding requests.
* These structures are allocated during transport initialization.
* N of these are associated with a transport instance, managed by
* struct rpcrdma_buffer. N is the max number of outstanding RPCs.
*/
struct rpcrdma_rep {
struct ib_cqe rr_cqe;
__be32 rr_xid;
__be32 rr_vers;
__be32 rr_proc;
int rr_wc_flags;
u32 rr_inv_rkey;
struct rpcrdma_regbuf *rr_rdmabuf;
@@ -225,10 +203,34 @@ struct rpcrdma_rep {
struct work_struct rr_work;
struct xdr_buf rr_hdrbuf;
struct xdr_stream rr_stream;
struct rpc_rqst *rr_rqst;
struct list_head rr_list;
struct ib_recv_wr rr_recv_wr;
};
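Evidently the new rr_xid, rr_vers, and rr_proc fields cache the fixed RPC header words parsed at receive time, and rr_rqst records the matched request, so the bulk of reply processing can move to the rpcrdma_deferred_completion() work item set up in rpcrdma_create_rep() above.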
/* struct rpcrdma_sendctx - DMA mapped SGEs to unmap after Send completes
*/
struct rpcrdma_req;
struct rpcrdma_xprt;
struct rpcrdma_sendctx {
struct ib_send_wr sc_wr;
struct ib_cqe sc_cqe;
struct rpcrdma_xprt *sc_xprt;
struct rpcrdma_req *sc_req;
unsigned int sc_unmap_count;
struct ib_sge sc_sges[];
};
/* Limit the number of SGEs that can be unmapped during one
* Send completion. This caps the amount of work a single
* completion can do before returning to the provider.
*
* Setting this to zero disables Send completion batching.
*/
enum {
RPCRDMA_MAX_SEND_BATCH = 7,
};
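This value feeds rpcrdma_ep_create(), where rep_send_batch becomes min(RPCRDMA_MAX_SEND_BATCH, cdata->max_requests >> 2); a batch of zero leaves rep_send_count at zero, so the test in rpcrdma_ep_post() posts every Send signaled.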
/*
* struct rpcrdma_mw - external memory region metadata
*
@@ -340,26 +342,30 @@ enum {
struct rpcrdma_buffer;
struct rpcrdma_req {
struct list_head rl_list;
unsigned int rl_mapped_sges;
unsigned int rl_connect_cookie;
struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply;
struct xdr_stream rl_stream;
struct xdr_buf rl_hdrbuf;
struct ib_send_wr rl_send_wr;
struct ib_sge rl_send_sge[RPCRDMA_MAX_SEND_SGES];
struct rpcrdma_sendctx *rl_sendctx;
struct rpcrdma_regbuf *rl_rdmabuf; /* xprt header */
struct rpcrdma_regbuf *rl_sendbuf; /* rq_snd_buf */
struct rpcrdma_regbuf *rl_recvbuf; /* rq_rcv_buf */
struct ib_cqe rl_cqe;
struct list_head rl_all;
bool rl_backchannel;
unsigned long rl_flags;
struct list_head rl_registered; /* registered segments */
struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];
};
/* rl_flags */
enum {
RPCRDMA_REQ_F_BACKCHANNEL = 0,
RPCRDMA_REQ_F_PENDING,
RPCRDMA_REQ_F_TX_RESOURCES,
};
static inline void
rpcrdma_set_xprtdata(struct rpc_rqst *rqst, struct rpcrdma_req *req)
{
@@ -399,12 +405,17 @@ struct rpcrdma_buffer {
struct list_head rb_mws;
struct list_head rb_all;
unsigned long rb_sc_head;
unsigned long rb_sc_tail;
unsigned long rb_sc_last;
struct rpcrdma_sendctx **rb_sc_ctxs;
spinlock_t rb_lock; /* protect buf lists */
int rb_send_count, rb_recv_count;
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
u32 rb_max_requests;
atomic_t rb_credits; /* most recent credit grant */
u32 rb_credits; /* most recent credit grant */
u32 rb_bc_srv_max_requests;
spinlock_t rb_reqslock; /* protect rb_allreqs */
@@ -453,10 +464,12 @@ struct rpcrdma_stats {
unsigned long mrs_recovered;
unsigned long mrs_orphaned;
unsigned long mrs_allocated;
unsigned long empty_sendctx_q;
/* accessed when receiving a reply */
unsigned long long total_rdma_reply;
unsigned long long fixup_copy_count;
unsigned long reply_waits_for_send;
unsigned long local_inv_needed;
unsigned long nomsg_call_count;
unsigned long bcall_count;
@@ -473,8 +486,6 @@ struct rpcrdma_memreg_ops {
struct rpcrdma_mw **);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct list_head *);
void (*ro_unmap_safe)(struct rpcrdma_xprt *,
struct rpcrdma_req *, bool);
void (*ro_recover_mr)(struct rpcrdma_mw *);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
@@ -532,6 +543,8 @@ void rpcrdma_ia_close(struct rpcrdma_ia *);
bool frwr_is_supported(struct rpcrdma_ia *);
bool fmr_is_supported(struct rpcrdma_ia *);
extern struct workqueue_struct *rpcrdma_receive_wq;
/*
* Endpoint calls - xprtrdma/verbs.c
*/
@@ -554,6 +567,8 @@ struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
@@ -610,12 +625,18 @@ enum rpcrdma_chunktype {
rpcrdma_replych
};
bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
u32, struct xdr_buf *, enum rpcrdma_chunktype);
void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req, u32 hdrlen,
struct xdr_buf *xdr,
enum rpcrdma_chunktype rtype);
void rpcrdma_unmap_sendctx(struct rpcrdma_sendctx *sc);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_reply_handler(struct work_struct *work);
void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_req *req);
void rpcrdma_deferred_completion(struct work_struct *work);
static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{


@@ -552,6 +552,7 @@ static int xs_local_send_request(struct rpc_task *task)
default:
dprintk("RPC: sendmsg returned unrecognized error %d\n",
-status);
/* fall through */
case -EPIPE:
xs_close(xprt);
status = -ENOTCONN;
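These annotations follow the convention that compilers and static checkers (for example GCC's -Wimplicit-fallthrough) recognize: a comment in the fallthrough position documents that the missing break is deliberate. A minimal sketch with hypothetical names (handle_status and the example status codes are illustrative, not kernel code):

    #include <stdio.h>

    /* Hypothetical status handler showing the marking convention */
    static void handle_status(int status)
    {
            switch (status) {
            case -32:                       /* an EPIPE-like error: peer is gone */
                    printf("closing transport\n");
                    /* fall through */
            case -107:                      /* an ENOTCONN-like error */
                    printf("marking transport disconnected\n");
                    break;
            default:
                    printf("unexpected status %d\n", status);
            }
    }

    int main(void)
    {
            handle_status(-32);     /* prints both messages */
            handle_status(-107);    /* prints only the second */
            return 0;
    }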
@@ -1611,6 +1612,7 @@ static void xs_tcp_state_change(struct sock *sk)
xprt->connect_cookie++;
clear_bit(XPRT_CONNECTED, &xprt->state);
xs_tcp_force_close(xprt);
/* fall through */
case TCP_CLOSING:
/*
* If the server closed down the connection, make sure that
@@ -2368,6 +2370,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
switch (ret) {
case 0:
xs_set_srcport(transport, sock);
/* fall through */
case -EINPROGRESS:
/* SYN_SENT! */
if (xprt->reestablish_timeout < XS_TCP_INIT_REEST_TO)
@@ -2419,6 +2422,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
default:
printk("%s: connect returned unhandled error %d\n",
__func__, status);
/* fall through */
case -EADDRNOTAVAIL:
/* We're probably in TIME_WAIT. Get rid of existing socket,
* and retry