struct svc_fh *current_fh = &cstate->current_fh;
struct svc_fh *save_fh = &cstate->save_fh;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_thread_local_info *ntli = rqstp->rq_private;
__be32 status;
resp->xdr = &rqstp->rq_res_stream;
}
check_if_stalefh_allowed(args);
- rqstp->rq_lease_breaker = (void **)&cstate->clp;
+ ntli->ntli_lease_breaker = &cstate->clp;
trace_nfsd_compound(rqstp, args->tag, args->taglen, args->client_opcnt);
while (!status && resp->opcnt < args->opcnt) {
/*
 * Return true iff the delegation (lease) being broken is owned by the
 * same NFSv4 client that this nfsd thread is currently serving, i.e.
 * the client is breaking its own lease.  The current client is looked
 * up via the thread-local info stashed in rqst->rq_private (set up by
 * the nfsd service thread before it enters its request loop).
 */
static bool nfsd_breaker_owns_lease(struct file_lease *fl)
{
struct nfs4_delegation *dl = fl->c.flc_owner;
+ struct nfsd_thread_local_info *ntli; /* per-thread nfsd state */
struct svc_rqst *rqst;
struct nfs4_client *clp;
rqst = nfsd_current_rqst();
if (!nfsd_v4client(rqst))
return false;
- clp = *(rqst->rq_lease_breaker);
+ ntli = rqst->rq_private; /* replaces old rq_lease_breaker field */
+ clp = *ntli->ntli_lease_breaker;
return dl->dl_stid.sc_client == clp;
}
nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
struct nfs4_delegation **pdp)
{
- __be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
+ struct nfsd_thread_local_info *ntli = rqstp->rq_private;
struct file_lock_context *ctx;
struct nfs4_delegation *dp = NULL;
struct file_lease *fl;
struct nfs4_cb_fattr *ncf;
struct inode *inode = d_inode(dentry);
+ __be32 status;
ctx = locks_inode_context(inode);
if (!ctx)
break;
}
if (dp == NULL || dp == NON_NFSD_LEASE ||
- dp->dl_recall.cb_clp == *(rqstp->rq_lease_breaker)) {
+ dp->dl_recall.cb_clp == *(ntli->ntli_lease_breaker)) {
spin_unlock(&ctx->flc_lock);
if (dp == NON_NFSD_LEASE) {
status = nfserrno(nfsd_open_break_lease(inode,
extern const struct seq_operations nfs_exports_op;
+struct nfsd_thread_local_info { /* per-nfsd-thread state, reached via rqstp->rq_private */
+ struct nfs4_client **ntli_lease_breaker; /* v4 client currently breaking a lease, if any */
+};
+
/*
* Common void argument and result helpers
*/
struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next, typeof(struct svc_xprt), xpt_list);
struct net *net = perm_sock->xpt_net;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct nfsd_thread_local_info ntli = { };
bool have_mutex = false;
/* At this point, the thread shares current->fs
set_freezable();
+ /* use dynamic allocation if ntli should ever become large */
+ static_assert(sizeof(struct nfsd_thread_local_info) < 256);
+ rqstp->rq_private = &ntli;
+
/*
* The main request loop
*/
/*
* The context of a single thread, including the request currently being
* processed.
+ *
+ * RPC programs are free to use rq_private to stash thread-local information.
+ * The sunrpc layer will not access it.
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
unsigned long bc_to_initval;
unsigned int bc_to_retries;
unsigned int rq_status_counter; /* RPC processing counter */
- void **rq_lease_breaker; /* The v4 client breaking a lease */
+ void *rq_private; /* For use by the service thread */
};
/* bits for rq_flags */