/*
 * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
 * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
 *
 * Finally, the nfsd_mutex also protects some of the global variables that are
 * accessed when nfsd starts and that are settable via the write_* routines in
 * nfsctl.c. In particular:
 *
 *	user_recovery_dirname
 *	user_lease_time
 *	nfsd_versions
 */
DEFINE_MUTEX(nfsd_mutex);
/**
 * nfsd_copy_write_verifier - Atomically copy a write verifier
 * @verf: buffer in which to receive the verifier cookie
 * @nn: NFS net namespace
 *
 * This function provides a wait-free mechanism for copying the
 * namespace's write verifier without tearing it.
 */
void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
	/* Fix: original had fused tokens "unsignedint", which does not compile. */
	unsigned int seq;

	/* Seqlock reader side: retry until a consistent snapshot is read. */
	do {
		seq = read_seqbegin(&nn->writeverf_lock);
		memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
	} while (read_seqretry(&nn->writeverf_lock, seq));
}
/* * Because the time value is hashed, y2038 time_t overflow * is irrelevant in this usage.
*/
ktime_get_raw_ts64(&now);
verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}
/**
 * nfsd_reset_write_verifier - Generate a new write verifier
 * @nn: NFS net namespace
 *
 * This function updates the ->writeverf field of @nn. This field
 * contains an opaque cookie that, according to Section 18.32.3 of
 * RFC 8881, "the client can use to determine whether a server has
 * changed instance state (e.g., server restart) between a call to
 * WRITE and a subsequent call to either WRITE or COMMIT. This
 * cookie MUST be unchanged during a single instance of the NFSv4.1
 * server and MUST be unique between instances of the NFSv4.1
 * server."
 */
void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
	/* Writer side of the seqlock paired with nfsd_copy_write_verifier(). */
	write_seqlock(&nn->writeverf_lock);
	nfsd_reset_write_verifier_locked(nn);
	write_sequnlock(&nn->writeverf_lock);
}
/* * Crank up a set of per-namespace resources for a new NFSD instance, * including lockd, a duplicate reply cache, an open file cache * instance, and a cache of NFSv4 state objects.
*/ staticint nfsd_startup_net(struct net *net, conststruct cred *cred)
{ struct nfsd_net *nn = net_generic(net, nfsd_net_id); int ret;
if (nn->nfsd_net_up) return 0;
ret = nfsd_startup_generic(); if (ret) return ret;
ret = nfsd_init_socks(net, cred); if (ret) goto out_socks;
if (nfsd_needs_lockd(nn) && !nn->lockd_up) {
ret = lockd_up(net, cred); if (ret) goto out_socks;
nn->lockd_up = true;
}
ret = nfsd_file_cache_start_net(net); if (ret) goto out_lockd;
ret = nfsd_reply_cache_init(nn); if (ret) goto out_filecache;
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
nfsd4_ssc_init_umount_work(nn); #endif
ret = nfs4_state_start_net(net); if (ret) goto out_reply_cache;
/* Only used under nfsd_mutex, so this atomic may be overkill: */ static atomic_t nfsd_notifier_refcount = ATOMIC_INIT(0);
/**
 * nfsd_destroy_serv - tear down NFSD's svc_serv for a namespace
 * @net: network namespace the NFS service is associated with
 */
void nfsd_destroy_serv(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv = nn->nfsd_serv;

	/* check if the notifier still has clients */
	if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
		unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
		unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
	}

	/* Close all listening and temporary transports for this namespace. */
	svc_xprt_destroy_all(serv, net);

	/*
	 * write_ports can create the server without actually starting
	 * any threads--if we get shut down before any threads are
	 * started, then nfsd_destroy_serv will be run before any of this
	 * other initialization has been done except the rpcb information.
	 */
	svc_rpcb_cleanup(serv, net);
	/* Shut down per-net state (lockd, caches, NFSv4 state) ... */
	nfsd_shutdown_net(net);
	/* ... then release the svc_serv itself. */
	svc_destroy(&serv);
}
/*
 * nfsd_reset_versions - restore the default set of enabled NFS versions.
 *
 * If the administrator has already enabled at least one version, the
 * configuration is left untouched.  Otherwise every supported version is
 * switched on; for NFSv4 this means enabling each supported minor version.
 */
void nfsd_reset_versions(struct nfsd_net *nn)
{
	int vers;

	/* Leave the configuration alone if anything is already enabled. */
	for (vers = 0; vers <= NFSD_MAXVERS; vers++) {
		if (nfsd_vers(nn, vers, NFSD_TEST))
			return;
	}

	/* Nothing enabled: turn everything on. */
	for (vers = 0; vers <= NFSD_MAXVERS; vers++) {
		if (vers == 4) {
			int minor;

			/* Enable v4 minor versions until one is rejected. */
			for (minor = 0;
			     nfsd_minorversion(nn, minor, NFSD_SET) >= 0;
			     minor++)
				;
		} else {
			nfsd_vers(nn, vers, NFSD_SET);
		}
	}
}
si_meminfo(&i);
target = (i.totalram - i.totalhigh) << PAGE_SHIFT; /* * Aim for 1/4096 of memory per thread This gives 1MB on 4Gig * machines, but only uses 32K on 128M machines. Bottom out at * 8K on 32M and smaller. Of course, this is only a default.
*/
target >>= 12;
ret = NFSSVC_DEFBLKSIZE; while (ret > target && ret >= 8*1024*2)
ret /= 2; return ret;
}
/* check if the notifier is already set */ if (atomic_inc_return(&nfsd_notifier_refcount) == 1) {
register_inetaddr_notifier(&nfsd_inetaddr_notifier); #if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&nfsd_inet6addr_notifier); #endif
}
nfsd_reset_write_verifier(nn); return 0;
}
/*
 * nfsd_nrpools - number of svc thread pools in this namespace's service,
 * or 0 when no NFSD service is currently running.
 */
int nfsd_nrpools(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	return nn->nfsd_serv ? nn->nfsd_serv->sv_nrpools : 0;
}
/*
 * nfsd_get_nrthreads - report per-pool thread counts.
 *
 * Fills @nthreads (capacity @n entries) with the current thread count of
 * each pool; extra array slots beyond the pool count are left untouched.
 * A namespace with no running service fills nothing.  Always returns 0.
 */
int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct svc_serv *serv = nn->nfsd_serv;
	int idx;

	if (!serv)
		return 0;

	for (idx = 0; idx < serv->sv_nrpools && idx < n; idx++)
		nthreads[idx] = serv->sv_pools[idx].sp_nrthreads;
	return 0;
}
/**
 * nfsd_set_nrthreads - set the number of running threads in the net's service
 * @n: number of array members in @nthreads
 * @nthreads: array of thread counts for each pool
 * @net: network namespace to operate within
 *
 * This function alters the number of running threads for the given network
 * namespace in each pool. If passed an array longer then the number of pools
 * the extra pool settings are ignored. If passed an array shorter than the
 * number of pools, the missing values are interpreted as 0's.
 *
 * Returns 0 on success or a negative errno on error.
 */
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
{
	int i = 0;
	int tot = 0;
	int err = 0;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	lockdep_assert_held(&nfsd_mutex);

	if (nn->nfsd_serv == NULL || n <= 0)
		return 0;

	/*
	 * Special case: When n == 1, pass in NULL for the pool, so that the
	 * change is distributed equally among them.
	 */
	if (n == 1)
		return svc_set_num_threads(nn->nfsd_serv, NULL, nthreads[0]);

	if (n > nn->nfsd_serv->sv_nrpools)
		n = nn->nfsd_serv->sv_nrpools;

	/* enforce a global maximum number of threads */
	tot = 0;
	for (i = 0; i < n; i++) {
		nthreads[i] = min(nthreads[i], NFSD_MAXSERVS);
		tot += nthreads[i];
	}
	if (tot > NFSD_MAXSERVS) {
		/* total too large: scale down requested numbers */
		for (i = 0; i < n && tot > 0; i++) {
			/* Fix: original had fused tokens "intnew" (compile error). */
			int new = nthreads[i] * NFSD_MAXSERVS / tot;

			tot -= (nthreads[i] - new);
			nthreads[i] = new;
		}
		/* Rounding above may leave a small excess; trim one per pool. */
		for (i = 0; i < n && tot > 0; i++) {
			nthreads[i]--;
			tot--;
		}
	}

	/* apply the new numbers */
	for (i = 0; i < n; i++) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  nthreads[i]);
		if (err)
			goto out;
	}

	/* Anything undefined in array is considered to be 0 */
	for (i = n; i < nn->nfsd_serv->sv_nrpools; ++i) {
		err = svc_set_num_threads(nn->nfsd_serv,
					  &nn->nfsd_serv->sv_pools[i],
					  0);
		if (err)
			goto out;
	}

out:
	return err;
}
/** * nfsd_svc: start up or shut down the nfsd server * @n: number of array members in @nthreads * @nthreads: array of thread counts for each pool * @net: network namespace to operate within * @cred: credentials to use for xprt creation * @scope: server scope value (defaults to nodename) * * Adjust the number of threads in each pool and return the new * total number of threads in the service.
*/ int
nfsd_svc(int n, int *nthreads, struct net *net, conststruct cred *cred, constchar *scope)
{ int error; struct nfsd_net *nn = net_generic(net, nfsd_net_id); struct svc_serv *serv;
if (likely(nfsd_vers(nn, rqstp->rq_vers, NFSD_TEST))) return svc_generic_init_request(rqstp, progp, ret);
ret->mismatch.lovers = NFSD_MAXVERS + 1; for (i = NFSD_MINVERS; i <= NFSD_MAXVERS; i++) { if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.lovers = i; break;
}
} if (ret->mismatch.lovers > NFSD_MAXVERS) return rpc_prog_unavail;
ret->mismatch.hivers = NFSD_MINVERS; for (i = NFSD_MAXVERS; i >= NFSD_MINVERS; i--) { if (nfsd_vers(nn, i, NFSD_TEST)) {
ret->mismatch.hivers = i; break;
}
} return rpc_prog_mismatch;
}
/*
 * This is the NFS server kernel thread
 */
/* Fix: original had fused tokens "staticint", which does not compile. */
static int
nfsd(void *vrqstp)
{
	struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
	struct svc_xprt *perm_sock = list_entry(rqstp->rq_server->sv_permsocks.next,
						typeof(struct svc_xprt), xpt_list);
	struct net *net = perm_sock->xpt_net;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	/* At this point, the thread shares current->fs
	 * with the init process. We need to create files with the
	 * umask as defined by the client instead of init's umask.
	 */
	svc_thread_init_status(rqstp, unshare_fs_struct());

	current->fs->umask = 0;

	atomic_inc(&nfsd_th_cnt);

	set_freezable();

	/*
	 * The main request loop
	 */
	while (!svc_thread_should_stop(rqstp)) {
		svc_recv(rqstp);
		/* Dispose of any nfsd_file objects queued for this net. */
		nfsd_file_net_dispose(nn);
	}

	atomic_dec(&nfsd_th_cnt);

	/* Release the thread */
	svc_exit_thread(rqstp);
	return 0;
}
/** * nfsd_dispatch - Process an NFS or NFSACL or LOCALIO Request * @rqstp: incoming request * * This RPC dispatcher integrates the NFS server's duplicate reply cache. * * Return values: * %0: Processing complete; do not send a Reply * %1: Processing complete; send Reply in rqstp->rq_res
*/ int nfsd_dispatch(struct svc_rqst *rqstp)
{ conststruct svc_procedure *proc = rqstp->rq_procinfo;
__be32 *statp = rqstp->rq_accept_statp; struct nfsd_cacherep *rp; unsignedint start, len;
__be32 *nfs_reply;
/* * Give the xdr decoder a chance to change this if it wants * (necessary in the NFSv4.0 compound case)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
/* * ->pc_decode advances the argument stream past the NFS * Call header, so grab the header's starting location and * size now for the call to nfsd_cache_lookup().
*/
start = xdr_stream_pos(&rqstp->rq_arg_stream);
len = xdr_stream_remaining(&rqstp->rq_arg_stream); if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream)) goto out_decode_err;
/* * Release rq_status_counter setting it to an odd value after the rpc * request has been properly parsed. rq_status_counter is used to * notify the consumers if the rqstp fields are stable * (rq_status_counter is odd) or not meaningful (rq_status_counter * is even).
*/
smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter | 1);
rp = NULL; switch (nfsd_cache_lookup(rqstp, start, len, &rp)) { case RC_DOIT: break; case RC_REPLY: goto out_cached_reply; case RC_DROPIT: goto out_dropit;
}
if (!proc->pc_encode(rqstp, &rqstp->rq_res_stream)) goto out_encode_err;
/* * Release rq_status_counter setting it to an even value after the rpc * request has been properly processed.
*/
smp_store_release(&rqstp->rq_status_counter, rqstp->rq_status_counter + 1);
/*
 * NOTE(review): the original file ended here with non-source text (a German
 * website disclaimer — an artifact of how this chunk was extracted). It is
 * not part of the kernel source; the remainder of nfsd_dispatch() and the
 * rest of the file are missing from this view and must be restored from the
 * upstream tree.
 */