/*
 * NOTE(review): fragment — tail of a fallocate-path helper; the enclosing
 * function's start is not visible here.  Selects a write stateid for the
 * FALLOCATE arguments.  -EAGAIN from nfs4_set_rw_stateid() is remapped to
 * -NFS4ERR_BAD_STATEID, presumably so the caller's NFSv4 error handling
 * triggers stateid recovery — confirm against the enclosing function.
 */
status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
lock, FMODE_WRITE); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
/*
 * process_copy_commit - COMMIT the copied range and check the verifier
 * @dst: destination file that the COPY wrote into
 * @pos_dst: destination offset of the copied range
 * @res: COPY result carrying the byte count and write verifier
 *
 * Issues a COMMIT covering the copied range, then compares the COMMIT
 * verifier against the verifier returned by the COPY.  A mismatch
 * indicates the server may have lost the uncommitted data (e.g. after
 * a reboot), so -EAGAIN is returned to make the caller retry the copy.
 *
 * Returns 0 on success, -ENOMEM if the verifier buffer cannot be
 * allocated, a negative errno from nfs4_proc_commit(), or -EAGAIN on
 * verifier mismatch.
 *
 * Fix vs. original: "staticint" restored to "static int" (whitespace
 * lost in the original text); logic is unchanged.
 */
static int process_copy_commit(struct file *dst, loff_t pos_dst,
			       struct nfs42_copy_res *res)
{
	struct nfs_commitres cres;
	int status = -ENOMEM;

	cres.verf = kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL);
	if (!cres.verf)
		goto out;

	status = nfs4_proc_commit(dst, pos_dst, res->write_res.count, &cres);
	if (status)
		goto out_free;
	if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
				   &cres.verf->verifier)) {
		dprintk("commit verf differs from copy verf\n");
		status = -EAGAIN;
	}

out_free:
	kfree(cres.verf);
out:
	return status;
}
/*
 * NOTE(review): "staticvoid" below is a whitespace-loss artifact of
 * "static void" — restore the space before compiling.  The body also
 * appears truncated: the file-size update and cache-attribute
 * invalidation promised by the kernel-doc are not visible after the
 * invalidate call.
 */
/** * nfs42_copy_dest_done - perform inode cache updates after clone/copy offload * @file: pointer to destination file * @pos: destination offset * @len: copy length * @oldsize: length of the file prior to clone/copy * * Punch a hole in the inode page cache, so that the NFS client will * know to retrieve new data. * Update the file size if necessary, and then mark the inode as having * invalid cached values for change attribute, ctime, mtime and space used.
 */ staticvoid nfs42_copy_dest_done(struct file *file, loff_t pos, loff_t len,
loff_t oldsize)
{ struct inode *inode = file_inode(file); struct address_space *mapping = file->f_mapping;
/* Last byte of the destination range; used below as the invalidate bound. */
loff_t newsize = pos + len;
loff_t end = newsize - 1;
nfs_truncate_last_folio(mapping, oldsize, pos);
/* Drop cached pages over the copied range so later reads refetch data. */
WARN_ON_ONCE(invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
end >> PAGE_SHIFT));
/*
 * NOTE(review): fragment — tail of the COPY-issuing helper; its signature
 * and local declarations are not visible here.  Flow: pick source/dest
 * stateids, flush dirty source pages and sync the destination inode,
 * issue the COPY RPC, then handle async completion, COMMIT verification
 * and destination cache invalidation.
 */
/* Server-side copy between servers: use the copy-notify stateid if given,
 * otherwise derive a read stateid from the source open/lock state. */
if (nss) {
args->cp_src = nss;
nfs4_stateid_copy(&args->src_stateid, cnr_stateid);
} else {
status = nfs4_set_rw_stateid(&args->src_stateid,
src_lock->open_context, src_lock, FMODE_READ); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
}
/* Flush dirty source pages in the copied range before the server reads. */
status = nfs_filemap_write_and_wait_range(src->f_mapping,
pos_src, pos_src + (loff_t)count - 1); if (status) return status;
status = nfs4_set_rw_stateid(&args->dst_stateid, dst_lock->open_context,
dst_lock, FMODE_WRITE); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
nfs_file_block_o_direct(NFS_I(dst_inode));
status = nfs_sync_inode(dst_inode); if (status) return status;
/* For synchronous copies, preallocate a COMMIT verifier buffer. */
res->commit_res.verf = NULL; if (args->sync) {
res->commit_res.verf =
kzalloc(sizeof(struct nfs_writeverf), GFP_KERNEL); if (!res->commit_res.verf) return -ENOMEM;
}
set_bit(NFS_CLNT_SRC_SSC_COPY_STATE,
&src_lock->open_context->state->flags);
set_bit(NFS_CLNT_DST_SSC_COPY_STATE,
&dst_lock->open_context->state->flags);
status = nfs4_call_sync(dst_server->client, dst_server, &msg,
&args->seq_args, &res->seq_res, 0);
/* -ENOTSUPP: server lacks COPY; drop the capability so we stop trying. */
trace_nfs4_copy(src_inode, dst_inode, args, res, nss, status); if (status == -ENOTSUPP)
dst_server->caps &= ~NFS_CAP_COPY; if (status) goto out;
/* Verifier mismatch on a sync copy: data may be lost, retry with -EAGAIN. */
if (args->sync &&
nfs_write_verifier_cmp(&res->write_res.verifier.verifier,
&res->commit_res.verf->verifier)) {
status = -EAGAIN; goto out;
}
if (!res->synchronous) {
status = handle_async_copy(res, dst_server, src_server, src,
dst, &args->src_stateid, restart); if (status) goto out;
}
/* Data not yet on stable storage: COMMIT it explicitly. */
if ((!res->synchronous || !args->sync) &&
res->write_res.verifier.committed != NFS_FILE_SYNC) {
status = process_copy_commit(dst, pos_dst, res); if (status) goto out;
}
nfs42_copy_dest_done(dst, pos_dst, res->write_res.count, oldsize_dst);
nfs_invalidate_atime(src_inode);
/* Success: return the number of bytes copied. */
status = res->write_res.count;
out: if (args->sync)
kfree(res->commit_res.verf); return status;
}
/*
 * NOTE(review): fragment — tail of the OFFLOAD_STATUS RPC helper; the
 * function's start is not visible here.  Issues the RPC and translates
 * stateid-related and not-supported server errors into local errnos.
 */
status = nfs4_call_sync(server->client, server, &msg,
&data->args.osa_seq_args,
&data->res.osr_seq_res, 1);
trace_nfs4_offload_status(&data->args, status); switch (status) { case 0: break;
case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: /* * Server does not recognize the COPY stateid. CB_OFFLOAD * could have purged it, or server might have rebooted. * Since COPY stateids don't have an associated inode, * avoid triggering state recovery.
*/
status = -EBADF; break; case -NFS4ERR_NOTSUPP: case -ENOTSUPP: case -EOPNOTSUPP:
/* Server lacks OFFLOAD_STATUS: clear the capability to stop retrying. */
server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
status = -EOPNOTSUPP; break;
}
return status;
}
/*
 * NOTE(review): "staticint" below is a whitespace-loss artifact of
 * "static int", and "elseif" near the end stands for "else if" — restore
 * both before compiling.  The function is truncated here: the "out:"
 * label that frees @data and the final return are not visible.
 */
/** * nfs42_proc_offload_status - Poll completion status of an async copy operation * @dst: handle of file being copied into * @stateid: copy stateid (from async COPY result) * @copied: OUT: number of bytes copied so far * * Return values: * %0: Server returned an NFS4_OK completion status * %-EINPROGRESS: Server returned no completion status * %-EREMOTEIO: Server returned an error completion status * %-EBADF: Server did not recognize the copy stateid * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS * %-ERESTARTSYS: Wait interrupted by signal * * Other negative errnos indicate the client could not complete the * request.
 */ staticint
nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
{ struct inode *inode = file_inode(dst); struct nfs_server *server = NFS_SERVER(inode); struct nfs4_exception exception = {
.inode = inode,
}; struct nfs42_offload_data *data; int status;
if (!(server->caps & NFS_CAP_OFFLOAD_STATUS)) return -EOPNOTSUPP;
data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM;
data->seq_server = server;
data->args.osa_src_fh = NFS_FH(inode);
memcpy(&data->args.osa_stateid, stateid, sizeof(data->args.osa_stateid));
/* Standard NFSv4 retry loop around the raw RPC helper. */
exception.stateid = &data->args.osa_stateid; do {
status = _nfs42_proc_offload_status(server, dst, data); if (status == -EOPNOTSUPP) goto out;
status = nfs4_handle_exception(server, status, &exception);
} while (exception.retry); if (status) goto out;
/* Report progress; map "no completion yet" / failed completion to errnos. */
*copied = data->res.osr_count; if (!data->res.complete_count)
status = -EINPROGRESS; elseif (data->res.osr_complete != NFS_OK)
status = -EREMOTEIO;
/*
 * NOTE(review): two fragments glued together.  Lines up to the
 * COPY_NOTIFY trace belong to the raw RPC helper (takes source open and
 * lock contexts, derives a read stateid, issues the RPC); the trailing
 * do/while retry loop belongs to the public wrapper.  Both functions'
 * starts and ends are not visible here.
 */
ctx = get_nfs_open_context(nfs_file_open_context(src));
l_ctx = nfs_get_lock_context(ctx); if (IS_ERR(l_ctx)) {
status = PTR_ERR(l_ctx); goto out;
}
status = nfs4_set_rw_stateid(&args->cna_src_stateid, ctx, l_ctx,
FMODE_READ);
/* Lock context only needed to pick the stateid; drop it before the RPC. */
nfs_put_lock_context(l_ctx); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; goto out;
}
status = nfs4_call_sync(src_server->client, src_server, &msg,
&args->cna_seq_args, &res->cnr_seq_res, 0);
/* Server lacks COPY_NOTIFY: clear the capability bit. */
trace_nfs4_copy_notify(file_inode(src), args, res, status); if (status == -ENOTSUPP)
src_server->caps &= ~NFS_CAP_COPY_NOTIFY;
/* --- wrapper fragment: NFSv4 exception/retry loop around the helper. --- */
do {
status = _nfs42_proc_copy_notify(src, dst, args, res); if (status == -ENOTSUPP) {
status = -EOPNOTSUPP; goto out;
}
status = nfs4_handle_exception(src_server, status, &exception);
} while (exception.retry);
/*
 * NOTE(review): fragment — interior of the SEEK (llseek) RPC helper;
 * start and end of the enclosing function are not visible here.
 */
if (!nfs_server_capable(inode, NFS_CAP_SEEK)) return -ENOTSUPP;
status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
lock, FMODE_READ); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
/* Flush everything from @offset onward so the server sees current data. */
status = nfs_filemap_write_and_wait_range(inode->i_mapping,
offset, LLONG_MAX); if (status) return status;
status = nfs4_call_sync(server->client, server, &msg,
&args.seq_args, &res.seq_res, 0);
/* Server lacks SEEK: clear the capability so we fall back locally. */
trace_nfs4_llseek(inode, &args, &res, status); if (status == -ENOTSUPP)
server->caps &= ~NFS_CAP_SEEK; if (status) return status;
/*
 * NOTE(review): fragment — body of an rpc_call_done callback for
 * LAYOUTSTATS, plus a device-info cleanup loop from a separate (release)
 * function; neither function's header is visible here.  Maps stateid
 * errors to layout invalidation or a delayed retry.
 */
if (!nfs4_sequence_done(task, &data->res.seq_res)) return;
switch (task->tk_status) { case 0: return; case -NFS4ERR_BADHANDLE: case -ESTALE:
pnfs_destroy_layout(NFS_I(inode)); break; case -NFS4ERR_EXPIRED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID:
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout; if (pnfs_layout_is_valid(lo) &&
nfs4_stateid_match(&data->args.stateid,
&lo->plh_stateid)) {
LIST_HEAD(head);
/* * Mark the bad layout state as invalid, then retry * with the current stateid.
*/
pnfs_mark_layout_stateid_invalid(lo, &head);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
nfs_commit_inode(inode, 0);
} else
spin_unlock(&inode->i_lock); break; case -NFS4ERR_OLD_STATEID:
spin_lock(&inode->i_lock);
lo = NFS_I(inode)->layout; if (pnfs_layout_is_valid(lo) &&
nfs4_stateid_match_other(&data->args.stateid,
&lo->plh_stateid)) { /* Do we need to delay before resending? */ if (!nfs4_stateid_is_newer(&lo->plh_stateid,
&data->args.stateid))
rpc_delay(task, HZ);
rpc_restart_call_prepare(task);
}
spin_unlock(&inode->i_lock); break; case -ENOTSUPP: case -EOPNOTSUPP:
/* Server lacks LAYOUTSTATS: clear the capability bit. */
NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS;
}
/* --- separate fragment: free per-device private data via its ops hook. --- */
for (i = 0; i < data->args.num_dev; i++) { if (devinfo[i].ld_private.ops && devinfo[i].ld_private.ops->free)
devinfo[i].ld_private.ops->free(&devinfo[i].ld_private);
}
/*
 * NOTE(review): fragment — body of an rpc_call_done callback for
 * LAYOUTERROR; the function header is not visible.  Mirrors the
 * LAYOUTSTATS completion logic but keys off the first reported error's
 * stateid (args.errors[0].stateid).
 */
if (!nfs4_sequence_done(task, &data->res.seq_res)) return;
switch (task->tk_status) { case 0: return; case -NFS4ERR_BADHANDLE: case -ESTALE:
pnfs_destroy_layout(NFS_I(inode)); break; case -NFS4ERR_EXPIRED: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_DELEG_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_BAD_STATEID:
spin_lock(&inode->i_lock); if (pnfs_layout_is_valid(lo) &&
nfs4_stateid_match(&data->args.errors[0].stateid,
&lo->plh_stateid)) {
LIST_HEAD(head);
/* * Mark the bad layout state as invalid, then retry * with the current stateid.
*/
pnfs_mark_layout_stateid_invalid(lo, &head);
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&head);
nfs_commit_inode(inode, 0);
} else
spin_unlock(&inode->i_lock); break; case -NFS4ERR_OLD_STATEID:
spin_lock(&inode->i_lock); if (pnfs_layout_is_valid(lo) &&
nfs4_stateid_match_other(&data->args.errors[0].stateid,
&lo->plh_stateid)) { /* Do we need to delay before resending? */ if (!nfs4_stateid_is_newer(&lo->plh_stateid,
&data->args.errors[0].stateid))
rpc_delay(task, HZ);
rpc_restart_call_prepare(task);
}
spin_unlock(&inode->i_lock); break; case -ENOTSUPP: case -EOPNOTSUPP:
/* Server lacks LAYOUTERROR: clear the capability bit. */
NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR;
}
/*
 * NOTE(review): fragment — interior of the CLONE RPC helper; start and
 * end are not visible.  Picks read/write stateids for source and
 * destination, allocates a fattr for the reply, then issues the RPC.
 */
status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
src_lock, FMODE_READ); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
dst_lock, FMODE_WRITE); if (status) { if (status == -EAGAIN)
status = -NFS4ERR_BAD_STATEID; return status;
}
res.dst_fattr = nfs_alloc_fattr(); if (!res.dst_fattr) return -ENOMEM;
ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args,
&res.seq_res, 0);
/*
 * NOTE(review): two fragments.  The first lines (trace + caching comment)
 * come from the raw GETXATTR RPC helper; the page-allocation loop and
 * retry loop that follow are the tail of the public getxattr wrapper.
 * Neither function's start is visible here.
 */
trace_nfs4_getxattr(inode, name, ret); if (ret < 0) return ret;
/* * Normally, the caching is done one layer up, but for successful * RPCS, always cache the result here, even if the caller was * just querying the length, or if the reply was too big for * the caller. This avoids a second RPC in the case of the * common query-alloc-retrieve cycle for xattrs. * * Note that xattr_len is always capped to XATTR_SIZE_MAX.
*/
/* --- wrapper fragment: allocate np reply pages, retrying loop below. --- */
for (i = 0; i < np; i++) {
pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) {
err = -ENOMEM; goto out;
}
}
/* * The GETXATTR op has no length field in the call, and the * xattr data is at the end of the reply. * * There is no downside in using the page-aligned length. It will * allow receiving and caching xattrs that are too large for the * caller but still fit in the page-rounded value.
*/ do {
err = _nfs42_proc_getxattr(inode, name, buf, buflen,
pages, np * PAGE_SIZE); if (err >= 0) break;
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
} while (exception.retry);
/* Free whatever pages were successfully allocated, then the array. */
out: while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
return err;
}
/*
 * nfs42_proc_setxattr - set an extended attribute via NFSv4.2 SETXATTR,
 * retrying through the standard NFSv4 exception handler.
 *
 * NOTE(review): "constchar"/"constvoid" below are whitespace-loss
 * artifacts of "const char"/"const void" — restore the spaces before
 * compiling.  The function is truncated here: the final "return err;"
 * and closing brace are not visible.
 */
int nfs42_proc_setxattr(struct inode *inode, constchar *name, constvoid *buf, size_t buflen, int flags)
{ struct nfs4_exception exception = { }; int err;
do {
err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags); if (!err) break;
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
} while (exception.retry);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.