/** * nfs_folio_find_head_request - find head request associated with a folio * @folio: pointer to folio * * must be called while holding the inode lock. * * returns matching head request with reference held, or NULL if not found.
*/ staticstruct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{ struct address_space *mapping = folio->mapping; struct nfs_page *req;
if (!folio_test_private(folio)) return NULL;
spin_lock(&mapping->i_private_lock);
req = folio->private; if (req) {
WARN_ON_ONCE(req->wb_head != req);
kref_get(&req->wb_kref);
}
spin_unlock(&mapping->i_private_lock); return req;
}
/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
			  unsigned int count)
{
	struct inode *inode = folio->mapping->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	/* Index of the folio containing the current EOF (folio-aligned) */
	end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
	/* Folios strictly before the EOF folio can never grow the file */
	if (i_size > 0 && folio->index < end_index)
		goto out;
	/* File offset of the end of the region being written */
	end = folio_pos(folio) + (loff_t)offset + (loff_t)count;
	if (i_size >= end)
		goto out;
	trace_nfs_size_grow(inode, end);
	i_size_write(inode, end);
	/* The size is now authoritative; drop any pending revalidation */
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	/* Atomically update timestamps if they are delegated to us. */
	nfs_update_delegated_mtime_locked(inode);
	spin_unlock(&inode->i_lock);
	nfs_fscache_invalidate(inode, 0);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */ staticvoid nfs_set_pageerror(struct address_space *mapping)
{ struct inode *inode = mapping->host;
/* * nfs_page_covers_folio * @req: struct nfs_page * * Return true if the request covers the whole folio. * Note that the caller should ensure all subrequests have been joined
*/ staticbool nfs_page_group_covers_page(struct nfs_page *req)
{ unsignedint len = nfs_folio_length(nfs_page_to_folio(req));
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	/*
	 * Nothing to do when already uptodate; otherwise only mark the
	 * folio when the request group covers it completely.
	 */
	if (!folio_test_uptodate(folio) && nfs_page_group_covers_page(req))
		folio_mark_uptodate(folio);
}
/* Map writeback sync mode to an NFS flush priority flag. */
static int wb_priority(struct writeback_control *wbc)
{
	return (wbc->sync_mode == WB_SYNC_ALL) ? FLUSH_COND_STABLE : 0;
}
/* * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests * * @destroy_list - request list (using wb_this_page) terminated by @old_head * @old_head - the old head of the list * * All subrequests must be locked and removed from all lists, so at this point * they are only "active" in this function, and possibly in nfs_wait_on_request * with a reference held by some other context.
*/ staticvoid
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, struct nfs_page *old_head, struct inode *inode)
{ while (destroy_list) { struct nfs_page *subreq = destroy_list;
/* Note: lock subreq in order to change subreq->wb_head */
nfs_page_set_headlock(subreq);
WARN_ON_ONCE(old_head != subreq->wb_head);
/* make sure old group is not used */
subreq->wb_this_page = subreq;
subreq->wb_head = subreq;
clear_bit(PG_REMOVE, &subreq->wb_flags);
/* Note: races with nfs_page_group_destroy() */ if (!kref_read(&subreq->wb_kref)) { /* Check if we raced with nfs_page_group_destroy() */ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
nfs_page_clear_headlock(subreq);
nfs_free_request(subreq);
} else
nfs_page_clear_headlock(subreq); continue;
}
nfs_page_clear_headlock(subreq);
nfs_release_request(old_head);
if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
nfs_release_request(subreq);
atomic_long_dec(&NFS_I(inode)->nrequests);
}
/* subreq is now totally disconnected from page group or any
* write / commit lists. last chance to wake any waiters */
nfs_unlock_and_release_request(subreq);
}
}
/* * nfs_join_page_group - destroy subrequests of the head req * @head: the page used to lookup the "page group" of nfs_page structures * @inode: Inode to which the request belongs. * * This function joins all sub requests to the head request by first * locking all requests in the group, cancelling any pending operations * and finally updating the head request to cover the whole range covered by * the (former) group. All subrequests are removed from any write or commit * lists, unlinked from the group and destroyed.
*/ void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, struct inode *inode)
{ struct nfs_page *subreq; struct nfs_page *destroy_list = NULL; unsignedint pgbase, off, bytes;
pgbase = head->wb_pgbase;
bytes = head->wb_bytes;
off = head->wb_offset; for (subreq = head->wb_this_page; subreq != head;
subreq = subreq->wb_this_page) { /* Subrequests should always form a contiguous range */ if (pgbase > subreq->wb_pgbase) {
off -= pgbase - subreq->wb_pgbase;
bytes += pgbase - subreq->wb_pgbase;
pgbase = subreq->wb_pgbase;
}
bytes = max(subreq->wb_pgbase + subreq->wb_bytes
- pgbase, bytes);
}
/* Set the head request's range to cover the former page group */
head->wb_pgbase = pgbase;
head->wb_bytes = bytes;
head->wb_offset = off;
/* Now that all requests are locked, make sure they aren't on any list.
* Commit list removal accounting is done after locks are dropped */
subreq = head; do {
nfs_clear_request_commit(cinfo, subreq);
subreq = subreq->wb_this_page;
} while (subreq != head);
/* unlink subrequests from head, destroy them later */ if (head->wb_this_page != head) { /* destroy list will be terminated by head */
destroy_list = head->wb_this_page;
head->wb_this_page = head;
}
/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Waits uninterruptibly (TASK_UNINTERRUPTIBLE) for PG_BUSY to clear.
 * The user is responsible for holding a count on the request.
 */
static int nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	/* Signal the unlock path that someone needs a wakeup */
	set_bit(PG_CONTENDED2, &req->wb_flags);
	/* Order the PG_CONTENDED2 store before sleeping on PG_BUSY */
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
/*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests.
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		/* Requests with a zero refcount were never locked; skip them */
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}
/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	/* A zero refcount means the subrequest is being torn down; skip it */
	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		/* Drop the group lock before sleeping on the subrequest */
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			/* Undo locks taken so far, drop our subreq reference */
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}
/* * nfs_lock_and_join_requests - join all subreqs to the head req * @folio: the folio used to lookup the "page group" of nfs_page structures * * This function joins all sub requests to the head request by first * locking all requests in the group, cancelling any pending operations * and finally updating the head request to cover the whole range covered by * the (former) group. All subrequests are removed from any write or commit * lists, unlinked from the group and destroyed. * * Returns a locked, referenced pointer to the head request - which after * this call is guaranteed to be the only request associated with the page. * Returns NULL if no requests are found for @folio, or a ERR_PTR if an * error was encountered.
*/ staticstruct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{ struct inode *inode = folio->mapping->host; struct nfs_page *head, *subreq; struct nfs_commit_info cinfo; int ret;
/* * A reference is taken only on the head request which acts as a * reference to the whole page group - the group will not be destroyed * until the head reference is released.
*/
retry:
head = nfs_folio_find_head_request(folio); if (!head) return NULL;
while (!nfs_lock_request(head)) {
ret = nfs_wait_on_request(head); if (ret < 0) {
nfs_release_request(head); return ERR_PTR(ret);
}
}
ret = nfs_page_group_lock(head); if (ret < 0) goto out_unlock;
/* Ensure that nobody removed the request before we locked it */ if (head != folio->private) {
nfs_page_group_unlock(head);
nfs_unlock_and_release_request(head); goto retry;
}
nfs_cancel_remove_inode(head, inode);
/* lock each request in the page group */ for (subreq = head->wb_this_page;
subreq != head;
subreq = subreq->wb_this_page) {
ret = nfs_page_group_lock_subreq(head, subreq); if (ret < 0) goto out_unlock;
}
/* * Find an associated nfs write request, and prepare to flush it out * May return an error if the user signalled nfs_wait_on_request().
*/ staticint nfs_do_writepage(struct folio *folio, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
{ struct nfs_page *req; int ret;
nfs_pageio_cond_complete(pgio, folio->index);
req = nfs_lock_and_join_requests(folio); if (!req) return 0; if (IS_ERR(req)) return PTR_ERR(req);
/* If there is a fatal error that covers this write, just exit */
ret = pgio->pg_error; if (nfs_error_is_fatal_on_server(ret)) goto out_launder;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error; /* * Remove the problematic req upon fatal errors on the server
*/ if (nfs_error_is_fatal_on_server(ret)) goto out_launder;
folio_redirty_for_writepage(wbc, folio);
nfs_redirty_request(req);
pgio->pg_error = 0; return ret;
}
/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct address_space *mapping = folio->mapping;
	struct nfs_inode *nfsi = NFS_I(mapping->host);

	/* Only a page group head may be attached to a folio */
	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/* Attach the request to the folio under i_private_lock */
	spin_lock(&mapping->i_private_lock);
	set_bit(PG_MAPPED, &req->wb_flags);
	folio_set_private(folio);
	folio->private = req;
	spin_unlock(&mapping->i_private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}
/* * Remove a write request from an inode
*/ staticvoid nfs_inode_remove_request(struct nfs_page *req)
{ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit and updates the cinfo count of
 * number of outstanding requests requiring a commit.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	/* Folio accounting is done outside the commit mutex */
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	/* Only requests that are actually on a commit list carry PG_CLEAN */
	if (test_and_clear_bit(PG_CLEAN, &req->wb_flags)) {
		nfs_list_remove_request(req);
		atomic_long_dec(&cinfo->mds->ncommit);
	}
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
/* NFS_I(cinfo->inode)->commit_mutex held by caller */ int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst, struct nfs_commit_info *cinfo, int max)
{ struct nfs_page *req, *tmp; int ret = 0;
/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	/* Cheap lockless check so idle inodes avoid the mutex entirely */
	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	/* Re-check under the mutex in case a commit raced with us */
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
						  unsigned int offset,
						  unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(folio);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	/* presumably wb_nio counts I/O retries; restart it for the
	 * merged request — TODO confirm against nfs_page definition */
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_folio(folio->mapping->host, folio);
	return (error < 0) ? ERR_PTR(error) : NULL;
}
/* * Try to update an existing write request, or create one if there is none. * * Note: Should always be called with the Page Lock held to prevent races * if we have to add a new request. Also assumes that the caller has * already called nfs_flush_incompatible() if necessary.
*/ staticstruct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx, struct folio *folio, unsignedint offset, unsignedint bytes)
{ struct nfs_page *req;
int nfs_flush_incompatible(struct file *file, struct folio *folio)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = locks_inode_context(file_inode(file));
	struct nfs_page *req;
	int do_flush, status;

	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_folio_find_head_request(folio);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		/* Flush if the request belongs to another folio or context */
		do_flush = nfs_page_to_folio(req) != folio ||
			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			/* Byte-range locks exist: also flush on a lockowner change */
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		/* Flush and retry the lookup until no incompatible req remains */
		status = nfs_wb_folio(folio->mapping->host, folio);
	} while (status == 0);
	return status;
}
/* * Avoid buffered writes when a open context credential's key would * expire soon. * * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL. * * Return 0 and set a credential flag which triggers the inode to flush * and performs NFS_FILE_SYNC writes if the key will expired within * RPC_KEY_EXPIRE_TIMEO.
*/ int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{ struct nfs_open_context *ctx = nfs_file_open_context(filp);
new = auth->au_ops->lookup_cred(auth, &acred, 0); if (new == cred) {
put_rpccred(new); returntrue;
} if (IS_ERR_OR_NULL(new)) { new = NULL;
ret = true;
} elseif (new->cr_ops->crkey_timeout &&
new->cr_ops->crkey_timeout(new))
ret = true;
/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_folio_write_uptodate(struct folio *folio,
				     unsigned int pagelen)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	/* With delegated attributes the cached data cannot be stale */
	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity &
	    (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE))
		return false;
	/* NOTE(review): rmb presumably pairs with a barrier on the
	 * invalidation side before NFS_INO_INVALIDATING is set — confirm */
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
		return false;
	return folio_test_uptodate(folio) != 0;
}
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct folio *folio,
				unsigned int pagelen)
{
	struct inode *inode = file_inode(file);
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct file_lock *fl;
	int ret;
	unsigned int mntflags = NFS_SERVER(inode)->flags;

	/* Mount option forbids extending writes beyond what was asked for */
	if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
		return 0;
	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_folio_write_uptodate(folio, pagelen))
		return 0;
	/* A write delegation makes any locks local-only, so extending is safe */
	if (nfs_have_write_delegation(inode))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
				      c.flc_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
				      c.flc_list);
		if (lock_is_write(fl))
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}
/* * Update and possibly write a cached page of an NFS file. * * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad * things with a page scheduled for an RPC call (e.g. invalidate it).
*/ int nfs_update_folio(struct file *file, struct folio *folio, unsignedint offset, unsignedint count)
{ struct nfs_open_context *ctx = nfs_file_open_context(file); struct address_space *mapping = folio->mapping; struct inode *inode = mapping->host; unsignedint pagelen = nfs_folio_length(folio); int status = 0;
/* If a nfs_flush_* function fails, it should remove reqs from @head and * call this on each, which will prepare them to be retried on next * writeback using standard nfs.
*/ staticvoid nfs_redirty_request(struct nfs_page *req)
{ struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));
/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	if (task->tk_status >= 0) {
		enum nfs3_stable_how committed = hdr->res.verf->committed;

		if (committed == NFS_UNSTABLE) {
			/*
			 * We have some uncommitted data on the server at
			 * this point, so ensure that we keep track of that
			 * fact irrespective of what later writes do.
			 */
			set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags);
		}

		if (committed < hdr->args.stable) {
			/* We tried a write call, but the server did not
			 * commit data to stable storage even though we
			 * requested it.
			 * Note: There is a known bug in Tru64 < 5.0 in which
			 * the server reports NFS_DATA_SYNC, but performs
			 * NFS_FILE_SYNC. We therefore implement this checking
			 * as a dprintk() in order to avoid filling syslog.
			 */
			static unsigned long complain;

			/* Note this will print the MDS for a DS write */
			if (time_before(complain, jiffies)) {
				dprintk("NFS: faulty NFS server %s:"
					" (committed = %d) != (stable = %d)\n",
					NFS_SERVER(inode)->nfs_client->cl_hostname,
					committed, hdr->args.stable);
				/* Rate-limit the complaint to once per 300s */
				complain = jiffies + 300 * HZ;
			}
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE
				| NFS_INO_REVAL_FORCED);
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		/* Reset the response before restarting the RPC call */
		resp->count = 0;
		resp->verf->committed = 0;
		rpc_restart_call_prepare(task);
	}
}
/* another commit raced with us */ if (list_empty(head)) return 0;
data = nfs_commitdata_alloc(); if (!data) {
nfs_retry_commit(head, NULL, cinfo, -1); return -ENOMEM;
}
/* Set up the argument struct */
nfs_init_commit(data, head, NULL, cinfo); if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
task_flags = RPC_TASK_MOVEABLE;
/* Okay, COMMIT succeeded, apparently. Check the verifier
* returned by the server against all stored verfs. */ if (nfs_write_match_verf(verf, req)) { /* We have a match */ if (folio)
nfs_inode_remove_request(req);
dprintk_cont(" OK\n"); goto next;
} /* We have a mismatch. Write the page again */
dprintk_cont(" mismatch\n");
nfs_mark_request_dirty(req);
atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
nfs_unlock_and_release_request(req); /* Latency breaker */
cond_resched();
}
/* Try a pNFS commit first; fall back to the MDS path if it declined. */
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status = pnfs_commit_list(inode, head, how, cinfo);

	if (status != PNFS_NOT_ATTEMPTED)
		return status;
	return nfs_commit_list(inode, head, how, cinfo);
}
/* Scan the inode's commit lists and issue COMMIT calls until drained. */
static int __nfs_commit_inode(struct inode *inode, int how,
			      struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	/* FLUSH_SYNC is handled here via wait_on_commit, not passed down */
	how &= ~FLUSH_SYNC;
	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		/* Background flush: credit the committed pages against wbc */
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		/* A short scan means the commit lists are now empty */
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}
/* Commit the inode's outstanding writes; FLUSH_SYNC in @how waits for them. */
int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{ struct nfs_inode *nfsi = NFS_I(inode); int flags = FLUSH_SYNC; int ret = 0;
if (wbc->sync_mode == WB_SYNC_NONE) { /* no commits means nothing needs to be done */ if (!atomic_long_read(&nfsi->commit_info.ncommit)) goto check_requests_outstanding;
/* Don't commit yet if this is a non-blocking flush and there * are a lot of outstanding writes for this mapping.
*/ if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) goto out_mark_dirty;
/* don't wait for the COMMIT response */
flags = 0;
}
ret = __nfs_commit_inode(inode, flags, wbc); if (!ret) { if (flags & FLUSH_SYNC) return 0;
} elseif (atomic_long_read(&nfsi->commit_info.ncommit)) goto out_mark_dirty;
/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int err = filemap_write_and_wait_range(mapping, lstart, lend);

	if (err != 0)
		return err;
	/* Writeback succeeded: also flush any pNFS layout state */
	return pnfs_sync_inode(mapping->host, true);
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);
/* * flush the inode to disk.
*/ int nfs_wb_all(struct inode *inode)
{ int ret;
trace_nfs_writeback_inode_enter(inode);
ret = filemap_write_and_wait(inode->i_mapping); if (ret) goto out;
ret = nfs_commit_inode(inode, FLUSH_SYNC); if (ret < 0) goto out;
pnfs_sync_inode(inode, true);
ret = 0;
/* Cancel all outstanding write requests attached to @folio. */
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{
	struct nfs_page *req;
	int ret = 0;

	folio_wait_writeback(folio);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(folio);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this folio have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
		folio_cancel_dirty(folio);
	}

	return ret;
}
/**
 * nfs_wb_folio - Write back all requests on one page
 * @inode: pointer to the inode
 * @folio: pointer to folio
 *
 * Assumes that the folio has been locked by the caller, and will
 * not unlock it.
 */
int nfs_wb_folio(struct inode *inode, struct folio *folio)
{
	loff_t range_start = folio_pos(folio);
	size_t len = folio_size(folio);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_start + len - 1,
	};
	int ret;

	for (;;) {
		folio_wait_writeback(folio);
		if (folio_clear_dirty_for_io(folio)) {
			/* Folio was dirty: write it out and re-check */
			ret = nfs_writepage_locked(folio, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		/* No private nfs_page state left: writeback is complete */
		if (!folio_test_private(folio))
			break;
		/* Clean but uncommitted: commit synchronously and loop */
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_folio_done(inode, range_start, len, ret);
	return ret;
}
#ifdef CONFIG_MIGRATION int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode)
{ /* * If the private flag is set, the folio is currently associated with * an in-progress read or write request. Don't try to migrate it. * * FIXME: we could do this in principle, but we'll need a way to ensure * that we can safely release the inode reference while holding * the folio lock.
*/ if (folio_test_private(src)) { if (mode == MIGRATE_SYNC)
nfs_wb_folio(src->mapping->host, src); if (folio_test_private(src)) return -EBUSY;
}
if (folio_test_private_2(src)) { /* [DEPRECATED] */ if (mode == MIGRATE_ASYNC) return -EBUSY;
folio_wait_private_2(src);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.