/* The whole RPC will be resent: nothing to complete here. */
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
	goto out;

/*
 * Walk every nfs_page still attached to this header: zero any folio
 * range the server returned no (good) data for, propagate an I/O error
 * to the open context, and release each request.
 *
 * NOTE(review): "unsignedlong" and "elseif" in the original were fused
 * tokens (extraction damage) and have been restored to valid C; no
 * statement was otherwise changed.
 */
while (!list_empty(&hdr->pages)) {
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
	struct folio *folio = nfs_page_to_folio(req);
	unsigned long start = req->wb_pgbase;
	unsigned long end = req->wb_pgbase + req->wb_bytes;

	if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
		/* note: regions of the page not covered by a
		 * request are zeroed in nfs_read_add_folio
		 */
		if (bytes > hdr->good_bytes) {
			/* nothing in this request was good, so zero
			 * the full extent of the request */
			folio_zero_segment(folio, start, end);
		} else if (hdr->good_bytes - bytes < req->wb_bytes) {
			/* part of this request has good bytes, but
			 * not all. zero the bad bytes */
			start += hdr->good_bytes - bytes;
			WARN_ON(start < req->wb_pgbase);
			folio_zero_segment(folio, start, end);
		}
	}
	error = 0;
	bytes += req->wb_bytes;
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		if (bytes <= hdr->good_bytes)
			/* this request lies entirely within the good bytes */
			nfs_page_group_set_uptodate(req);
		else {
			/* stash the error on the open context so a later
			 * caller can report it */
			error = hdr->error;
			xchg(&nfs_req_openctx(req)->error, error);
		}
	} else
		nfs_page_group_set_uptodate(req);
	nfs_list_remove_request(req);
	nfs_readpage_release(req, error);
}
/*
 * NOTE(review): the "goto out" above has no visible "out:" label in
 * this chunk; upstream places it immediately before this call — confirm
 * the label survives in the full file.
 */
nfs_netfs_read_completion(hdr);
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 *
 * NOTE(review): this span appears damaged by extraction.  "staticint"
 * below is a fused "static int".  More importantly, everything after
 * the early "return status" uses argp/resp, which are never declared in
 * this view, and issues value-less "return;" statements inside a
 * function declared to return int.  Upstream, that code lives in a
 * separate void short-read retry helper; the tail of nfs_readpage_done
 * and the helper's header (with the argp/resp declarations) seem to
 * have been lost between the first two lines of the body.  Restore from
 * the original file rather than patching in place.
 */
staticint nfs_readpage_done(struct rpc_task *task, struct nfs_pgio_header *hdr, struct inode *inode)
{ int status = NFS_PROTO(inode)->read_done(task, hdr); if (status != 0) return status;
/* This is a short read! */
nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
trace_nfs_readpage_short(task, hdr);
/* Has the server at least made some progress? */ if (resp->count == 0) {
/* no: fail the whole pgio with -EIO at the request's offset */
nfs_set_pgio_error(hdr, -EIO, argp->offset); return;
}
/* For non rpc-based layout drivers, retry-through-MDS */ if (!task->tk_ops) {
hdr->pnfs_error = -EAGAIN; return;
}
/* Yes, so retry the read at the end of the hdr */
hdr->mds_offset += resp->count;
/* advance the args past the bytes already received ... */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
/* ... reset the reply counters, and re-drive the RPC */
resp->count = 0;
resp->eof = 0;
rpc_restart_call_prepare(task);
}
/*
 * Tail of the synchronous folio-read path.  The function header — and
 * the declarations of ret, pgio, ctx, inode and folio — sit above this
 * chunk and are not visible here.  Queue the folio, drive the pageio
 * machinery, then wait for completion and collect any error recorded on
 * the open context.
 */
ret = nfs_read_add_folio(&pgio, ctx, folio); if (ret) goto out_put;
nfs_pageio_complete_read(&pgio);
/* presumably refreshes the cached atime while holding a delegation — confirm */
nfs_update_delegated_atime(inode); if (pgio.pg_error < 0) {
ret = pgio.pg_error; goto out_put;
}
/* Wait for the read I/O to unlock the folio (interruptible by fatal signals). */
ret = folio_wait_locked_killable(folio); if (!folio_test_uptodate(folio) && !ret)
/* not uptodate and no wait error: report (and clear) the error
 * stashed on the open context */
ret = xchg(&ctx->error, 0);
out_put:
put_nfs_open_context(ctx); return ret;
}
/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous large read
 * through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int err;

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the folio lock, there cannot be any new
	 * pending writes generated at this point for this folio (other
	 * folios can be written to).
	 */
	err = nfs_wb_folio(inode, folio);
	if (err)
		goto unlock;

	/* Nothing to do if the folio is already uptodate. */
	if (folio_test_uptodate(folio))
		goto unlock;

	err = -ESTALE;
	if (NFS_STALE(inode))
		goto unlock;

	/* Try the netfs read path first; on failure fall back to the
	 * plain NFS read. */
	err = nfs_netfs_read_folio(file, folio);
	if (err)
		err = nfs_do_read_folio(file, folio);
done:
	trace_nfs_aop_readpage_done(inode, pos, len, err);
	return err;
unlock:
	folio_unlock(folio);
	goto done;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.