// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem read subrequest retrying.
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
/* * Go through the list of failed/short reads, retrying all retryable ones. We * need to switch failed cache reads to network downloads.
*/ staticvoid netfs_retry_read_subrequests(struct netfs_io_request *rreq)
{ struct netfs_io_subrequest *subreq; struct netfs_io_stream *stream = &rreq->io_streams[0]; struct list_head *next;
_enter("R=%x", rreq->debug_id);
if (list_empty(&stream->subrequests)) return;
if (rreq->netfs_ops->retry_request)
rreq->netfs_ops->retry_request(rreq, NULL);
/* If there's no renegotiation to do, just resend each retryable subreq * up to the first permanently failed one.
*/ if (!rreq->netfs_ops->prepare_read &&
!rreq->cache_resources.ops) {
list_for_each_entry(subreq, &stream->subrequests, rreq_link) { if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) break; if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
subreq->retry_count++;
netfs_reset_iter(subreq);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
netfs_reissue_read(rreq, subreq);
}
} return;
}
/* Okay, we need to renegotiate all the download requests and flip any * failed cache reads over to being download requests and negotiate * those also. All fully successful subreqs have been removed from the * list and any spare data from those has been donated. * * What we do is decant the list and rebuild it one subreq at a time so * that we don't end up with donations jumping over a gap we're busy * populating with smaller subrequests. In the event that the subreq * we just launched finishes before we insert the next subreq, it'll * fill in rreq->prev_donated instead. * * Note: Alternatively, we could split the tail subrequest right before * we reissue it and fix up the donations under lock.
*/
next = stream->subrequests.next;
/* Go through the subreqs and find the next span of contiguous * buffer that we then rejig (cifs, for example, needs the * rsize renegotiating) and reissue.
*/
from = list_entry(next, struct netfs_io_subrequest, rreq_link);
to = from;
start = from->start + from->transferred;
len = from->len - from->transferred;
/* Determine the set of buffers we're going to use. Each * subreq gets a subset of a single overall contiguous buffer.
*/
netfs_reset_iter(from);
source = from->io_iter;
source.count = len;
/* Work through the sublist. */
subreq = from;
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { if (!len) {
subreq_superfluous = true; break;
}
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
subreq->start = start - subreq->transferred;
subreq->len = len + subreq->transferred;
__clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
__clear_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
subreq->retry_count++;
/* If we managed to use fewer subreqs, we can discard the * excess; if we used the same number, then we're done.
*/ if (!len) { if (!subreq_superfluous) continue;
list_for_each_entry_safe_from(subreq, tmp,
&stream->subrequests, rreq_link) {
trace_netfs_sreq(subreq, netfs_sreq_trace_superfluous);
list_del(&subreq->rreq_link);
netfs_put_subrequest(subreq, netfs_sreq_trace_put_done); if (subreq == to) break;
} continue;
}
/* We ran out of subrequests, so we need to allocate some more * and insert them after.
*/ do {
subreq = netfs_alloc_subrequest(rreq); if (!subreq) {
subreq = to; goto abandon_after;
}
subreq->source = NETFS_DOWNLOAD_FROM_SERVER;
subreq->start = start;
subreq->len = len;
subreq->stream_nr = stream->stream_nr;
subreq->retry_count = 1;
/* Wait for all outstanding I/O to quiesce before performing retries as * we may need to renegotiate the I/O sizes.
*/
set_bit(NETFS_RREQ_RETRYING, &rreq->flags);
netfs_wait_for_in_progress_stream(rreq, stream);
clear_bit(NETFS_RREQ_RETRYING, &rreq->flags);
/* * Unlock any the pages that haven't been unlocked yet due to abandoned * subrequests.
*/ void netfs_unlock_abandoned_read_pages(struct netfs_io_request *rreq)
{ struct folio_queue *p;
for (p = rreq->buffer.tail; p; p = p->next) { for (int slot = 0; slot < folioq_count(p); slot++) { struct folio *folio = folioq_folio(p, slot);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.