if (tio->error) /* * An error has already been detected on the request. * Once error occurred, just let clone->end_io() handle * the remainder.
*/ return; elseif (error) { /* * Don't notice the error to the upper layer yet. * The error handling decision is made by the target driver, * when the request is completed.
*/
tio->error = error; gotoexit;
}
/* * I/O for the bio successfully completed. * Notice the data completion to the upper layer.
*/
tio->completed += nr_bytes;
/* * Update the original request. * Do not use blk_mq_end_request() here, because it may complete * the original request before the clone, and break the ordering.
*/ if (is_last) exit:
blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
/* * Don't touch any member of the md after calling this function because * the md may be freed in dm_put() at the end of this function. * Or do dm_get() before calling this function and dm_put() later.
*/ staticvoid rq_completed(struct mapped_device *md)
{ /* * dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
}
/* * Complete the clone and the original request. * Must be called without clone's queue lock held, * see end_clone_request() for more details.
*/ staticvoid dm_end_request(struct request *clone, blk_status_t error)
{ struct dm_rq_target_io *tio = clone->end_io_data; struct mapped_device *md = tio->md; struct request *rq = tio->orig;
if (tio->ti) {
rq_end_io = tio->ti->type->rq_end_io;
if (mapped && rq_end_io)
r = rq_end_io(tio->ti, clone, error, &tio->info);
}
if (unlikely(error == BLK_STS_TARGET)) { if (req_op(clone) == REQ_OP_DISCARD &&
!clone->q->limits.max_discard_sectors)
blk_queue_disable_discard(tio->md->queue); elseif (req_op(clone) == REQ_OP_WRITE_ZEROES &&
!clone->q->limits.max_write_zeroes_sectors)
blk_queue_disable_write_zeroes(tio->md->queue);
}
switch (r) { case DM_ENDIO_DONE: /* The target wants to complete the I/O */
dm_end_request(clone, error); break; case DM_ENDIO_INCOMPLETE: /* The target will handle the I/O */ return; case DM_ENDIO_REQUEUE: /* The target wants to requeue the I/O */
dm_requeue_original_request(tio, false); break; case DM_ENDIO_DELAY_REQUEUE: /* The target wants to requeue the I/O after a delay */
dm_requeue_original_request(tio, true); break; default:
DMCRIT("unimplemented target endio return value: %d", r);
BUG();
}
}
/* * Complete the clone and the original request with the error status * through softirq context.
*/ staticvoid dm_complete_request(struct request *rq, blk_status_t error)
{ struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error; if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
}
/* * Complete the not-mapped clone and the original request with the error status * through softirq context. * Target's rq_end_io() function isn't called. * This may be used when the target's clone_and_map_rq() function fails.
*/ staticvoid dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
rq->rq_flags |= RQF_FAILED;
dm_complete_request(rq, error);
}
/* Initialize the per-request target I/O bookkeeping for @rq on @md. */
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}
/* * Returns: * DM_MAPIO_* : the request has been processed as indicated * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued * < 0 : the request was completed due to failure
*/ staticint map_request(struct dm_rq_target_io *tio)
{ int r; struct dm_target *ti = tio->ti; struct mapped_device *md = tio->md; struct request *rq = tio->orig; struct request *clone = NULL;
blk_status_t ret;
r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone); switch (r) { case DM_MAPIO_SUBMITTED: /* The target has taken the I/O to submit by itself later */ break; case DM_MAPIO_REMAPPED: if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { /* -ENOMEM */
ti->type->release_clone_rq(clone, &tio->info); return DM_MAPIO_REQUEUE;
}
/* The target has remapped the I/O so dispatch it */
trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = blk_insert_cloned_request(clone); switch (ret) { case BLK_STS_OK: break; case BLK_STS_RESOURCE: case BLK_STS_DEV_RESOURCE:
blk_rq_unprep_clone(clone);
blk_mq_cleanup_rq(clone);
tio->ti->type->release_clone_rq(clone, &tio->info);
tio->clone = NULL; return DM_MAPIO_REQUEUE; default: /* must complete clone in terms of original request */
dm_complete_request(rq, ret);
} break; case DM_MAPIO_REQUEUE: /* The target wants to requeue the I/O */ break; case DM_MAPIO_DELAY_REQUEUE: /* The target wants to requeue the I/O after a delay */
dm_requeue_original_request(tio, true); break; case DM_MAPIO_KILL: /* The target wants to complete the I/O */
dm_kill_unmapped_request(rq, BLK_STS_IOERR); break; default:
DMCRIT("unimplemented target map return value: %d", r);
BUG();
}
return r;
}
/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	/* The heuristic is gone; the sysfs attribute always reports 0. */
	return sprintf(buf, "%u\n", 0);
}
/* * Hold the md reference here for the in-flight I/O. * We can't rely on the reference count by device opener, * because the device may be closed during the request completion * when all bios are completed. * See the comment in rq_completed() too.
*/
dm_get(md);
}
/* * blk-mq's unquiesce may come from outside events, such as * elevator switch, updating nr_requests or others, and request may * come during suspend, so simply ask for blk-mq to requeue it.
*/ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) return BLK_STS_RESOURCE;
if (unlikely(!ti)) { int srcu_idx; struct dm_table *map;
/* Direct call is fine since .queue_rq allows allocations */ if (map_request(tio) == DM_MAPIO_REQUEUE) { /* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md); return BLK_STS_RESOURCE;
}
md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
immutable_tgt = dm_table_get_immutable_target(t); if (immutable_tgt && immutable_tgt->per_io_data_size) { /* any target-specific per-io data is immediately after the tio */
md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
md->init_tio_pdu = true;
}
err = blk_mq_alloc_tag_set(md->tag_set); if (err) goto out_kfree_tag_set;
err = blk_mq_init_allocated_queue(md->tag_set, md->queue); if (err) goto out_tag_set; return 0;
/*
 * NOTE(review): the lines below are extraction residue — a German website
 * disclaimer ("information compiled to the best of our knowledge; no
 * guarantee of completeness, correctness or quality; the syntax coloring
 * and measurement are still experimental"). They are not part of this
 * source file and are preserved here only inside a comment:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */