/*
 * For queue allocation
 */
static struct kmem_cache *blk_requestq_cachep;

/*
 * Controlling structure to kblockd
 */
static struct workqueue_struct *kblockd_workqueue;

/**
 * blk_queue_flag_set - atomically set a queue flag
 * @flag: flag to be set
 * @q: request queue
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
set_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_set);
/**
 * blk_queue_flag_clear - atomically clear a queue flag
 * @flag: flag to be cleared
 * @q: request queue
 */
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
clear_bit(flag, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_flag_clear);
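
/*
 * Illustrative sketch (not part of blk-core.c): how a driver might toggle a
 * queue flag with the helpers above, e.g. disabling merges on its queue.
 * QUEUE_FLAG_NOMERGES is a real flag from <linux/blkdev.h>; the function name
 * example_disable_merges() is made up for this example.
 */
static void example_disable_merges(struct request_queue *q, bool disable)
{
	if (disable)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
}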
/**
 * blk_op_str - Return string XXX in the REQ_OP_XXX.
 * @op: REQ_OP_XXX.
 *
 * Description: Centralize block layer function to convert REQ_OP_XXX into
 * string format. Useful for debugging and tracing a bio or request. For an
 * invalid REQ_OP_XXX it returns the string "UNKNOWN".
 */
inline const char *blk_op_str(enum req_op op)
{
	const char *op_str = "UNKNOWN";

	if (op < ARRAY_SIZE(blk_op_name) && blk_op_name[op])
		op_str = blk_op_name[op];

	return op_str;
}
EXPORT_SYMBOL_GPL(blk_op_str);
int blk_status_to_errno(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return -EIO;
	return blk_errors[idx].errno;
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
const char *blk_status_to_str(blk_status_t status)
{
	int idx = (__force int)status;

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
		return "";
	return blk_errors[idx].name;
}
EXPORT_SYMBOL_GPL(blk_status_to_str);
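
/*
 * Illustrative sketch (not part of blk-core.c): a driver-side completion
 * helper that uses blk_status_to_str()/blk_status_to_errno() and blk_op_str()
 * for error logging. example_complete_rq() is a made-up name; the three
 * helpers are the ones defined above.
 */
static void example_complete_rq(struct request *rq, blk_status_t status)
{
	if (status != BLK_STS_OK)
		pr_err("I/O error: op=%s status=%s errno=%d\n",
		       blk_op_str(req_op(rq)), blk_status_to_str(status),
		       blk_status_to_errno(status));
	blk_mq_end_request(rq, status);
}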
/**
 * blk_sync_queue - cancel any pending callbacks on a queue
 * @q: the queue
 *
 * Description:
 *     The block layer may perform asynchronous callback activity
 *     on a queue, such as calling the unplug function after a timeout.
 *     A block device may call blk_sync_queue to ensure that any
 *     such activity is cancelled, thus allowing it to release resources
 *     that the callbacks might use. The caller must already have made sure
 *     that its ->submit_bio will not re-add plugging prior to calling
 *     this function.
 *
 *     This function does not cancel any asynchronous activity arising
 *     out of elevator or throttling code. That would require elevator_exit()
 *     and blkcg_exit_queue() to be called with queue lock initialized.
 */
void blk_sync_queue(struct request_queue *q)
{
timer_delete_sync(&q->timeout);
cancel_work_sync(&q->timeout_work);
}
EXPORT_SYMBOL(blk_sync_queue);
/**
 * blk_put_queue - decrement the request_queue refcount
 * @q: the request_queue structure to decrement the refcount for
 *
 * Decrements the refcount of the request_queue and frees it when the refcount
 * reaches 0.
 */
void blk_put_queue(struct request_queue *q)
{
	if (refcount_dec_and_test(&q->refs))
blk_free_queue(q);
}
EXPORT_SYMBOL(blk_put_queue);
bool blk_queue_start_drain(struct request_queue *q)
{
	/*
	 * When queue DYING flag is set, we need to block new req
	 * entering queue, so we call blk_freeze_queue_start() to
	 * prevent I/O from crossing blk_queue_enter().
	 */
	bool freeze = __blk_freeze_queue_start(q, current);

	if (queue_is_mq(q))
		blk_mq_wake_waiters(q);
	/* Make blk_queue_enter() reexamine the DYING flag. */
	wake_up_all(&q->mq_freeze_wq);

	return freeze;
}
/**
 * blk_queue_enter() - try to increase q->q_usage_counter
 * @q: request queue pointer
 * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
 */
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
	const bool pm = flags & BLK_MQ_REQ_PM;

	while (!blk_try_enter_queue(q, pm)) {
		if (flags & BLK_MQ_REQ_NOWAIT)
			return -EAGAIN;

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(pm, q)) ||
			   blk_queue_dying(q));
		if (blk_queue_dying(q))
			return -ENODEV;
	}

	return 0;
}
int __bio_queue_enter(struct request_queue *q, struct bio *bio)
{
	while (!blk_try_enter_queue(q, false)) {
		struct gendisk *disk = bio->bi_bdev->bd_disk;

		if (bio->bi_opf & REQ_NOWAIT) {
			if (test_bit(GD_DEAD, &disk->state))
				goto dead;
			bio_wouldblock_error(bio);
			return -EAGAIN;
		}

		/*
		 * read pair of barrier in blk_freeze_queue_start(), we need to
		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
		 * reading .mq_freeze_depth or queue dying flag, otherwise the
		 * following wait may never return if the two reads are
		 * reordered.
		 */
		smp_rmb();
		wait_event(q->mq_freeze_wq,
			   (!q->mq_freeze_depth &&
			    blk_pm_resume_queue(false, q)) ||
			   test_bit(GD_DEAD, &disk->state));
		if (test_bit(GD_DEAD, &disk->state))
			goto dead;
	}

	return 0;
dead:
	bio_io_error(bio);
	return -ENODEV;
}
/**
 * blk_get_queue - increment the request_queue refcount
 * @q: the request_queue structure to increment the refcount for
 *
 * Increment the refcount of the request_queue kobject.
 *
 * Context: Any context.
 */
bool blk_get_queue(struct request_queue *q)
{
	if (unlikely(blk_queue_dying(q)))
		return false;
	refcount_inc(&q->refs);
	return true;
}
EXPORT_SYMBOL(blk_get_queue);
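
/*
 * Illustrative sketch (not part of blk-core.c): holding a request_queue
 * reference across a deferred operation. blk_get_queue() may fail if the
 * queue is dying, and every successful get must be paired with
 * blk_put_queue(). example_use_queue() is a made-up name.
 */
static int example_use_queue(struct request_queue *q)
{
	if (!blk_get_queue(q))
		return -ENODEV;

	/* ... safely reference q here, even while the device is torn down ... */

	blk_put_queue(q);
	return 0;
}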
static inline void bio_check_ro(struct bio *bio)
{
	if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
		if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
			return;

		if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED))
			return;

		bdev_set_flag(bio->bi_bdev, BD_RO_WARNED);

		/*
		 * Using an ioctl to set the underlying disk of a raid/dm
		 * device to read-only will trigger this.
		 */
		pr_warn("Trying to write to read-only block-device %pg\n",
			bio->bi_bdev);
	}
}
int should_fail_bio(struct bio *bio)
{
	if (should_fail_request(bdev_whole(bio->bi_bdev), bio->bi_iter.bi_size))
		return -EIO;
	return 0;
}
ALLOW_ERROR_INJECTION(should_fail_bio, ERRNO);
/*
 * Check whether this bio extends beyond the end of the device or partition.
 * This may well happen - the kernel calls bread() without checking the size of
 * the device, e.g., when mounting a file system.
 */
static inline int bio_check_eod(struct bio *bio)
{
	sector_t maxsector = bdev_nr_sectors(bio->bi_bdev);
	unsigned int nr_sectors = bio_sectors(bio);

	if (nr_sectors &&
	    (nr_sectors > maxsector ||
	     bio->bi_iter.bi_sector > maxsector - nr_sectors)) {
		if (!maxsector)
			return -EIO;
		pr_info_ratelimited("%s: attempt to access beyond end of device\n"
				    "%pg: rw=%d, sector=%llu, nr_sectors = %u limit=%llu\n",
				    current->comm, bio->bi_bdev, bio->bi_opf,
				    bio->bi_iter.bi_sector, nr_sectors, maxsector);
		return -EIO;
	}
	return 0;
}
/*
 * Remap block n of partition p to block n+start(p) of the disk.
 */
static int blk_partition_remap(struct bio *bio)
{
	struct block_device *p = bio->bi_bdev;
/*
 * Check write append to a zoned block device.
 */
static inline blk_status_t blk_check_zone_append(struct request_queue *q,
						 struct bio *bio)
{
	int nr_sectors = bio_sectors(bio);

	/* Only applicable to zoned block devices */
	if (!bdev_is_zoned(bio->bi_bdev))
		return BLK_STS_NOTSUPP;

	/* The bio sector must point to the start of a sequential zone */
	if (!bdev_is_zone_start(bio->bi_bdev, bio->bi_iter.bi_sector))
		return BLK_STS_IOERR;

	/*
	 * Not allowed to cross zone boundaries. Otherwise, the BIO will be
	 * split and could result in non-contiguous sectors being written in
	 * different zones.
	 */
	if (nr_sectors > q->limits.chunk_sectors)
		return BLK_STS_IOERR;

	/* Make sure the BIO is small enough and will not get split */
	if (nr_sectors > q->limits.max_zone_append_sectors)
		return BLK_STS_IOERR;

	bio->bi_opf |= REQ_NOMERGE;

	return BLK_STS_OK;
}
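
/*
 * Illustrative sketch (not part of blk-core.c): submitting a zone append
 * write that satisfies the checks above. The bio sector is set to the start
 * of the target zone; on completion, bi_sector holds the location actually
 * written by the device. zone_start_sector and the example_* names are
 * assumptions for this example.
 */
static void example_zone_append_end_io(struct bio *bio)
{
	/* The device reports the written sector back in bi_sector. */
	pr_info("zone append completed at sector %llu\n",
		(unsigned long long)bio->bi_iter.bi_sector);
	bio_put(bio);
}

static void example_zone_append(struct block_device *bdev, struct page *page,
				sector_t zone_start_sector)
{
	struct bio *bio;

	bio = bio_alloc(bdev, 1, REQ_OP_ZONE_APPEND, GFP_KERNEL);
	bio->bi_iter.bi_sector = zone_start_sector;
	bio->bi_end_io = example_zone_append_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}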
static void __submit_bio(struct bio *bio)
{
	/* If plug is not used, add new plug here to cache nsecs time. */
	struct blk_plug plug;
/*
 * The loop in this function may be a bit non-obvious, and so deserves some
 * explanation:
 *
 *  - Before entering the loop, bio->bi_next is NULL (as all callers ensure
 *    that), so we have a list with a single bio.
 *  - We pretend that we have just taken it off a longer list, so we assign
 *    bio_list to a pointer to the bio_list_on_stack, thus initialising the
 *    bio_list of new bios to be added. ->submit_bio() may indeed add some more
 *    bios through a recursive call to submit_bio_noacct. If it did, we find a
 *    non-NULL value in bio_list and re-enter the loop from the top.
 *  - In this case we really did just take the bio of the top of the list (no
 *    pretending) and so remove it from bio_list, and call into ->submit_bio()
 *    again.
 *
 * bio_list_on_stack[0] contains bios submitted by the current ->submit_bio.
 * bio_list_on_stack[1] contains bios that were submitted before the current
 * ->submit_bio, but that haven't been processed yet.
 */
static void __submit_bio_noacct(struct bio *bio)
{
	struct bio_list bio_list_on_stack[2];

	BUG_ON(bio->bi_next);

	bio_list_init(&bio_list_on_stack[0]);
	current->bio_list = bio_list_on_stack;

	do {
		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
		struct bio_list lower, same;
		/*
		 * Create a fresh bio_list for all subordinate requests.
		 */
		bio_list_on_stack[1] = bio_list_on_stack[0];
		bio_list_init(&bio_list_on_stack[0]);

		__submit_bio(bio);
		/*
		 * Sort new bios into those for a lower level and those for the
		 * same level.
		 */
		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
			if (q == bdev_get_queue(bio->bi_bdev))
				bio_list_add(&same, bio);
			else
				bio_list_add(&lower, bio);
		/*
		 * Now assemble so we handle the lowest level first.
		 */
		bio_list_merge(&bio_list_on_stack[0], &lower);
		bio_list_merge(&bio_list_on_stack[0], &same);
		bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
	} while ((bio = bio_list_pop(&bio_list_on_stack[0])));

	current->bio_list = NULL;
}

/*
 * The same as __submit_bio_noacct(), but always using the per-CPU bio lists
 */
static void __submit_bio_noacct_mq(struct bio *bio)
{
	struct bio_list bio_list[2] = { };

	current->bio_list = bio_list;
do {
__submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0])));
current->bio_list = NULL;
}
void submit_bio_noacct_nocheck(struct bio *bio, bool split)
{
blk_cgroup_bio_start(bio);
if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
		trace_block_bio_queue(bio);
		/*
		 * Now that enqueuing has been traced, we need to trace
		 * completion as well.
		 */
bio_set_flag(bio, BIO_TRACE_COMPLETION);
}
	/*
	 * We only want one ->submit_bio to be active at a time, else stack
	 * usage with stacked devices could be a problem. Use current->bio_list
	 * to collect a list of requests submitted by a ->submit_bio method
	 * while it is active, and then process them after it returned.
	 */
	if (current->bio_list) {
		if (split)
			bio_list_add_head(&current->bio_list[0], bio);
		else
			bio_list_add(&current->bio_list[0], bio);
	} else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) {
__submit_bio_noacct_mq(bio);
} else {
__submit_bio_noacct(bio);
}
}
static blk_status_t blk_validate_atomic_write_op_size(struct request_queue *q,
						       struct bio *bio)
{
	if (bio->bi_iter.bi_size > queue_atomic_write_unit_max_bytes(q))
		return BLK_STS_INVAL;

	if (bio->bi_iter.bi_size % queue_atomic_write_unit_min_bytes(q))
		return BLK_STS_INVAL;
return BLK_STS_OK;
}
/**
 * submit_bio_noacct - re-submit a bio to the block device layer for I/O
 * @bio: The bio describing the location in memory and on the device.
 *
 * This is a version of submit_bio() that shall only be used for I/O that is
 * resubmitted to lower level drivers by stacking block drivers. All file
 * systems and other upper level users of the block layer should use
 * submit_bio() instead.
 */
void submit_bio_noacct(struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
blk_status_t status = BLK_STS_IOERR;
might_sleep();
	/*
	 * For a REQ_NOWAIT based request, return -EOPNOTSUPP
	 * if queue does not support NOWAIT.
	 */
	if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
		goto not_supported;

	if (should_fail_bio(bio))
		goto end_io;
	bio_check_ro(bio);
	if (!bio_flagged(bio, BIO_REMAPPED)) {
		if (unlikely(bio_check_eod(bio)))
			goto end_io;
		if (bdev_is_partition(bdev) &&
		    unlikely(blk_partition_remap(bio)))
			goto end_io;
	}
	/*
	 * Filter flush bio's early so that bio based drivers without flush
	 * support don't have to worry about them.
	 */
	if (op_is_flush(bio->bi_opf)) {
		if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE &&
				 bio_op(bio) != REQ_OP_ZONE_APPEND))
			goto end_io;
		if (!bdev_write_cache(bdev)) {
			bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
			if (!bio_sectors(bio)) {
				status = BLK_STS_OK;
				goto end_io;
			}
		}
	}
	switch (bio_op(bio)) {
	case REQ_OP_READ:
		break;
	case REQ_OP_WRITE:
		if (bio->bi_opf & REQ_ATOMIC) {
			status = blk_validate_atomic_write_op_size(q, bio);
			if (status != BLK_STS_OK)
				goto end_io;
		}
		break;
	case REQ_OP_FLUSH:
		/*
		 * REQ_OP_FLUSH can't be submitted through bios, it is only
		 * synthesized in struct request by the flush state machine.
		 */
		goto not_supported;
	case REQ_OP_DISCARD:
		if (!bdev_max_discard_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_SECURE_ERASE:
		if (!bdev_max_secure_erase_sectors(bdev))
			goto not_supported;
		break;
	case REQ_OP_ZONE_APPEND:
		status = blk_check_zone_append(q, bio);
		if (status != BLK_STS_OK)
			goto end_io;
		break;
	case REQ_OP_WRITE_ZEROES:
		if (!q->limits.max_write_zeroes_sectors)
			goto not_supported;
		break;
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
	case REQ_OP_ZONE_RESET_ALL:
		if (!bdev_is_zoned(bio->bi_bdev))
			goto not_supported;
		break;
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
		/*
		 * Driver private operations are only used with passthrough
		 * requests.
		 */
		fallthrough;
	default:
		goto not_supported;
	}
	if (blk_throtl_bio(bio))
		return;
	submit_bio_noacct_nocheck(bio, false);
	return;

not_supported:
	status = BLK_STS_NOTSUPP;
end_io:
	bio->bi_status = status;
	bio_endio(bio);
}
EXPORT_SYMBOL(submit_bio_noacct);
static void bio_set_ioprio(struct bio *bio)
{
	/* Nobody set ioprio so far? Initialize it based on task's nice value */
	if (IOPRIO_PRIO_CLASS(bio->bi_ioprio) == IOPRIO_CLASS_NONE)
		bio->bi_ioprio = get_current_ioprio();
blkcg_set_ioprio(bio);
}
/**
 * submit_bio - submit a bio to the block device layer for I/O
 * @bio: The &struct bio which describes the I/O
 *
 * submit_bio() is used to submit I/O requests to block devices. It is passed a
 * fully set up &struct bio that describes the I/O that needs to be done. The
 * bio will be sent to the device described by the bi_bdev field.
 *
 * The success/failure status of the request, along with notification of
 * completion, is delivered asynchronously through the ->bi_end_io() callback
 * in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
 * been called.
 */
void submit_bio(struct bio *bio)
{
	if (bio_op(bio) == REQ_OP_READ) {
task_io_account_read(bio->bi_iter.bi_size);
count_vm_events(PGPGIN, bio_sectors(bio));
	} else if (bio_op(bio) == REQ_OP_WRITE) {
		count_vm_events(PGPGOUT, bio_sectors(bio));
	}

	bio_set_ioprio(bio);
	submit_bio_noacct(bio);
}
EXPORT_SYMBOL(submit_bio);
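
/*
 * Illustrative sketch (not part of blk-core.c): a minimal asynchronous read
 * using submit_bio(). Completion is reported through ->bi_end_io(), and the
 * bio must not be touched until that callback runs, as documented above.
 * example_read_end_io()/example_read_sector() are made-up names.
 */
static void example_read_end_io(struct bio *bio)
{
	if (bio->bi_status)
		pr_err("read failed: %s\n", blk_status_to_str(bio->bi_status));
	bio_put(bio);
}

static void example_read_sector(struct block_device *bdev, struct page *page,
				sector_t sector)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = example_read_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}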
/**
 * bio_poll - poll for BIO completions
 * @bio: bio to poll for
 * @iob: batches of IO
 * @flags: BLK_POLL_* flags that control the behavior
 *
 * Poll for completions on queue associated with the bio. Returns number of
 * completed entries found.
 *
 * Note: the caller must either be the context that submitted @bio, or
 * be in a RCU critical section to prevent freeing of @bio.
 */
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
{
	blk_qc_t cookie = READ_ONCE(bio->bi_cookie);
	struct block_device *bdev;
	struct request_queue *q;
	int ret = 0;

	bdev = READ_ONCE(bio->bi_bdev);
	if (!bdev)
		return 0;

	q = bdev_get_queue(bdev);
	if (cookie == BLK_QC_T_NONE)
		return 0;

	blk_flush_plug(current->plug, false);

	/*
	 * We need to be able to enter a frozen queue, similar to how
	 * timeouts also need to do that. If that is blocked, then we can
	 * have pending IO when a queue freeze is started, and then the
	 * wait for the freeze to finish will wait for polled requests to
	 * timeout as the poller is prevented from entering the queue and
	 * completing them. As long as we prevent new IO from being queued,
	 * that should be all that matters.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return 0;
	if (queue_is_mq(q)) {
ret = blk_mq_poll(q, cookie, iob, flags);
	} else {
		struct gendisk *disk = q->disk;

		if ((q->limits.features & BLK_FEAT_POLL) && disk &&
		    disk->fops->poll_bio)
			ret = disk->fops->poll_bio(bio, iob, flags);
	}
	blk_queue_exit(q);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_poll);
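
/*
 * Illustrative sketch (not part of blk-core.c): busy-waiting for a polled
 * bio with bio_poll(), similar to what the block device direct I/O path does
 * for IOCB_HIPRI requests. The REQ_POLLED usage, the "done" flag protocol and
 * the example_* names are assumptions for this example.
 */
static void example_polled_end_io(struct bio *bio)
{
	WRITE_ONCE(*(bool *)bio->bi_private, true);
}

static void example_polled_read(struct block_device *bdev, struct page *page,
				sector_t sector)
{
	bool done = false;
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ | REQ_POLLED,
				    GFP_KERNEL);

	bio->bi_iter.bi_sector = sector;
	bio->bi_private = &done;
	bio->bi_end_io = example_polled_end_io;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);

	/* Poll until ->bi_end_io() has marked the bio as done. */
	while (!READ_ONCE(done)) {
		if (!bio_poll(bio, NULL, 0))
			cpu_relax();
	}
	bio_put(bio);
}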
/*
 * Helper to implement file_operations.iopoll. Requires the bio to be stored
 * in iocb->private, and cleared before freeing the bio.
 */
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
		    unsigned int flags)
{
	struct bio *bio;
	int ret = 0;

	/*
	 * Note: the bio cache only uses SLAB_TYPESAFE_BY_RCU, so bio can
	 * point to a freshly allocated bio at this point. If that happens
	 * we have a few cases to consider:
	 *
	 * 1) the bio is being initialized and bi_bdev is NULL. We can simply
	 *    do nothing in this case
	 * 2) the bio points to a not poll enabled device. bio_poll will catch
	 *    this and return 0
	 * 3) the bio points to a poll capable device, including but not
	 *    limited to the one that the original bio pointed to. In this
	 *    case we will call into the actual poll method and poll for I/O,
	 *    even if we don't need to, but it won't cause harm either.
	 *
	 * For cases 2) and 3) above the RCU grace period ensures that bi_bdev
	 * is still allocated. Because partitions hold a reference to the whole
	 * device bdev and thus disk, the disk is also still valid. Grabbing
	 * a reference to the queue in bio_poll() ensures the hctxs and requests
	 * are still valid as well.
	 */
rcu_read_lock();
	bio = READ_ONCE(kiocb->private);
	if (bio)
ret = bio_poll(bio, iob, flags);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
/**
 * bio_start_io_acct - start I/O accounting for bio based drivers
 * @bio: bio to start account for
 *
 * Returns the start time that should be passed back to bio_end_io_acct().
 */
unsigned long bio_start_io_acct(struct bio *bio)
{
	return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
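
/*
 * Illustrative sketch (not part of blk-core.c): how a bio based driver might
 * pair bio_start_io_acct() with bio_end_io_acct(). bio_end_io_acct() is the
 * companion helper declared in <linux/blkdev.h>; example_driver_submit() is
 * a made-up name.
 */
static void example_driver_submit(struct bio *bio)
{
	unsigned long start_time = bio_start_io_acct(bio);

	/* ... perform or remap the I/O ... */

	bio_end_io_acct(bio, start_time);
	bio_endio(bio);
}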
/**
 * blk_lld_busy - Check if underlying low-level drivers of a device are busy
 * @q : the queue of the device being checked
 *
 * Description:
 *     Check if underlying low-level drivers of a device are busy.
 *     If the drivers want to export their busy state, they must set own
 *     exporting function using blk_queue_lld_busy() first.
 *
 *     Basically, this function is used only by request stacking drivers
 *     to stop dispatching requests to underlying devices when underlying
 *     devices are busy. This behavior helps more I/O merging on the queue
 *     of the request stacking driver and prevents I/O throughput regression
 *     on burst I/O load.
 *
 * Return:
 *     0 - Not busy (The request stacking driver should dispatch request)
 *     1 - Busy (The request stacking driver should stop dispatching request)
 */
int blk_lld_busy(struct request_queue *q)
{
	if (queue_is_mq(q) && q->mq_ops->busy)
		return q->mq_ops->busy(q);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
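
/*
 * Illustrative sketch (not part of blk-core.c): a request stacking driver
 * (dm-multipath style) consulting blk_lld_busy() before dispatching to an
 * underlying device, as described in the comment above. struct example_path
 * and example_pick_path() are made-up names.
 */
struct example_path {
	struct request_queue *queue;
	struct list_head list;
};

static struct example_path *example_pick_path(struct list_head *paths)
{
	struct example_path *path;

	list_for_each_entry(path, paths, list) {
		/* Skip paths whose low-level driver reports itself busy. */
		if (!blk_lld_busy(path->queue))
			return path;
	}
	return NULL;
}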
int kblockd_schedule_work(struct work_struct *work)
{
	return queue_work(kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work);
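
/*
 * Illustrative sketch (not part of blk-core.c): deferring block layer work
 * to the kblockd workqueue. The work item and handler names are made up;
 * kblockd_schedule_work() simply queues onto kblockd_workqueue as above.
 */
static void example_requeue_fn(struct work_struct *work)
{
	/* ... e.g. re-run a hardware queue or requeue deferred requests ... */
}

static DECLARE_WORK(example_requeue_work, example_requeue_fn);

static void example_kick_requeue(void)
{
	kblockd_schedule_work(&example_requeue_work);
}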
	/*
	 * Store ordering should not be needed here, since a potential
	 * preempt will imply a full memory barrier.
	 */
	tsk->plug = plug;
}
/**
 * blk_start_plug - initialize blk_plug and track it inside the task_struct
 * @plug: The &struct blk_plug that needs to be initialized
 *
 * Description:
 *   blk_start_plug() indicates to the block layer an intent by the caller
 *   to submit multiple I/O requests in a batch. The block layer may use
 *   this hint to defer submitting I/Os from the caller until blk_finish_plug()
 *   is called. However, the block layer may choose to submit requests
 *   before a call to blk_finish_plug() if the number of queued I/Os
 *   exceeds %BLK_MAX_REQUEST_COUNT, or if the size of the I/O is larger than
 *   %BLK_PLUG_FLUSH_SIZE. The queued I/Os may also be submitted early if
 *   the task schedules (see below).
 *
 * Tracking blk_plug inside the task_struct will help with auto-flushing the
 * pending I/O should the task end up blocking between blk_start_plug() and
 * blk_finish_plug(). This is important from a performance perspective, but
 * also ensures that we don't deadlock. For instance, if the task is blocking
 * for a memory allocation, memory reclaim could end up wanting to free a
 * page belonging to that request that is currently residing in our private
 * plug. By flushing the pending I/O when the process goes to sleep, we avoid
 * this kind of deadlock.
 */
void blk_start_plug(struct blk_plug *plug)
{
blk_start_plug_nr_ios(plug, 1);
}
EXPORT_SYMBOL(blk_start_plug);
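
/*
 * Illustrative sketch (not part of blk-core.c): batching a series of bio
 * submissions under a plug so the block layer can merge and dispatch them
 * together, per the description above. The bios[] array and count are
 * assumptions for this example.
 */
static void example_submit_batch(struct bio **bios, int count)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < count; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}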
	/* Not currently on the callback list */
	BUG_ON(size < sizeof(*cb));
	cb = kzalloc(size, GFP_ATOMIC);
	if (cb) {
cb->data = data;
cb->callback = unplug;
list_add(&cb->list, &plug->cb_list);
	}
	return cb;
}
EXPORT_SYMBOL(blk_check_plugged);
void __blk_flush_plug(struct blk_plug *plug, bool from_schedule)
{
	if (!list_empty(&plug->cb_list))
		flush_plug_callbacks(plug, from_schedule);
	blk_mq_flush_plug_list(plug, from_schedule);
	/*
	 * Unconditionally flush out cached requests, even if the unplug
	 * event came from schedule. Since we hold references to the
	 * queue for cached requests, we don't want a blocked task holding
	 * up a queue freeze/quiesce event.
	 */
	if (unlikely(!rq_list_empty(&plug->cached_rqs)))
		blk_mq_free_plug_rqs(plug);
}
/**
 * blk_finish_plug - mark the end of a batch of submitted I/O
 * @plug: The &struct blk_plug passed to blk_start_plug()
 *
 * Description:
 * Indicate that a batch of I/O submissions is complete. This function
 * must be paired with an initial call to blk_start_plug(). The intent
 * is to allow the block layer to optimize I/O submission. See the
 * documentation for blk_start_plug() for more information.
 */
void blk_finish_plug(struct blk_plug *plug)
{
	if (plug == current->plug) {
__blk_flush_plug(plug, false);
current->plug = NULL;
}
}
EXPORT_SYMBOL(blk_finish_plug);
void blk_io_schedule(void)
{
	/* Prevent hang_check timer from firing at us during very long I/O */
	unsigned long timeout = sysctl_hung_task_timeout_secs * HZ / 2;

	if (timeout)
		io_schedule_timeout(timeout);
	else
io_schedule();
}
EXPORT_SYMBOL_GPL(blk_io_schedule);
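
/*
 * Illustrative sketch (not part of blk-core.c): waiting for an I/O completion
 * flag with blk_io_schedule() instead of io_schedule(), so that very long
 * waits don't trip the hung task detector. The "done" flag protocol is an
 * assumption; the flag would be set by a ->bi_end_io() handler as in the
 * earlier examples.
 */
static void example_wait_for_io(bool *done)
{
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (READ_ONCE(*done))
			break;
		blk_io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}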
	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
	kblockd_workqueue = alloc_workqueue("kblockd",
					    WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kblockd_workqueue)
		panic("Failed to create kblockd\n");