// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
/* *------------------------------------------------------------------- * We need to keep track of which region a bio is doing io for. * To avoid a memory allocation to store just 5 or 6 bits, we * ensure the 'struct io' pointer is aligned so enough low bits are * always zero and then combine it with the region number directly in * bi_private. *-------------------------------------------------------------------
*/ staticvoid store_io_and_region_in_bio(struct bio *bio, struct io *io, unsignedint region)
{ if (unlikely(!IS_ALIGNED((unsignedlong)io, DM_IO_MAX_REGIONS))) {
DMCRIT("Unaligned struct io pointer %p", io);
BUG();
}
/* *-------------------------------------------------------------- * We need an io object to keep track of the number of bios that * have been dispatched for a particular io. *--------------------------------------------------------------
*/ staticvoid complete_io(struct io *io)
{ unsignedlong error_bits = io->error_bits;
io_notify_fn fn = io->callback; void *context = io->context;
if (io->vma_invalidate_size)
invalidate_kernel_vmap_range(io->vma_invalidate_address,
io->vma_invalidate_size);
if (bio->bi_status && bio_data_dir(bio) == READ)
zero_fill_bio(bio);
/* * The bio destructor in bio_put() may use the io object.
*/
retrieve_io_and_region_from_bio(bio, &io, ®ion);
error = bio->bi_status;
bio_put(bio);
dec_count(io, region, error);
}
/*
 *--------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *--------------------------------------------------------------
 */
struct dpages {
	/* Return the current page, the usable length and offset within it. */
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned int *offset);
	/* Advance the iterator to the next page. */
	void (*next_page)(struct dpages *dp);

	/* Iterator state; which member is live depends on the source type. */
	union {
		unsigned int context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	/*
	 * NOTE(review): the tail of this declaration was truncated in this
	 * copy; the fields below are restored from the upstream layout
	 * (vmalloc'd buffers needing cache invalidation on completion).
	 */
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
/* * We just use bvec iterator to retrieve pages, so it is ok to * access the bvec table directly here
*/
dp->context_ptr = bio->bi_io_vec;
dp->context_bi = bio->bi_iter;
}
/* * Functions for getting the pages from a VMA.
*/ staticvoid vm_get_page(struct dpages *dp, struct page **p, unsignedlong *len, unsignedint *offset)
{
*p = vmalloc_to_page(dp->context_ptr);
*offset = dp->context_u;
*len = PAGE_SIZE - dp->context_u;
}
/*
 * Reject unsupported discard and write same requests.
 *
 * NOTE(review): this span is a truncated interior fragment of
 * do_region() — the function head, local declarations and the tail of
 * the do/while loop are missing from this copy, and inter-token
 * whitespace was lost during extraction ("elseif" is a fused
 * "else if"). Restore from the authoritative tree before building.
 */
if (op == REQ_OP_DISCARD)
special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev); elseif (op == REQ_OP_WRITE_ZEROES)
special_cmd_max_sectors = q->limits.max_write_zeroes_sectors; if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
special_cmd_max_sectors == 0) {
/* Hold a count so dec_count() can complete the io with NOTSUPP. */
atomic_inc(&io->count);
dec_count(io, region, BLK_STS_NOTSUPP); return;
}
/*
 * where->count may be zero if op holds a flush and we need to
 * send a zero-sized flush.
 */
do { /*
 * Allocate a suitably sized-bio: discard/write-zeroes carry no
 * payload pages, so no bvecs are needed.
 */ switch (op) { case REQ_OP_DISCARD: case REQ_OP_WRITE_ZEROES:
num_bvecs = 0; break; default:
num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
(PAGE_SIZE >> SECTOR_SHIFT)) + 1);
}
/*
 * For multiple regions we need to be careful to rewind
 * the dp object for each call to do_region.
 *
 * NOTE(review): truncated tail fragment of dispatch_io(); the function
 * head and local declarations (i, dp snapshot "old_pages", io, etc.)
 * are missing from this copy — restore from the authoritative tree
 * before building.
 */
for (i = 0; i < num_regions; i++) {
*dp = old_pages; if (where[i].count || (opf & REQ_PREFLUSH))
do_region(opf, i, where + i, dp, io, ioprio);
}

/*
 * Drop the extra reference that we were holding to avoid
 * the io being completed too early.
 */
dec_count(io, 0, 0);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.