// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Device-mapper target to emulate smaller logical block
 * size on backing devices exposing (natively) larger ones.
 *
 * E.g. 512 byte sector emulation on 4K native disks.
 */
/*
 * Return the number of underlying blocks a bio spans, accounting for
 * misalignment of its start and end sectors relative to the underlying
 * block size (ec->u_bs): a partial leading block is absorbed by folding
 * the start offset into the length, and a partial trailing block adds one.
 */
static inline unsigned int __nr_blocks(struct ebs_c *ec, struct bio *bio)
{
	sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);

	return __sector_to_block(ec, end_sector) + (__block_mod(end_sector, ec->u_bs) ? 1 : 0);
}
/* Avoid reading for writes in case bio vector's page overwrites block completely. */ if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio))
ba = dm_bufio_read(ec->bufio, block, &b); else
ba = dm_bufio_new(ec->bufio, block, &b);
if (IS_ERR(ba)) { /* * Carry on with next buffer, if any, to issue all possible * data but return error.
*/
r = PTR_ERR(ba);
} else { /* Copy data to/from bio to buffer if read/new was successful above. */
ba += buf_off; if (op == REQ_OP_READ) {
memcpy(pa, ba, cur_len);
flush_dcache_page(bv->bv_page);
} else {
flush_dcache_page(bv->bv_page);
memcpy(ba, pa, cur_len);
dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
}
/*
 * READ/WRITE: iterate a bio's vectors, copying between (partial) pages and
 * bufio blocks via __ebs_rw_bvec().
 *
 * Keeps going on per-bvec failure so all possible data is transferred;
 * returns 0 on success or the last negative errno seen.
 */
static int __ebs_rw_bio(struct ebs_c *ec, enum req_op op, struct bio *bio)
{
	int r = 0, rr;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_bvec(bv, bio, iter) {
		rr = __ebs_rw_bvec(ec, op, &bv, &iter);
		if (rr)
			r = rr;
	}

	return r;
}
/* * Discard bio's blocks, i.e. pass discards down. * * Avoid discarding partial blocks at beginning and end; * return 0 in case no blocks can be discarded as a result.
*/ staticint __ebs_discard_bio(struct ebs_c *ec, struct bio *bio)
{
sector_t block, blocks, sector = bio->bi_iter.bi_sector;
/* Prefetch all read and any mis-aligned write buffers */
bio_list_for_each(bio, &bios) {
block1 = __sector_to_block(ec, bio->bi_iter.bi_sector); if (bio_op(bio) == REQ_OP_READ)
dm_bufio_prefetch(ec->bufio, block1, __nr_blocks(ec, bio)); elseif (bio_op(bio) == REQ_OP_WRITE && !(bio->bi_opf & REQ_PREFLUSH)) {
block2 = __sector_to_block(ec, bio_end_sector(bio)); if (__block_mod(bio->bi_iter.bi_sector, ec->u_bs))
dm_bufio_prefetch(ec->bufio, block1, 1); if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1)
dm_bufio_prefetch(ec->bufio, block2, 1);
}
}
bio_list_for_each(bio, &bios) {
r = -EIO; if (bio_op(bio) == REQ_OP_READ)
r = __ebs_rw_bio(ec, REQ_OP_READ, bio); elseif (bio_op(bio) == REQ_OP_WRITE) {
write = true;
r = __ebs_rw_bio(ec, REQ_OP_WRITE, bio);
} elseif (bio_op(bio) == REQ_OP_DISCARD) {
__ebs_forget_bio(ec, bio);
r = __ebs_discard_bio(ec, bio);
}
if (r < 0)
bio->bi_status = errno_to_blk_status(r);
}
/* * We write dirty buffers after processing I/O on them * but before we endio thus addressing REQ_FUA/REQ_SYNC.
*/
r = write ? dm_bufio_write_dirty_buffers(ec->bufio) : 0;
while ((bio = bio_list_pop(&bios))) { /* Any other request is endioed. */ if (unlikely(r && bio_op(bio) == REQ_OP_WRITE))
bio_io_error(bio); else
bio_endio(bio);
}
}
/*
 * Construct an emulated block size mapping: <dev_path> <offset> <ebs> [<ubs>]
 *
 * <dev_path>: path of the underlying device
 * <offset>: offset in 512 bytes sectors into <dev_path>
 * <ebs>: emulated block size in units of 512 bytes exposed to the upper layer
 * [<ubs>]: underlying block size in units of 512 bytes imposed on the lower layer;
 *	    optional, if not supplied, retrieve logical block size from underlying device
 */
/*
 * NOTE(review): this constructor is truncated in this chunk -- only the
 * declaration list below is visible, and keyword whitespace has been mangled
 * ("staticint", "unsignedint", "unsignedshort", "unsignedlonglong").
 * Restore the body (argument parsing, allocation, bufio client setup) from
 * the canonical source rather than inferring it from this fragment.
 */ staticint ebs_ctr(struct dm_target *ti, unsignedint argc, char **argv)
{ int r; unsignedshort tmp1; unsignedlonglong tmp; char dummy; struct ebs_c *ec;
if (unlikely(bio_op(bio) == REQ_OP_FLUSH)) return DM_MAPIO_REMAPPED; /* * Only queue for bufio processing in case of partial or overlapping buffers * -or- * emulation with ebs == ubs aiming for tests of dm-bufio overhead.
*/ if (likely(__block_mod(bio->bi_iter.bi_sector, ec->u_bs) ||
__block_mod(bio_end_sector(bio), ec->u_bs) ||
ec->e_bs == ec->u_bs)) {
spin_lock_irq(&ec->lock);
bio_list_add(&ec->bios_in, bio);
spin_unlock_irq(&ec->lock);
queue_work(ec->wq, &ec->ws);
return DM_MAPIO_SUBMITTED;
}
/* Forget any buffer content relative to this direct backing device I/O. */
__ebs_forget_bio(ec, bio);
/* * Only pass ioctls through if the device sizes match exactly.
*/
*bdev = dev->bdev; return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
}
/*
 * NOTE(review): the text below is website boilerplate accidentally appended
 * during extraction (not part of the kernel source); translated from German
 * and comment-wrapped so the file stays parseable:
 *
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor the
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */