// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
/* Configure the FILEIO backend: open the backing file (or block device). */
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
	pr_err("Missing fd_dev_name=\n");
	return -EINVAL;
}
/*
 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
 * of pure timestamp updates.
 */
flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
/*
 * Optionally allow fd_buffered_io=1 to be enabled for people
 * who want to use the fs buffer cache as a WriteCache mechanism.
 *
 * This means that in event of a hard failure, there is a risk
 * of silent data-loss if the SCSI client has *not* performed a
 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
 * to write-out the entire device cache.
 */
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
	pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
	flags &= ~O_DSYNC;
}

file = filp_open(fd_dev->fd_dev_name, flags, 0600);
if (IS_ERR(file)) {
	pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
	ret = PTR_ERR(file);
	goto fail;
}
fd_dev->fd_file = file;
/*
 * If using a block backend with this struct file, we extract
 * fd_dev->fd_[block,dev]_size from struct block_device.
 *
 * Otherwise, we use the passed fd_size= from configfs.
 */
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
	struct block_device *bdev = I_BDEV(inode);
	unsigned long long dev_size;

	fd_dev->fd_block_size = bdev_logical_block_size(bdev);
	/*
	 * Determine the number of bytes from i_size_read() minus
	 * one (1) logical sector from underlying struct block_device.
	 */
	dev_size = (i_size_read(file->f_mapping->host) -
		    fd_dev->fd_block_size);

	pr_debug("FILEIO: Using size: %llu bytes from struct"
		 " block_device blocks: %llu logical_block_size: %d\n",
		 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
		 fd_dev->fd_block_size);
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (bdev_nonrot(bdev))
		dev->dev_attrib.is_nonrot = 1;
} else {
	/* Plain file backend: the size must have come in via configfs. */
	if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
		pr_err("FILEIO: Missing fd_dev_size="
		       " parameter, and no backing struct"
		       " block_device\n");
		goto fail;
	}
	fd_dev->fd_block_size = FD_BLOCKSIZE;
	/*
	 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
	 * based upon struct iovec limit for vfs_writev().
	 */
	dev->dev_attrib.max_write_same_len = 0x1000;
}
/* Submit the bvec-backed iov_iter as a single vectored read or write. */
iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);
if (is_write)
	ret = vfs_iter_write(fd, &iter, &pos, 0);
else
	ret = vfs_iter_read(fd, &iter, &pos, 0);

if (is_write) {
	/* A write must transfer exactly data_length bytes to succeed. */
	if (ret < 0 || ret != data_length) {
		pr_err("%s() write returned %d\n", __func__, ret);
		if (ret >= 0)
			ret = -EINVAL;
	}
} else {
	/*
	 * Return zeros and GOOD status even if the READ did not return
	 * the expected virt_size for struct file w/o a backing struct
	 * block_device.
	 */
	if (S_ISBLK(file_inode(fd)->i_mode)) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() returned %d, expecting %u for "
			       "S_ISBLK\n", __func__, ret,
			       data_length);
			if (ret >= 0)
				ret = -EINVAL;
		}
	} else {
		if (ret < 0) {
			pr_err("%s() returned %d for non S_ISBLK\n",
			       __func__, ret);
		} else if (ret != data_length) {
			/*
			 * Short read case:
			 * Probably someone truncated the file under us.
			 * We must explicitly zero sg-pages to prevent
			 * exposing uninitialized pages to userspace.
			 */
			if (ret < data_length)
				ret += iov_iter_zero(data_length - ret, &iter);
			else
				ret = -EINVAL;
		}
	}
}

kfree(bvec);
return ret;
}
/* * If the Immediate bit is set, queue up the GOOD response * for this SYNCHRONIZE_CACHE op
*/ if (immed)
target_complete_cmd(cmd, SAM_STAT_GOOD);
/* * Determine if we will be flushing the entire device.
*/ if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task_lba * dev->dev_attrib.block_size; if (cmd->data_length)
end = start + cmd->data_length - 1; else
end = LLONG_MAX;
}
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1); if (ret != 0)
pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (immed) return 0;
if (ret)
target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); else
target_complete_cmd(cmd, SAM_STAT_GOOD);
/* Protection information (DIF) is not supported for FILEIO WRITE_SAME. */
if (cmd->prot_op) {
	pr_err("WRITE_SAME: Protection information with FILEIO"
	       " backends not supported\n");
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

if (!cmd->t_data_nents)
	return TCM_INVALID_CDB_FIELD;

bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvec)
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

/*
 * Point every bio_vec at the single payload page so the one
 * source block is replicated nolb times by a single write.
 */
for (i = 0; i < nolb; i++) {
	bvec_set_page(&bvec[i], sg_page(&cmd->t_data_sg[0]),
		      cmd->t_data_sg[0].length,
		      cmd->t_data_sg[0].offset);
	len += se_dev->dev_attrib.block_size;
}

/*
 * NOTE(review): the iov_iter_bvec()/vfs_iter_write() call that is
 * expected to produce 'ret' here is not visible in this chunk —
 * confirm it was not lost against the upstream source.
 */
kfree(bvec);
if (ret < 0 || ret != len) {
	pr_err("vfs_iter_write() returned %zd for write same\n", ret);
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
 * We are currently limited by the number of iovecs (2048) per
 * single vfs_[writev,readv] call.
 */
if (cmd->data_length > FD_MAX_BYTES) {
	/*
	 * Fixed missing space between the concatenated string literals,
	 * which previously printed "...due toFD_MAX_BYTES...".
	 */
	pr_err("FILEIO: Not able to process I/O of %u bytes due to"
	       " FD_MAX_BYTES: %u iovec count limitation\n",
	       cmd->data_length, FD_MAX_BYTES);
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
static sector_t fd_get_blocks(struct se_device *dev)
{ struct fd_dev *fd_dev = FD_DEV(dev); struct file *f = fd_dev->fd_file; struct inode *i = f->f_mapping->host; unsignedlonglong dev_size; /* * When using a file that references an underlying struct block_device, * ensure dev_size is always based on the current inode size in order * to handle underlying block_device resize operations.
*/ if (S_ISBLK(i->i_mode))
dev_size = i_size_read(i); else
dev_size = fd_dev->fd_dev_size;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.