// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/
if (!se_lun) { /* * Use the se_portal_group->tpg_virt_lun0 to allow for * REPORT_LUNS, et al to be returned when no active * MappedLUN=0 exists for this Initiator Port.
*/ if (se_cmd->orig_fe_lun != 0) {
pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN" " Access for 0x%08llx from %s\n",
se_cmd->se_tfo->fabric_name,
se_cmd->orig_fe_lun,
nacl->initiatorname); return TCM_NON_EXISTENT_LUN;
}
/* * Force WRITE PROTECT for virtual LUN 0
*/ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
(se_cmd->data_direction != DMA_NONE)) return TCM_WRITE_PROTECTED;
se_lun = se_sess->se_tpg->tpg_virt_lun0; if (!percpu_ref_tryget_live(&se_lun->lun_ref)) return TCM_NON_EXISTENT_LUN;
se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
se_cmd->lun_ref_active = true;
} /* * RCU reference protected by percpu se_lun->lun_ref taken above that * must drop to zero (including initial reference) before this se_lun * pointer can be kfree_rcu() by the final se_lun->lun_group put via * target_core_fabric_configfs.c:target_fabric_port_release
*/
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
this_cpu_inc(se_cmd->se_dev->stats->total_cmds);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 *
 * Returns the matching se_dev_entry with an elevated pr_kref, or NULL when
 * no entry on this node ACL maps a LUN whose TPG uses @rtpi.  The caller
 * owns the reference and must drop it with kref_put(&deve->pr_kref, ...).
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_portal_group *tpg = nacl->se_tpg;

	rcu_read_lock();
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->fabric_name);
			continue;
		}
		if (lun->lun_tpg->tpg_rtpi != rtpi)
			continue;

		/*
		 * Pin the entry before leaving the RCU read-side critical
		 * section so it cannot be freed under the caller.
		 */
		kref_get(&deve->pr_kref);
		rcu_read_unlock();

		return deve;
	}
	rcu_read_unlock();

	return NULL;
}
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
	/* Power-of-two advertised block sizes map to a simple left shift. */
	unsigned int bs = dev->dev_attrib.block_size;

	if (bs == 4096)
		return lb << 3;
	if (bs == 2048)
		return lb << 2;
	if (bs == 1024)
		return lb << 1;
	/* 512-byte (or unrecognized) block size: already in block-layer units. */
	return lb;
}
EXPORT_SYMBOL(target_to_linux_sector);
/* * We add the device early to the idr, so it can be used * by backend modules during configuration. We do not want * to allow other callers to access partially setup devices, * so we skip them here.
*/ if (!target_dev_configured(dev)) return 0;
item = config_item_get_unless_zero(&dev->dev_group.cg_item); if (!item) return 0;
mutex_unlock(&device_mutex);
ret = iter->fn(dev, iter->data);
config_item_put(item);
mutex_lock(&device_mutex); return ret;
}
/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
			   void *data)
{
	int rc;
	struct devices_idr_iter iter = {
		.fn	= fn,
		.data	= data,
	};

	/* Hold device_mutex so devices cannot be added/removed mid-walk. */
	mutex_lock(&device_mutex);
	rc = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
	mutex_unlock(&device_mutex);

	return rc;
}
int target_configure_device(struct se_device *dev)
{ struct se_hba *hba = dev->se_hba; int ret, id;
if (target_dev_configured(dev)) {
pr_err("se_dev->se_dev_ptr already set for storage" " object\n"); return -EEXIST;
}
/* * Add early so modules like tcmu can use during its * configuration.
*/
mutex_lock(&device_mutex); /* * Use cyclic to try and avoid collisions with devices * that were recently removed.
*/
id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
mutex_unlock(&device_mutex); if (id < 0) {
ret = -ENOMEM; goto out;
}
dev->dev_index = id;
ret = dev->transport->configure_device(dev); if (ret) goto out_free_index;
if (dev->transport->configure_unmap &&
dev->transport->configure_unmap(dev)) {
pr_debug("Discard support available, but disabled by default.\n");
}
/* * XXX: there is not much point to have two different values here..
*/
dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;
/* * Align max_hw_sectors down to PAGE_SIZE I/O transfers
*/
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->creation_time = get_jiffies_64();
ret = core_setup_alua(dev); if (ret) goto out_destroy_device;
/* * Setup work_queue for QUEUE_FULL
*/
INIT_WORK(&dev->qf_work_queue, target_qf_do_work);
if (g_lun0_dev)
target_free_device(g_lun0_dev);
core_delete_hba(hba);
}
/*
 * Common CDB parsing for kernel and user passthrough.
 *
 * Emulates REPORT LUNS and (unless TRANSPORT_FLAG_PASSTHROUGH_PGR is set)
 * the persistent/legacy reservation opcodes; every other CDB is handed to
 * @exec_cmd for the backend to execute directly.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
	sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
	unsigned char *cdb = cmd->t_task_cdb;
	struct se_device *dev = cmd->se_dev;
	unsigned int size;

	/*
	 * For REPORT LUNS we always need to emulate the response, for everything
	 * else, pass it up.
	 */
	if (cdb[0] == REPORT_LUNS) {
		cmd->execute_cmd = spc_emulate_report_luns;
		return TCM_NO_SENSE;
	}

	/*
	 * With emulate_pr disabled, all reservation requests should fail,
	 * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
	 */
	if (!dev->dev_attrib.emulate_pr &&
	    ((cdb[0] == PERSISTENT_RESERVE_IN) ||
	     (cdb[0] == PERSISTENT_RESERVE_OUT) ||
	     (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) ||
	     (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10))) {
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	/*
	 * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
	 * emulate the response, since tcmu does not have the information
	 * required to process these commands.
	 */
	if (!(dev->transport_flags &
	      TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
		if (cdb[0] == PERSISTENT_RESERVE_IN) {
			cmd->execute_cmd = target_scsi3_emulate_pr_in;
			/* ALLOCATION LENGTH lives in bytes 7-8 of the PR IN CDB */
			size = get_unaligned_be16(&cdb[7]);
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == PERSISTENT_RESERVE_OUT) {
			cmd->execute_cmd = target_scsi3_emulate_pr_out;
			/* PARAMETER LIST LENGTH lives in bytes 5-8 of the PR OUT CDB */
			size = get_unaligned_be32(&cdb[5]);
			return target_cmd_size_check(cmd, size);
		}
		/*
		 * NOTE(review): the RELEASE/RESERVE branches promised by the
		 * comment above were missing from the truncated original and
		 * have been reconstructed — confirm the SCSI-2 emulation
		 * entry points match this file's declarations.
		 */
		if (cdb[0] == RELEASE_6 || cdb[0] == RELEASE_10) {
			cmd->execute_cmd = target_scsi2_reservation_release;
			if (cdb[0] == RELEASE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
		if (cdb[0] == RESERVE_6 || cdb[0] == RESERVE_10) {
			cmd->execute_cmd = target_scsi2_reservation_reserve;
			if (cdb[0] == RESERVE_10)
				size = get_unaligned_be16(&cdb[7]);
			else
				size = cmd->data_length;
			return target_cmd_size_check(cmd, size);
		}
	}

	/* Set DATA_CDB flag for ops that should have it */
	switch (cdb[0]) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
	case WRITE_VERIFY:
	case WRITE_VERIFY_12:
	case WRITE_VERIFY_16:
	case COMPARE_AND_WRITE:
	case XDWRITEREAD_10:
		cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
		break;
	case VARIABLE_LENGTH_CMD:
		switch (get_unaligned_be16(&cdb[8])) {
		case READ_32:
		case WRITE_32:
		case WRITE_VERIFY_32:
		case XDWRITEREAD_32:
			cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
			break;
		}
	}

	/* Everything else is passed through to the backend unchanged. */
	cmd->execute_cmd = exec_cmd;

	return TCM_NO_SENSE;
}
/*
 * NOTE(review): the following German website-disclaimer text was appended to
 * this source file by mistake (it is unrelated boilerplate, translated here
 * and commented out so the file remains compilable):
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed. Remark: the colored syntax
 * highlighting and the measurement are still experimental."
 */