/*
 * NOTE(review): this region is a mangled extract — inter-token whitespace was
 * lost ("unsignedint") and both functions below are truncated mid-body.
 * Code is left byte-identical; only review comments are added.
 */
/* Drops the init-time counter reference unless the session was already stopped. */
void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
{ /* * Drivers like loop do not call target_stop_session during session * shutdown so we have to drop the ref taken at init time here.
*/ if (!atomic_read(&cmd_cnt->stopped))
percpu_ref_put(&cmd_cnt->refcnt);
/* NOTE(review): function appears truncated here — closing brace not visible. */
/** * transport_alloc_session_tags - allocate target driver private data * @se_sess: Session pointer. * @tag_num: Maximum number of in-flight commands between initiator and target. * @tag_size: Size in bytes of the private data a target driver associates with * each command.
*/ int transport_alloc_session_tags(struct se_session *se_sess, unsignedint tag_num, unsignedint tag_size)
{ int rc;
/* kvcalloc zero-initializes and overflow-checks tag_size * tag_num. */
se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!se_sess->sess_cmd_map) {
pr_err("Unable to allocate se_sess->sess_cmd_map\n"); return -ENOMEM;
}
/* NOTE(review): truncated — the tag-pool init that uses 'rc' is missing here. */
/*
 * NOTE(review): the span below fuses fragments of at least three routines:
 * transport_init_session_tags(), what appears to be
 * __transport_register_session() (uses se_tpg/fabric_sess_ptr/se_nacl not
 * declared here), and a tpg session-listing loop (uses page/len).
 * Code is left byte-identical; only review comments are added.
 */
/** * transport_init_session_tags - allocate a session and target driver private data * @tag_num: Maximum number of in-flight commands between initiator and target. * @tag_size: Size in bytes of the private data a target driver associates with * each command. * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
*/ staticstruct se_session *
transport_init_session_tags(unsignedint tag_num, unsignedint tag_size, enum target_prot_op sup_prot_ops)
{ struct se_session *se_sess; int rc;
/* Reject inconsistent tag configuration: both must be zero or both nonzero. */
if (tag_num != 0 && !tag_size) {
pr_err("init_session_tags called with percpu-ida tag_num:" " %u, but zero tag_size\n", tag_num); return ERR_PTR(-EINVAL);
} if (!tag_num && tag_size) {
pr_err("init_session_tags called with percpu-ida tag_size:" " %u, but zero tag_num\n", tag_size); return ERR_PTR(-EINVAL);
}
se_sess = transport_alloc_session(sup_prot_ops); if (IS_ERR(se_sess)) return se_sess;
/* NOTE(review): fragment boundary — lines below appear to belong to a session-registration routine. */
se_sess->se_tpg = se_tpg;
se_sess->fabric_sess_ptr = fabric_sess_ptr; /* * Used by struct se_node_acl's under ConfigFS to locate active se_session-t * * Only set for struct se_session's that will actually be moving I/O. * eg: *NOT* discovery sessions.
*/ if (se_nacl) { /* * * Determine if fabric allows for T10-PI feature bits exposed to * initiators for device backends with !dev->dev_attrib.pi_prot_type. * * If so, then always save prot_type on a per se_node_acl node * basis and re-instate the previous sess_prot_type to avoid * disabling PI from below any previously initiator side * registered LUNs.
*/ if (se_nacl->saved_prot_type)
se_sess->sess_prot_type = se_nacl->saved_prot_type; elseif (tfo->tpg_check_prot_fabric_only)
se_sess->sess_prot_type = se_nacl->saved_prot_type =
tfo->tpg_check_prot_fabric_only(se_tpg); /* * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now.
*/ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
memset(&buf[0], 0, PR_REG_ISID_LEN);
se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
&buf[0], PR_REG_ISID_LEN);
se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
}
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); /* * The se_nacl->nacl_sess pointer will be set to the * last active I_T Nexus for each struct se_node_acl.
*/
se_nacl->nacl_sess = se_sess;
/* NOTE(review): fragment boundary — command-counter / tag-setup fragment below. */
cmd_cnt = target_alloc_cmd_counter(); if (!cmd_cnt) return ERR_PTR(-ENOMEM); /* * If the fabric driver is using percpu-ida based pre allocation * of I/O descriptor tags, go ahead and perform that setup now..
*/ if (tag_num != 0)
sess = transport_init_session_tags(tag_num, tag_size, prot_op); else
sess = transport_alloc_session(prot_op);
/* NOTE(review): fragment boundary — dynamic-ACL initiator-name listing loop below. */
spin_lock_bh(&se_tpg->session_lock);
list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { if (!se_sess->se_node_acl) continue; if (!se_sess->se_node_acl->dynamic_node_acl) continue; if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) break;
len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
se_sess->se_node_acl->initiatorname);
len += 1; /* Include NULL terminator */
}
spin_unlock_bh(&se_tpg->session_lock);
/*
 * transport_deregister_session_configfs - unlink a session from its node ACL
 * @se_sess: session being torn down.
 *
 * Removes @se_sess from its se_node_acl's active-session list and refreshes
 * the ACL's cached ->nacl_sess pointer under nacl_sess_lock. Fixes the
 * whitespace-mangled "unsignedlong" keyword from the extracted original.
 */
void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;

	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active
	 * struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);
/*
 * NOTE(review): tail fragment of what appears to be transport_free_session()
 * — the function signature and the start of the body are not visible here.
 * Code is left byte-identical; only review comments are added.
 */
/* * Drop the se_node_acl->nacl_kref obtained from within * core_tpg_get_initiator_node_acl().
*/ if (se_nacl) { struct se_portal_group *se_tpg = se_nacl->se_tpg; conststruct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; unsignedlong flags;
se_sess->se_node_acl = NULL;
/* * Also determine if we need to drop the extra ->cmd_kref if * it had been previously dynamically generated, and * the endpoint is not caching dynamic ACLs.
*/
mutex_lock(&se_tpg->acl_node_mutex); if (se_nacl->dynamic_node_acl &&
!se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); if (list_empty(&se_nacl->acl_sess_list))
se_nacl->dynamic_stop = true;
spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
if (se_nacl->dynamic_stop)
list_del_init(&se_nacl->acl_list);
}
mutex_unlock(&se_tpg->acl_node_mutex);
/* Extra put for the dynamic-stop case, then the session's own ACL reference. */
if (se_nacl->dynamic_stop)
target_put_nacl(se_nacl);
target_put_nacl(se_nacl);
} if (se_sess->sess_cmd_map) {
/* Free the pre-allocated tag pool and command map together. */
sbitmap_queue_free(&se_sess->sess_tag_pool);
kvfree(se_sess->sess_cmd_map);
} if (se_sess->cmd_cnt)
target_free_cmd_counter(se_sess->cmd_cnt);
kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);
/*
 * NOTE(review): fused fragments — session-deregistration cleanup, a TMR-list
 * removal fragment (uses cmd/dev/flags not declared here), then the head of
 * transport_cmd_check_stop_to_fabric(), which is cut off after its CMD_T_STOP
 * branch. Code is left byte-identical; only review comments are added.
 */
/* * Since the session is being removed, release SPC-2 * reservations held by the session that is disappearing.
*/
target_for_each_device(target_release_res, se_sess);
pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
se_tpg->se_tpg_tfo->fabric_name); /* * If last kref is dropping now for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context from within transport_free_session() code. * * For dynamic ACL, target_put_nacl() uses target_complete_nacl() * to release all remaining generate_node_acl=1 created ACL resources.
*/
/* NOTE(review): fragment boundary — TMR-device list removal fragment below. */
if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
dev = cmd->se_tmr_req->tmr_dev;
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags); if (cmd->se_tmr_req->tmr_dev)
list_del_init(&cmd->se_tmr_req->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
} /* * This function is called by the target core after the target core has * finished processing a SCSI command or SCSI TMF. Both the regular command * processing code and the code for aborting commands can call this * function. CMD_T_STOP is set if and only if another thread is waiting * inside transport_wait_for_tasks() for t_transport_stop_comp.
*/ staticint transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{ unsignedlong flags;
spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions.
*/ if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
/* * Some fabric modules like tcm_loop can release their internally * allocated I/O reference and struct se_cmd now. * * Fabric modules are expected to return '1' here if the se_cmd being * passed is released at this point, or zero if not being released.
*/ return cmd->se_tfo->check_stop_free(cmd);
}
/* NOTE(review): truncated — unlock and non-stop path of this function are missing. */
/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd. Returns the command's sense buffer, or NULL
 * when there is no backing device or a CHECK CONDITION was already sent.
 * Fixes the whitespace-mangled "staticunsignedchar" from the extract.
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	/* Do not hand out the buffer twice for the same command. */
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}
/*
 * NOTE(review): fragment starting mid-function — appears to be the TAS
 * (task-aborted-status) handling body of an abort path; 'tas', 'cmd', 'ret'
 * and 'ack_kref' are declared outside this view. Code left byte-identical.
 */
if (tas) { if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
/* Regular command: report SAM TASK ABORTED status to the initiator. */
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
cmd->t_task_cdb[0], cmd->tag);
trace_target_cmd_complete(cmd);
ret = cmd->se_tfo->queue_status(cmd); if (ret) {
transport_handle_queue_full(cmd, cmd->se_dev,
ret, false); return;
}
} else {
/* TMR: reject the task-management function instead. */
cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
cmd->se_tfo->queue_tm_rsp(cmd);
}
} else { /* * Allow the fabric driver to unmap any resources before * releasing the descriptor via TFO->release_cmd().
*/
cmd->se_tfo->aborted_task(cmd); if (ack_kref)
WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); /* * To do: establish a unit attention condition on the I_T * nexus associated with cmd. See also the paragraph "Aborting * commands" in SAM.
*/
}
staticbool target_cmd_interrupted(struct se_cmd *cmd)
{ int post_ret;
if (cmd->transport_state & CMD_T_ABORTED) { if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
INIT_WORK(&cmd->work, target_abort_work);
queue_work(target_completion_wq, &cmd->work); returntrue;
} elseif (cmd->transport_state & CMD_T_STOP) { if (cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
complete_all(&cmd->t_transport_stop_comp); returntrue;
}
returnfalse;
}
/*
 * NOTE(review): the head of target_complete_cmd_with_sense() is cut off after
 * its declarations, and transport_dump_vpd_ident_type() below is missing its
 * middle — 'buf' is read without being initialized in the visible code
 * (presumably a memset/sprintf section was lost). Code left byte-identical.
 */
/* May be called from interrupt context so must not sleep. */ void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
sense_reason_t sense_reason)
{ struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; int success, cpu; unsignedlong flags;
/* NOTE(review): fragment boundary — next function begins without the previous one closing. */
int transport_dump_vpd_ident_type( struct t10_vpd *vpd, unsignedchar *p_buf, int p_buf_len)
{ unsignedchar buf[VPD_TMP_BUF_SIZE]; int ret = 0; int len;
/* NOTE(review): strncpy may leave p_buf unterminated if buf fills p_buf_len. */
if (p_buf)
strncpy(p_buf, buf, p_buf_len); else
pr_debug("%s", buf);
return ret;
}
/*
 * NOTE(review): transport_set_vpd_ident() is truncated — the trailing
 * NUL-termination/return after the switch is not visible. Decodes a SPC-3
 * VPD page 0x83 designator into vpd->device_identifier. Code left
 * byte-identical; keyword mangles ("staticconstchar") preserved as found.
 */
int
transport_set_vpd_ident(struct t10_vpd *vpd, unsignedchar *page_83)
{ staticconstchar hex_str[] = "0123456789abcdef"; int j = 0, i = 4; /* offset to start of the identifier */
/* * The VPD Code Set (encoding) * * from spc3r23.pdf Section 7.6.3.1 Table 296
*/
vpd->device_identifier_code_set = (page_83[0] & 0x0f); switch (vpd->device_identifier_code_set) { case 0x01: /* Binary */
/* Binary designator: render each byte as two hex digits. */
vpd->device_identifier[j++] =
hex_str[vpd->device_identifier_type]; while (i < (4 + page_83[3])) {
vpd->device_identifier[j++] =
hex_str[(page_83[i] & 0xf0) >> 4];
vpd->device_identifier[j++] =
hex_str[page_83[i] & 0x0f];
i++;
} break; case 0x02: /* ASCII */ case 0x03: /* UTF-8 */ while (i < (4 + page_83[3]))
vpd->device_identifier[j++] = page_83[i++]; break; default: break;
}
/*
 * NOTE(review): body fragment — the enclosing function's signature (it uses
 * cmd/dev/size/mtl declared elsewhere) is not visible. Clamps data_length to
 * the fabric's max SGL capacity and recomputes residuals. Code left
 * byte-identical; "elseif" mangle preserved as found.
 */
if (!cmd->se_tfo->max_data_sg_nents) return TCM_NO_SENSE; /* * Check if fabric enforced maximum SGL entries per I/O descriptor * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + * residual_count and reduce original cmd->data_length to maximum * length based on single PAGE_SIZE entry scatter-lists.
*/
mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); if (cmd->data_length > mtl) { /* * If an existing CDB overflow is present, calculate new residual * based on CDB size minus fabric maximum transfer length. * * If an existing CDB underflow is present, calculate new residual * based on original cmd->data_length minus fabric maximum transfer * length. * * Otherwise, set the underflow residual based on cmd->data_length * minus fabric maximum transfer length.
*/ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
cmd->residual_count = (size - mtl);
} elseif (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
u32 orig_dl = size + cmd->residual_count;
cmd->residual_count = (orig_dl - mtl);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - mtl);
}
cmd->data_length = mtl; /* * Reset sbc_check_prot() calculated protection payload * length based upon the new smaller MTL.
*/ if (cmd->prot_length) {
u32 sectors = (mtl / dev->dev_attrib.block_size);
cmd->prot_length = dev->prot_length * sectors;
}
} return TCM_NO_SENSE;
}
/*
 * NOTE(review): target_cmd_size_check() is truncated — the final return and
 * closing brace are missing from this extract. Code left byte-identical.
 */
/** * target_cmd_size_check - Check whether there will be a residual. * @cmd: SCSI command. * @size: Data buffer size derived from CDB. The data buffer size provided by * the SCSI transport driver is available in @cmd->data_length. * * Compare the data buffer size from the CDB with the data buffer limit from the transport * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. * * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). * * Return: TCM_NO_SENSE
*/
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsignedint size)
{ struct se_device *dev = cmd->se_dev;
if (cmd->unknown_data_length) {
cmd->data_length = size;
} elseif (size != cmd->data_length) {
pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" " 0x%02x\n", cmd->se_tfo->fabric_name,
cmd->data_length, size, cmd->t_task_cdb[0]); /* * For READ command for the overflow case keep the existing * fabric provided ->data_length. Otherwise for the underflow * case, reset ->data_length to the smaller SCSI expected data * transfer length.
*/ if (size > cmd->data_length) {
cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
cmd->residual_count = (size - cmd->data_length);
} else {
cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
cmd->residual_count = (cmd->data_length - size); /* * Do not truncate ->data_length for WRITE command to * dump all payload
*/ if (cmd->data_direction == DMA_FROM_DEVICE) {
cmd->data_length = size;
}
}
/* Writes with a size mismatch are rejected outright for data CDBs. */
if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
pr_err_ratelimited("Rejecting underflow/overflow" " for WRITE data CDB\n"); return TCM_INVALID_FIELD_IN_COMMAND_IU;
} /* * Some fabric drivers like iscsi-target still expect to * always reject overflow writes. Reject this case until * full fabric driver level support for overflow writes * is introduced tree-wide.
*/ if (size > cmd->data_length) {
pr_err_ratelimited("Rejecting overflow for" " WRITE control CDB\n"); return TCM_INVALID_CDB_FIELD;
}
}
}
/*
 * NOTE(review): fused fragments — a SAM task-attribute check (from another
 * routine) followed by the tail of target_cmd_init_cdb(), whose signature is
 * not visible. Code left byte-identical; only review comments are added.
 */
/* * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object
*/ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0;
if (cmd->sam_task_attr == TCM_ACA_TAG) {
pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); return TCM_INVALID_CDB_FIELD;
}
/* NOTE(review): fragment boundary — CDB size validation/copy fragment below. */
/* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD
*/ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
ret = TCM_INVALID_CDB_FIELD; goto err;
} /* * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb.
*/ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) {
pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsignedlong)sizeof(cmd->__t_task_cdb));
ret = TCM_OUT_OF_RESOURCES; goto err;
}
} /* * Copy the original CDB into cmd->
*/
memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
trace_target_sequencer_start(cmd); return 0;
err: /* * Copy the CDB here to allow trace_target_cmd_complete() to * print the cdb to the trace buffers.
*/
memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
(unsignedint)TCM_MAX_COMMAND_SIZE)); return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);
/*
 * NOTE(review): fused body fragments from several routines — an ALUA delay
 * check, what looks like new-command submission (uses 'ret' declared
 * elsewhere), and a scatterlist overflow rejection. No enclosing signatures
 * are visible. Code left byte-identical; only review comments are added.
 */
/* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state..
*/
core_alua_check_nonop_delay(cmd);
if (cmd->t_data_nents != 0) { /* * This is primarily a hack for udev and tcm loop which sends * INQUIRYs with a single page and expects the data to be * cleared.
*/ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
cmd->data_direction == DMA_FROM_DEVICE) { struct scatterlist *sgl = cmd->t_data_sg; unsignedchar *buf = NULL;
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n"); return -EINVAL;
}
/* * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that * outstanding descriptors are handled correctly during shutdown via * transport_wait_for_tasks() * * Also, we don't take cmd->t_state_lock here as we only expect * this to be called for initial descriptor submission.
*/
cmd->t_state = TRANSPORT_NEW_CMD;
cmd->transport_state |= CMD_T_ACTIVE;
/* * transport_generic_new_cmd() is already handling QUEUE_FULL, * so follow TRANSPORT_NEW_CMD processing thread context usage * and call transport_generic_request_failure() if necessary..
*/
ret = transport_generic_new_cmd(cmd); if (ret)
transport_generic_request_failure(cmd, ret); return 0;
}
/* * Reject SCSI data overflow with map_mem_to_cmd() as incoming * scatterlists already have been set to follow what the fabric * passes for the original expected data transfer length.
*/ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
pr_warn("Rejecting SCSI DATA overflow for fabric using" " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); return TCM_INVALID_CDB_FIELD;
}
/**
 * target_init_cmd - initialize se_cmd
 * @se_cmd: command descriptor to init
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns:
 *	- less than zero to signal active I/O shutdown failure.
 *	- zero on success.
 *
 * If the fabric driver calls target_stop_session, then it must check the
 * return code and handle failures. This will never fail for other drivers,
 * and the return code can be ignored.
 */
int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	struct se_portal_group *se_tpg;

	/*
	 * Fix: the extracted original left se_tpg uninitialized before it was
	 * dereferenced below — restore the lookup from the session.
	 */
	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
			  data_dir, task_attr, sense, unpacked_lun,
			  se_sess->cmd_cnt);

	/*
	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
}
EXPORT_SYMBOL_GPL(target_init_cmd);
/*
 * NOTE(review): target_submit_prep() is truncated — the extract ends right
 * after BUG_ON(!sgl); the SGL mapping and return paths are missing.
 * Code left byte-identical; only review comments are added.
 */
/** * target_submit_prep - prepare cmd for submission * @se_cmd: command descriptor to prep * @cdb: pointer to SCSI CDB * @sgl: struct scatterlist memory for unidirectional mapping * @sgl_count: scatterlist count for unidirectional mapping * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping * @sgl_bidi_count: scatterlist count for bidirectional READ mapping * @sgl_prot: struct scatterlist memory protection information * @sgl_prot_count: scatterlist count for protection information * @gfp: gfp allocation type * * Returns: * - less than zero to signal failure. * - zero on success. * * If failure is returned, lio will the callers queue_status to complete * the cmd.
*/ int target_submit_prep(struct se_cmd *se_cmd, unsignedchar *cdb, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count, struct scatterlist *sgl_prot, u32 sgl_prot_count,
gfp_t gfp)
{
sense_reason_t rc;
rc = target_cmd_init_cdb(se_cmd, cdb, gfp); if (rc) goto send_cc_direct;
/* * Locate se_lun pointer and attach it to struct se_cmd
*/
rc = transport_lookup_cmd_lun(se_cmd); if (rc) goto send_cc_direct;
rc = target_cmd_parse_cdb(se_cmd); if (rc != 0) goto generic_fail;
/* * Save pointers for SGLs containing protection information, * if present.
*/ if (sgl_prot_count) {
se_cmd->t_prot_sg = sgl_prot;
se_cmd->t_prot_nents = sgl_prot_count;
se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
}
/* * When a non zero sgl_count has been passed perform SGL passthrough * mapping for pre-allocated fabric memory instead of having target * core perform an internal SGL allocation..
*/ if (sgl_count != 0) {
BUG_ON(!sgl);
/* NOTE(review): truncated — SGL mapping, labels send_cc_direct/generic_fail missing. */
/*
 * NOTE(review): fused fragments — target_submit_cmd() is spliced mid-body
 * into what appears to be the tail of target_plug_device() (uses se_dev /
 * se_plug not declared here, returns a pointer from a void function), then
 * the truncated heads of target_queue_submission() and target_submit().
 * Code left byte-identical; only review comments are added.
 */
/** * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd * * @se_cmd: command descriptor to submit * @se_sess: associated se_sess for endpoint * @cdb: pointer to SCSI CDB * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables * * Task tags are supported if the caller has set @se_cmd->tag. * * This may only be called from process context, and also currently * assumes internal allocation of fabric payload buffer by target-core. * * It also assumes interal target core SGL memory allocation. * * This function must only be used by drivers that do their own * sync during shutdown and does not use target_stop_session. If there * is a failure this function will call into the fabric driver's * queue_status with a CHECK_CONDITION.
*/ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, unsignedchar *cdb, unsignedchar *sense, u64 unpacked_lun,
u32 data_length, int task_attr, int data_dir, int flags)
{ int rc;
rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
task_attr, data_dir, flags);
WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n"); if (rc) return;
/* NOTE(review): fragment boundary — device-plug fragment below belongs to another function. */
se_plug = se_dev->transport->plug_device(se_dev); if (!se_plug) return NULL;
se_plug->se_dev = se_dev; /* * We have a ref to the lun at this point, but the cmds could * complete before we unplug, so grab a ref to the se_device so we * can call back into the backend.
*/
config_group_get(&se_dev->dev_group); return se_plug;
}
/** * target_queue_submission - queue the cmd to run on the LIO workqueue * @se_cmd: command descriptor to submit
*/ staticvoid target_queue_submission(struct se_cmd *se_cmd)
{ struct se_device *se_dev = se_cmd->se_dev; int cpu = se_cmd->cpuid; struct se_cmd_queue *sq;
/* NOTE(review): truncated — body of target_queue_submission() missing. */
/** * target_submit - perform final initialization and submit cmd to LIO core * @se_cmd: command descriptor to submit * * target_submit_prep or something similar must have been called on the cmd, * and this must be called from process context.
*/ int target_submit(struct se_cmd *se_cmd)
{ conststruct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo; struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib;
u8 submit_type;
/* NOTE(review): truncated — body of target_submit() missing. */
/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *		       for TMR CDBs
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
			  se_sess->cmd_cnt);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
/*
 * NOTE(review): transport_generic_request_failure() is spliced mid-body into
 * an execute-command fragment (from L382 on: execute_cmd / UA / ALUA /
 * reservation checks and a recursive failure call) — the queue_full/
 * queue_status labels referenced by gotos are not visible. Code left
 * byte-identical; only review comments are added.
 */
/* * Handle SAM-esque emulation for generic transport request failures.
*/ void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{ int ret = 0, post_ret;
/* Sense reasons that map directly to CHECK CONDITION fall through to sense build. */
switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: case TCM_INVALID_CDB_FIELD: case TCM_INVALID_PARAMETER_LIST: case TCM_PARAMETER_LIST_LENGTH_ERROR: case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: case TCM_UNKNOWN_MODE_PAGE: case TCM_WRITE_PROTECTED: case TCM_ADDRESS_OUT_OF_RANGE: case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: case TCM_TOO_MANY_TARGET_DESCS: case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: case TCM_TOO_MANY_SEGMENT_DESCS: case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: case TCM_INVALID_FIELD_IN_COMMAND_IU: case TCM_ALUA_TG_PT_STANDBY: case TCM_ALUA_TG_PT_UNAVAILABLE: case TCM_ALUA_STATE_TRANSITION: case TCM_ALUA_OFFLINE: break; case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL; goto queue_status; case TCM_LUN_BUSY:
cmd->scsi_status = SAM_STAT_BUSY; goto queue_status; case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status * and queue the response to $FABRIC_MOD. * * Uses linux/include/scsi/scsi.h SAM status codes defs
*/
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; /* * For UA Interlock Code 11b, a RESERVATION CONFLICT will * establish a UNIT ATTENTION with PREVIOUS RESERVATION * CONFLICT STATUS. * * See spc4r17, section 7.4.6 Control Mode Page, Table 349
*/ if (cmd->se_sess &&
cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
target_ua_allocate_lun(cmd->se_sess->se_node_acl,
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
goto queue_status; default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break;
}
ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); if (ret) goto queue_full;
/* NOTE(review): fragment boundary — lines below appear to be from an execute path (__target_execute_cmd-like). */
if (!cmd->execute_cmd) {
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto err;
} if (do_checks) { /* * Check for an existing UNIT ATTENTION condition after * target_handle_task_attr() has done SAM task attr * checking, and possibly have already defered execution * out to target_restart_delayed_cmds() context.
*/
ret = target_scsi3_ua_check(cmd); if (ret) goto err;
ret = target_alua_state_check(cmd); if (ret) goto err;
ret = target_check_reservation(cmd); if (ret) {
cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; goto err;
}
}
ret = cmd->execute_cmd(cmd); if (!ret) return;
err:
spin_lock_irq(&cmd->t_state_lock);
cmd->transport_state &= ~CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
transport_generic_request_failure(cmd, ret);
}
/*
 * NOTE(review): target_write_prot_action() is spliced mid-switch into what
 * appears to be target_handle_task_attr() ordered/delayed-command logic
 * (uses dev/flags not declared here). Code left byte-identical; only review
 * comments are added.
 */
staticint target_write_prot_action(struct se_cmd *cmd)
{
u32 sectors; /* * Perform WRITE_INSERT of PI using software emulation when backend * device has PI enabled, if the transport has not already generated * PI using hardware WRITE_INSERT offload.
*/ switch (cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
sbc_dif_generate(cmd); break; case TARGET_PROT_DOUT_STRIP: if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) break;
/* NOTE(review): fragment boundary — task-attribute handling fragment below. */
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) returnfalse;
cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
/* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list.
*/ switch (cmd->sam_task_attr) { case TCM_HEAD_TAG:
pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
cmd->t_task_cdb[0]); returnfalse; case TCM_ORDERED_TAG:
pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
cmd->t_task_cdb[0]); break; default: /* * For SIMPLE and UNTAGGED Task Attribute commands
*/
retry: if (percpu_ref_tryget_live(&dev->non_ordered)) returnfalse;
break;
}
spin_lock_irqsave(&dev->delayed_cmd_lock, flags); if (cmd->sam_task_attr == TCM_SIMPLE_TAG &&
!percpu_ref_is_dying(&dev->non_ordered)) {
spin_unlock_irqrestore(&dev->delayed_cmd_lock, flags); /* We raced with the last ordered completion so retry. */ goto retry;
} elseif (!percpu_ref_is_dying(&dev->non_ordered)) {
percpu_ref_kill(&dev->non_ordered);
}
/* NOTE(review): format string ends "listn" — the "\n" escape was likely lost in extraction. */
pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
cmd->t_task_cdb[0], cmd->sam_task_attr); /* * We may have no non ordered cmds when this function started or we * could have raced with the last simple/head cmd completing, so kick * the delayed handler here.
*/
schedule_work(&dev->delayed_cmd_work); returntrue;
}
/*
 * NOTE(review): three truncated functions follow — target_execute_cmd()
 * (cut after the interrupted check), target_do_delayed_work() (cut inside its
 * while loop), and transport_complete_task_attr() (cut after the ordered-sync
 * branch). Code left byte-identical; only review comments are added.
 */
void target_execute_cmd(struct se_cmd *cmd)
{ /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. * * If the received CDB has already been aborted stop processing it here.
*/ if (target_cmd_interrupted(cmd)) return;
/* NOTE(review): truncated — remainder of target_execute_cmd() missing. */
/* * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary
*/ void target_do_delayed_work(struct work_struct *work)
{ struct se_device *dev = container_of(work, struct se_device,
delayed_cmd_work);
spin_lock(&dev->delayed_cmd_lock); while (!dev->ordered_sync_in_progress) { struct se_cmd *cmd;
/* * We can be woken up early/late due to races or the * extra wake up we do when adding commands to the list. * We check for both cases here.
*/ if (list_empty(&dev->delayed_cmd_list) ||
!percpu_ref_is_zero(&dev->non_ordered)) break;
/* NOTE(review): truncated — loop body and unlock of target_do_delayed_work() missing. */
/* * Called from I/O completion to determine which dormant/delayed * and ordered cmds need to have their tasks added to the execution queue.
*/ staticvoid transport_complete_task_attr(struct se_cmd *cmd)
{ struct se_device *dev = cmd->se_dev;
if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return;
if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) return;
cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
if (cmd->se_cmd_flags & SCF_TASK_ORDERED_SYNC) {
transport_complete_ordered_sync(cmd); return;
}
/* NOTE(review): truncated — remainder of transport_complete_task_attr() missing. */
/*
 * NOTE(review): extraneous non-code text (a German website disclaimer) was
 * appended to this file, breaking compilation. Preserved here, commented out
 * and translated; it should be removed from the source entirely:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */