dev = rcu_dereference_check(lun->lun_se_dev,
lockdep_is_held(&tpg->tpg_lun_mutex)); /* * By default in LIO-Target $FABRIC_MOD, * demo_mode_write_protect is ON, or READ_ONLY;
*/ if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
lun_access_ro = false;
} else { /* * Allow only optical drives to issue R/W in default RO * demo mode.
*/ if (dev->transport->get_device_type(dev) == TYPE_DISK)
lun_access_ro = true; else
lun_access_ro = false;
}
pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s" " access for LUN in Demo Mode\n",
tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
lun_access_ro ? "READ-ONLY" : "READ-WRITE");
core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
lun_access_ro, acl, tpg); /* * Check to see if there are any existing persistent reservation * APTPL pre-registrations that need to be enabled for this dynamic * LUN ACL now..
*/
core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
lun->unpacked_lun);
}
mutex_unlock(&tpg->tpg_lun_mutex);
}
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); if (acl) return acl;
if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) return NULL;
acl = target_alloc_node_acl(tpg, initiatorname); if (!acl) return NULL; /* * When allocating a dynamically generated node_acl, go ahead * and take the extra kref now before returning to the fabric * driver caller. * * Note this reference will be released at session shutdown * time within transport_free_session() code.
*/
kref_get(&acl->acl_kref);
acl->dynamic_node_acl = 1;
/* * Here we only create demo-mode MappedLUNs from the active * TPG LUNs if the fabric is not explicitly asking for * tpg_check_demo_mode_login_only() == 1.
*/ if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
(tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
core_tpg_add_node_to_devs(acl, tpg, NULL);
mutex_lock(&tpg->acl_node_mutex); if (acl->dynamic_node_acl)
acl->dynamic_node_acl = 0;
list_del_init(&acl->acl_list);
mutex_unlock(&tpg->acl_node_mutex);
target_shutdown_sessions(acl);
target_put_nacl(acl); /* * Wait for last target_put_nacl() to complete in target_complete_nacl() * for active fabric session transport_deregister_session() callbacks.
*/
wait_for_completion(&acl->acl_free_comp);
/* * Allow the setting of se_node_acl queue_depth to be idempotent, * and not force a session shutdown event if the value is not * changing.
*/ if (acl->queue_depth == queue_depth) return 0; /* * User has requested to change the queue depth for a Initiator Node. * Change the value in the Node's struct se_node_acl, and call * target_set_nacl_queue_depth() to set the new queue depth.
*/
target_set_nacl_queue_depth(tpg, acl, queue_depth);
/* * Shutdown all pending sessions to force session reinstatement.
*/
target_shutdown_sessions(acl);
pr_debug("Successfully changed queue depth to: %d for Initiator" " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
acl->initiatorname, tpg->se_tpg_tfo->fabric_name,
tpg->se_tpg_tfo->tpg_get_tag(tpg));
/* core_tpg_set_initiator_node_tag(): * * Initiator nodeacl tags are not used internally, but may be used by * userspace to emulate aliases or groups. * Returns length of newly-set tag or -EINVAL.
*/ int core_tpg_set_initiator_node_tag( struct se_portal_group *tpg, struct se_node_acl *acl, constchar *new_tag)
{ if (strlen(new_tag) >= MAX_ACL_TAG_SIZE) return -EINVAL;
/*
 * Register a Relative Target Port Identifier (RTPI) for this TPG in the
 * global tpg_xa xarray.
 *
 * If the user configured an explicit RTPI (se_tpg->rtpi_manual), attempt
 * to claim exactly that index with xa_insert(); any failure (e.g. the
 * index is already occupied) is logged and reported to the caller as
 * -EINVAL.  Otherwise let xa_alloc() pick a free index in [1, USHRT_MAX]
 * and store the allocated value in se_tpg->tpg_rtpi.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fixes vs. original span: restored the missing "return ret;" and the
 * function's closing brace (the else path otherwise never returned and
 * the braces were unbalanced), fixed the fused "staticint" token, and
 * added the missing trailing newline to the pr_info() format string.
 */
static int target_tpg_register_rtpi(struct se_portal_group *se_tpg)
{
	u32 val;
	int ret;

	if (se_tpg->rtpi_manual) {
		ret = xa_insert(&tpg_xa, se_tpg->tpg_rtpi, se_tpg, GFP_KERNEL);
		if (ret) {
			pr_info("%s_TPG[%hu] - Can not set RTPI %#x, it is already busy\n",
				se_tpg->se_tpg_tfo->fabric_name,
				se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
				se_tpg->tpg_rtpi);
			/* Surface the collision to the user as -EINVAL. */
			return -EINVAL;
		}
	} else {
		ret = xa_alloc(&tpg_xa, &val, se_tpg,
			       XA_LIMIT(1, USHRT_MAX), GFP_KERNEL);
		if (!ret)
			se_tpg->tpg_rtpi = val;
	}

	return ret;
}
/*
 * Enable a target portal group.
 *
 * First registers an RTPI for the TPG; on success asks the fabric
 * driver to enable the TPG via ->fabric_enable_tpg(..., true).  If the
 * fabric call fails, the RTPI registration is rolled back so the TPG is
 * left fully disabled.
 *
 * Returns 0 on success or a negative errno from either step.
 */
int target_tpg_enable(struct se_portal_group *se_tpg)
{
	int err = target_tpg_register_rtpi(se_tpg);

	if (err)
		return err;

	err = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, true);
	if (err) {
		/* Undo the RTPI registration on fabric-driver failure. */
		target_tpg_deregister_rtpi(se_tpg);
		return err;
	}

	se_tpg->enabled = true;
	return 0;
}
/*
 * Disable a target portal group.
 *
 * Drops the TPG's RTPI registration unconditionally, then asks the
 * fabric driver to disable the TPG via ->fabric_enable_tpg(..., false).
 * ->enabled is cleared only when the fabric call succeeds.
 *
 * Returns 0 on success or the fabric driver's negative errno.
 */
int target_tpg_disable(struct se_portal_group *se_tpg)
{
	int err;

	target_tpg_deregister_rtpi(se_tpg);

	err = se_tpg->se_tpg_tfo->fabric_enable_tpg(se_tpg, false);
	if (err)
		return err;

	se_tpg->enabled = false;
	return 0;
}
/* Does not change se_wwn->priv. */ int core_tpg_register( struct se_wwn *se_wwn, struct se_portal_group *se_tpg, int proto_id)
{ int ret;
if (!se_tpg) return -EINVAL; /* * For the typical case where core_tpg_register() is called by a * fabric driver from target_core_fabric_ops->fabric_make_tpg() * configfs context, use the original tf_ops pointer already saved * by target-core in target_fabric_make_wwn(). * * Otherwise, for special cases like iscsi-target discovery TPGs * the caller is responsible for setting ->se_tpg_tfo ahead of * calling core_tpg_register().
*/ if (se_wwn)
se_tpg->se_tpg_tfo = se_wwn->wwn_tf->tf_ops;
if (!se_tpg->se_tpg_tfo) {
pr_err("Unable to locate se_tpg->se_tpg_tfo pointer\n"); return -EINVAL;
}
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
mutex_lock(&se_tpg->acl_node_mutex);
list_splice_init(&se_tpg->acl_node_list, &node_list);
mutex_unlock(&se_tpg->acl_node_mutex); /* * Release any remaining demo-mode generated se_node_acl that have * not been released because of TFO->tpg_check_demo_mode_cache() == 1 * in transport_deregister_session().
*/
list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
list_del_init(&nacl->acl_list);
core_clear_lun_from_tpg(lun, tpg); /* * Wait for any active I/O references to percpu se_lun->lun_ref to * be released. Also, se_lun->lun_ref is now used by PR and ALUA * logic when referencing a remote target port during ALL_TGT_PT=1 * and generating UNIT_ATTENTIONs for ALUA access state transition.
*/
transport_clear_lun_ref(lun);
mutex_lock(&tpg->tpg_lun_mutex); if (lun->lun_se_dev) {
target_detach_tg_pt_gp(lun);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.