/*
 * Module parameter: when non-zero, accept a non-optimized path instead of
 * issuing an STPG when the target supports implicit TPGS (see the
 * MODULE_PARM_DESC text below). Readable/writable via sysfs.
 */
static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");
/* * submit_stpg - Issue a SET TARGET PORT GROUP command * * Currently we're only setting the current target port group state * to 'active/optimized' and let the array firmware figure out * the states of the remaining groups.
*/ staticint submit_stpg(struct scsi_device *sdev, int group_id, struct scsi_sense_hdr *sshdr)
{
u8 cdb[MAX_COMMAND_SIZE]; unsignedchar stpg_data[8]; int stpg_len = 8;
/* Fail fast on any transport problem so multipath can try another path */
blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; conststruct scsi_exec_args exec_args = {
.sshdr = sshdr,
};
/* Prepare the data buffer */
memset(stpg_data, 0, stpg_len);
/* Byte 4: requested access state; bytes 6-7: target port group (big-endian) */
stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
put_unaligned_be16(group_id, &stpg_data[6]);
/*
 * NOTE(review): everything from here to the closing brace appears to be
 * spliced in from a different function (a port-group lookup, presumably
 * alua_find_get_pg): id_str, id_size and pg are not declared in this
 * scope, the CDB is never populated, and no SCSI command is submitted.
 * The source looks corrupted -- reconcile against the upstream driver
 * before trusting this block.
 */
if (!id_str || !id_size || !strlen(id_str)) return NULL;
list_for_each_entry(pg, &port_group_list, node) { if (pg->group_id != group_id) continue; if (!pg->device_id_len || pg->device_id_len != id_size) continue; if (strncmp(pg->device_id_str, id_str, id_size)) continue; if (!kref_get_unless_zero(&pg->kref)) continue; return pg;
}
return NULL;
}
/* * alua_alloc_pg - Allocate a new port_group structure * @sdev: scsi device * @group_id: port group id * @tpgs: target port group settings * * Allocate a new port_group structure for a given * device.
*/ staticstruct alua_port_group *alua_alloc_pg(struct scsi_device *sdev, int group_id, int tpgs)
{ struct alua_port_group *pg, *tmp_pg;
pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); if (!pg) return ERR_PTR(-ENOMEM);
/*
 * NOTE(review): truncated here -- the initialization of the new pg
 * (device id, kref, lists), the duplicate check against tmp_pg on the
 * global list, and the final return are missing from this view.
 */
/* * alua_check_tpgs - Evaluate TPGS setting * @sdev: device to be checked * * Examine the TPGS setting of the sdev to find out if ALUA * is supported.
*/ staticint alua_check_tpgs(struct scsi_device *sdev)
{ int tpgs = TPGS_MODE_NONE;
/* * ALUA support for non-disk devices is fraught with * difficulties, so disable it for now.
*/ if (sdev->type != TYPE_DISK) {
sdev_printk(KERN_INFO, sdev, "%s: disable for non-disk devices\n",
ALUA_DH_NAME); return tpgs;
}
/*
 * NOTE(review): truncated -- the actual evaluation of the device's TPGS
 * mode and the function's closing brace are missing from this view.
 */
/* * alua_check_vpd - Evaluate INQUIRY vpd page 0x83 * @sdev: device to be checked * * Extract the relative target port and the target port group * descriptor from the list of identificators.
*/ staticint alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h, int tpgs)
{ int rel_port = -1, group_id; struct alua_port_group *pg, *old_pg = NULL; bool pg_updated = false; unsignedlong flags;
group_id = scsi_vpd_tpg_id(sdev, &rel_port); if (group_id < 0) { /* * Internal error; TPGS supported but required * VPD identification descriptors not present. * Disable ALUA support
*/
sdev_printk(KERN_INFO, sdev, "%s: No target port descriptors found\n",
ALUA_DH_NAME); return SCSI_DH_DEV_UNSUPP;
}
pg = alua_alloc_pg(sdev, group_id, tpgs); if (IS_ERR(pg)) { if (PTR_ERR(pg) == -ENOMEM) return SCSI_DH_NOMEM; return SCSI_DH_DEV_UNSUPP;
} if (pg->device_id_len)
sdev_printk(KERN_INFO, sdev, "%s: device %s port group %x rel port %x\n",
ALUA_DH_NAME, pg->device_id_str,
group_id, rel_port); else
sdev_printk(KERN_INFO, sdev, "%s: port group %x rel port %x\n",
ALUA_DH_NAME, group_id, rel_port);
kref_get(&pg->kref);
/* Check for existing port group references */
spin_lock(&h->pg_lock);
old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); if (old_pg != pg) { /* port group has changed. Update to new port group */ if (h->pg) {
spin_lock_irqsave(&old_pg->lock, flags);
list_del_rcu(&h->node);
spin_unlock_irqrestore(&old_pg->lock, flags);
}
rcu_assign_pointer(h->pg, pg);
pg_updated = true;
}
spin_lock_irqsave(&pg->lock, flags); if (pg_updated)
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
if (old_pg)
kref_put(&old_pg->kref, release_port_group);
return SCSI_DH_OK;
}
staticchar print_alua_state(unsignedchar state)
{ switch (state) { case SCSI_ACCESS_STATE_OPTIMAL: return'A'; case SCSI_ACCESS_STATE_ACTIVE: return'N'; case SCSI_ACCESS_STATE_STANDBY: return'S'; case SCSI_ACCESS_STATE_UNAVAILABLE: return'U'; case SCSI_ACCESS_STATE_LBA: return'L'; case SCSI_ACCESS_STATE_OFFLINE: return'O'; case SCSI_ACCESS_STATE_TRANSITIONING: return'T'; default: return'X';
}
}
staticenum scsi_disposition alua_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr)
{ switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition
*/
alua_handle_state_transition(sdev); return NEEDS_RETRY;
} break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) { /* * LUN Not Accessible - ALUA state transition
*/
alua_handle_state_transition(sdev); return NEEDS_RETRY;
} if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) { /* * Power On, Reset, or Bus Device Reset. * Might have obscured a state transition, * so schedule a recheck.
*/
alua_check(sdev, true); return ADD_TO_MLQUEUE;
} if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04) /* * Device internal reset
*/ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) /* * Mode Parameters Changed
*/ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { /* * ALUA state changed
*/
alua_check(sdev, true); return ADD_TO_MLQUEUE;
} if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { /* * Implicit ALUA state transition failed
*/
alua_check(sdev, true); return ADD_TO_MLQUEUE;
} if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) /* * Inquiry data has changed
*/ return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) /* * REPORTED_LUNS_DATA_HAS_CHANGED is reported * when switching controllers on targets like * Intel Multi-Flex. We can just retry.
*/ return ADD_TO_MLQUEUE; break;
}
return SCSI_RETURN_NOT_HANDLED;
}
/* * alua_tur - Send a TEST UNIT READY * @sdev: device to which the TEST UNIT READY command should be send * * Send a TEST UNIT READY to @sdev to figure out the device state * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING, * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
*/ staticint alua_tur(struct scsi_device *sdev)
{ struct scsi_sense_hdr sense_hdr; int retval;
/*
 * NOTE(review): truncated -- the TEST UNIT READY submission, sense
 * evaluation and return statements are missing from this view.
 */
/* * alua_rtpg - Evaluate REPORT TARGET GROUP STATES * @sdev: the device to be evaluated. * * Evaluate the Target Port Group State. * Returns SCSI_DH_DEV_OFFLINED if the path is * found to be unusable.
*/ staticint alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{ struct scsi_sense_hdr sense_hdr; struct alua_port_group *tmp_pg; int len, k, off, bufflen = ALUA_RTPG_SIZE; int group_id_old, state_old, pref_old, valid_states_old; unsignedchar *desc, *buff; unsigned err; int retval; unsignedint tpg_desc_tbl_off; unsignedchar orig_transition_tmo; unsignedlong flags; bool transitioning_sense = false;
/*
 * NOTE(review): this function is corrupted in this view. The buffer
 * allocation and the submit_rtpg() call that set 'buff' and 'retval'
 * are missing, so both are read uninitialized as shown below, and the
 * 'retry' / 'skip_rtpg' labels referenced by the gotos do not appear
 * anywhere in this span. Reconcile with the upstream driver.
 */
if (retval) { /* * Some (broken) implementations have a habit of returning * an error during things like firmware update etc. * But if the target only supports active/optimized there's * not much we can do; it's not that we can switch paths * or anything. * So ignore any errors to avoid spurious failures during * path failover.
*/ if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
sdev_printk(KERN_INFO, sdev, "%s: ignoring rtpg result %d\n",
ALUA_DH_NAME, retval);
kfree(buff); return SCSI_DH_OK;
} if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
sdev_printk(KERN_INFO, sdev, "%s: rtpg failed, result %d\n",
ALUA_DH_NAME, retval);
kfree(buff); if (retval < 0) return SCSI_DH_DEV_TEMP_BUSY; if (host_byte(retval) == DID_NO_CONNECT) return SCSI_DH_RES_TEMP_UNAVAIL; return SCSI_DH_IO;
}
/* * submit_rtpg() has failed on existing arrays * when requesting extended header info, and * the array doesn't support extended headers, * even though it shouldn't according to T10. * The retry without rtpg_ext_hdr_req set * handles this. * Note: some arrays return a sense key of ILLEGAL_REQUEST * with ASC 00h if they don't support the extended header.
*/ if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
sense_hdr.sense_key == ILLEGAL_REQUEST) {
pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; goto retry;
} /* * If the array returns with 'ALUA state transition' * sense code here it cannot return RTPG data during * transition. So set the state to 'transitioning' directly.
*/ if (sense_hdr.sense_key == NOT_READY &&
sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
transitioning_sense = true; goto skip_rtpg;
} /* * Retry on any other UNIT ATTENTION occurred.
*/ if (sense_hdr.sense_key == UNIT_ATTENTION)
err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY &&
pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
kfree(buff); return err;
}
sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
ALUA_DH_NAME);
scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
kfree(buff);
pg->expiry = 0; return SCSI_DH_IO;
}
/*
 * NOTE(review): the RTPG descriptor-parsing loop that normally sits
 * between the error handling above and this switch is missing, as is
 * the spin_lock_irqsave(&pg->lock, ...) matching the unlock below.
 */
switch (pg->state) { case SCSI_ACCESS_STATE_TRANSITIONING: if (time_before(jiffies, pg->expiry)) { /* State transition, retry */
pg->interval = ALUA_RTPG_RETRY_DELAY;
err = SCSI_DH_RETRY;
} else { struct alua_dh_data *h;
/* Transitioning time exceeded, set port to standby */
err = SCSI_DH_IO;
pg->state = SCSI_ACCESS_STATE_STANDBY;
pg->expiry = 0;
rcu_read_lock();
list_for_each_entry_rcu(h, &pg->dh_list, node) { if (!h->sdev) continue;
h->sdev->access_state =
(pg->state & SCSI_ACCESS_STATE_MASK); if (pg->pref)
h->sdev->access_state |=
SCSI_ACCESS_STATE_PREFERRED;
}
rcu_read_unlock();
} break; case SCSI_ACCESS_STATE_OFFLINE: /* Path unusable */
err = SCSI_DH_DEV_OFFLINED;
pg->expiry = 0; break; default: /* Useable path if active */
err = SCSI_DH_OK;
pg->expiry = 0; break;
}
spin_unlock_irqrestore(&pg->lock, flags);
kfree(buff); return err;
}
/* * alua_stpg - Issue a SET TARGET PORT GROUP command * * Issue a SET TARGET PORT GROUP command and evaluate the * response. Returns SCSI_DH_RETRY per default to trigger * a re-evaluation of the target group state or SCSI_DH_OK * if no further action needs to be taken.
*/ staticunsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{ int retval; struct scsi_sense_hdr sense_hdr;
if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { /* Only implicit ALUA supported, retry */ return SCSI_DH_RETRY;
} switch (pg->state) { case SCSI_ACCESS_STATE_OPTIMAL: return SCSI_DH_OK; case SCSI_ACCESS_STATE_ACTIVE: if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
!pg->pref &&
(pg->tpgs & TPGS_MODE_IMPLICIT)) return SCSI_DH_OK; break; case SCSI_ACCESS_STATE_STANDBY: case SCSI_ACCESS_STATE_UNAVAILABLE: break; case SCSI_ACCESS_STATE_OFFLINE: return SCSI_DH_IO; case SCSI_ACCESS_STATE_TRANSITIONING: break; default:
sdev_printk(KERN_INFO, sdev, "%s: stpg failed, unhandled TPGS state %d",
ALUA_DH_NAME, pg->state); return SCSI_DH_NOSYS;
}
retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
/*
 * NOTE(review): truncated -- the evaluation of retval/sense_hdr, the
 * function's final return and its closing brace are missing from this
 * view.
 */
/* * The caller must call scsi_device_put() on the returned pointer if it is not * NULL.
*/ staticstruct scsi_device * __must_check
alua_rtpg_select_sdev(struct alua_port_group *pg)
{ struct alua_dh_data *h; struct scsi_device *sdev = NULL, *prev_sdev;
lockdep_assert_held(&pg->lock); if (WARN_ON(!pg->rtpg_sdev)) return NULL;
/* * RCU protection isn't necessary for dh_list here * as we hold pg->lock, but for access to h->pg.
*/
rcu_read_lock();
/* Pick another enabled sdev attached to this pg; disable the failing one */
list_for_each_entry_rcu(h, &pg->dh_list, node) { if (!h->sdev) continue; if (h->sdev == pg->rtpg_sdev) {
h->disabled = true; continue;
} if (rcu_dereference(h->pg) == pg &&
!h->disabled &&
!scsi_device_get(h->sdev)) {
sdev = h->sdev; break;
}
}
rcu_read_unlock();
if (!sdev) {
pr_warn("%s: no device found for rtpg\n",
(pg->device_id_len ?
(char *)pg->device_id_str : "(nameless PG)")); return NULL;
}
sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");
/*
 * NOTE(review): truncated -- the swap of pg->rtpg_sdev to the newly
 * selected device, the final return and the closing brace are missing
 * from this view ('prev_sdev' is otherwise unused here).
 */
/*
 * NOTE(review): this span is the interior of the RTPG worker
 * (presumably alua_rtpg_work) -- its function header and the
 * declarations of sdev, pg, h, err, prev_sdev, flags and qdata_list
 * are missing from this view, as is the STPG handling and the code
 * that completes the requests spliced onto qdata_list.
 */
spin_lock_irqsave(&pg->lock, flags);
sdev = pg->rtpg_sdev; if (!sdev) {
WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
spin_unlock_irqrestore(&pg->lock, flags);
kref_put(&pg->kref, release_port_group); return;
}
pg->flags |= ALUA_PG_RUNNING; if (pg->flags & ALUA_PG_RUN_RTPG) { int state = pg->state;
pg->flags &= ~ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags); if (state == SCSI_ACCESS_STATE_TRANSITIONING) { if (alua_tur(sdev) == SCSI_DH_RETRY) {
spin_lock_irqsave(&pg->lock, flags);
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG; if (!pg->interval)
pg->interval = ALUA_RTPG_RETRY_DELAY;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ); return;
} /* Send RTPG on failure or if TUR indicates SUCCESS */
}
err = alua_rtpg(sdev, pg);
spin_lock_irqsave(&pg->lock, flags);
/* If RTPG failed on the current device, try using another */ if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
(prev_sdev = alua_rtpg_select_sdev(pg)))
err = SCSI_DH_IMM_RETRY;
list_splice_init(&pg->rtpg_list, &qdata_list); /* * We went through an RTPG, for good or bad. * Re-enable all devices for the next attempt.
*/
list_for_each_entry(h, &pg->dh_list, node)
h->disabled = false;
pg->rtpg_sdev = NULL;
spin_unlock_irqrestore(&pg->lock, flags);
/*
 * NOTE(review): the goto that targets 'queue_rtpg' is not visible in
 * this span -- presumably it lives in the missing retry paths above.
 */
queue_rtpg: if (prev_sdev)
scsi_device_put(prev_sdev);
queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ);
}
/**
 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
 * @pg: ALUA port group associated with @sdev.
 * @sdev: SCSI device for which to submit an RTPG.
 * @qdata: Information about the callback to invoke after the RTPG.
 * @force: Whether or not to submit an RTPG if a work item that will submit an
 *         RTPG already has been scheduled.
 *
 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
 * That function is responsible for calling @qdata->fn().
 *
 * Context: may be called from atomic context (alua_check()) only if the caller
 *	holds an sdev reference.
 *
 * Note: the original text had fused keywords ("staticbool", "elseif",
 * "returnfalse", "returntrue") that cannot compile; only formatting is
 * fixed here.
 */
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;

	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
		return false;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		struct alua_dh_data *h = sdev->handler_data;

		rcu_read_lock();
		if (h && rcu_dereference(h->pg) == pg) {
			pg->interval = 0;
			pg->flags |= ALUA_PG_RUN_RTPG;
			kref_get(&pg->kref);
			pg->rtpg_sdev = sdev;
			start_queue = 1;
		}
		rcu_read_unlock();
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			start_queue = 1;
		}
	}
	spin_unlock_irqrestore(&pg->lock, flags);

	if (start_queue) {
		if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
				       msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
			sdev = NULL;	/* reference handed to the work item */
		else
			kref_put(&pg->kref, release_port_group);
	}
	if (sdev)
		scsi_device_put(sdev);

	return true;
}
/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 * @h: per-device ALUA handler data
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 *
 * Returns a SCSI_DH_* status; SCSI_DH_DEV_UNSUPP if TPGS is not
 * supported at all. (Original text had a fused "staticint" keyword
 * that cannot compile; only formatting is fixed here.)
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	h->disabled = false;
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}
/* * alua_set_params - set/unset the optimize flag * @sdev: device on the path to be activated * params - parameters in the following format * "no_of_params\0param1\0param2\0param3\0...\0" * For example, to set the flag pass the following parameters * from multipath.conf * hardware_handler "2 alua 1"
*/ staticint alua_set_params(struct scsi_device *sdev, constchar *params)
{ struct alua_dh_data *h = sdev->handler_data; struct alua_port_group *pg = NULL; unsignedint optimize = 0, argc; constchar *p = params; int result = SCSI_DH_OK; unsignedlong flags;
/*
 * NOTE(review): truncated -- the parsing of the parameter string and
 * the update of the port group's ALUA_OPTIMIZE_STPG flag are missing
 * from this view, as is the function's closing brace.
 */
/* * alua_activate - activate a path * @sdev: device on the path to be activated * * We're currently switching the port group to be activated only and * let the array figure out the rest. * There may be other arrays which require us to switch all port groups * based on a certain policy. But until we actually encounter them it * should be okay.
*/ staticint alua_activate(struct scsi_device *sdev,
activate_complete fn, void *data)
{ struct alua_dh_data *h = sdev->handler_data; int err = SCSI_DH_OK; struct alua_queue_data *qdata; struct alua_port_group *pg;
/*
 * NOTE(review): truncated -- the qdata allocation, the
 * alua_rtpg_queue() call and the completion handling are missing
 * from this view.
 */
¤ Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung (0.17, vorverarbeitet) sind
noch experimentell.