/**
 * srp_tmo_valid() - check timeout combination validity
 * @reconnect_delay: Reconnect delay in seconds.
 * @fast_io_fail_tmo: Fast I/O fail timeout in seconds.
 * @dev_loss_tmo: Device loss timeout in seconds.
 *
 * The combination of the timeout parameters must be such that SCSI commands
 * are finished in a reasonable time. Hence do not allow the fast I/O fail
 * timeout to exceed SCSI_DEVICE_BLOCK_MAX_TIMEOUT nor allow dev_loss_tmo to
 * exceed that limit if failing I/O fast has been disabled. Furthermore, these
 * parameters must be such that multipath can detect failed paths timely.
 * Hence do not allow all three parameters to be disabled simultaneously.
 */
int srp_tmo_valid(int reconnect_delay, int fast_io_fail_tmo, long dev_loss_tmo)
{
	/* At least one of the three recovery mechanisms must be enabled. */
	if (reconnect_delay < 0 && fast_io_fail_tmo < 0 && dev_loss_tmo < 0)
		return -EINVAL;
	/* A reconnect delay of zero seconds makes no sense. */
	if (reconnect_delay == 0)
		return -EINVAL;
	/* fast_io_fail_tmo must not exceed the SCSI block-device timeout. */
	if (fast_io_fail_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	/* The same cap applies to dev_loss_tmo when fast I/O fail is off. */
	if (fast_io_fail_tmo < 0 &&
	    dev_loss_tmo > SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		return -EINVAL;
	/* dev_loss_tmo is converted to jiffies later; reject overflow. */
	if (dev_loss_tmo >= LONG_MAX / HZ)
		return -EINVAL;
	/* Fast I/O failure must trigger strictly before device loss. */
	if (fast_io_fail_tmo >= 0 && dev_loss_tmo >= 0 &&
	    fast_io_fail_tmo >= dev_loss_tmo)
		return -EINVAL;
	return 0;
}
EXPORT_SYMBOL_GPL(srp_tmo_valid);
/*
 * NOTE(review): fragment — the enclosing function header is not visible in
 * this chunk. The code reads rport->roles and formats a sysfs buffer, so it
 * is presumably the tail of a "roles" attribute show handler; confirm
 * against the full file. Looks up the entry in srp_rport_role_names[] whose
 * value matches rport->roles and prints its name, falling back to "unknown"
 * when no entry matches.
 */
for (i = 0; i < ARRAY_SIZE(srp_rport_role_names); i++) if (srp_rport_role_names[i].value == rport->roles) {
name = srp_rport_role_names[i].name; break;
} return sprintf(buf, "%s\n", name ? : "unknown");
}
/*
 * NOTE(review): fragment — the enclosing store handler's header is not
 * visible in this chunk. Parses a fast_io_fail_tmo value from a sysfs
 * buffer, validates it against the rport's other timeouts via
 * srp_tmo_valid(), and only commits it to rport->fast_io_fail_tmo when the
 * combination is valid.
 */
res = srp_parse_tmo(&fast_io_fail_tmo, buf); if (res) goto out;
res = srp_tmo_valid(rport->reconnect_delay, fast_io_fail_tmo,
rport->dev_loss_tmo); if (res) goto out;
rport->fast_io_fail_tmo = fast_io_fail_tmo;
/* Success: a sysfs store handler returns the number of bytes consumed. */
res = count;
/*
 * NOTE(review): fragment — the enclosing store handler's header is not
 * visible in this chunk. Parses a dev_loss_tmo value from a sysfs buffer,
 * validates it against the rport's other timeouts via srp_tmo_valid(), and
 * only commits it to rport->dev_loss_tmo when the combination is valid.
 */
res = srp_parse_tmo(&dev_loss_tmo, buf); if (res) goto out;
res = srp_tmo_valid(rport->reconnect_delay, rport->fast_io_fail_tmo,
dev_loss_tmo); if (res) goto out;
rport->dev_loss_tmo = dev_loss_tmo;
/* Success: a sysfs store handler returns the number of bytes consumed. */
res = count;
/*
 * NOTE(review): fragment — the function header is not visible in this
 * chunk; from the state names, the final assignment and the -EINVAL return
 * this is the rport state-transition validator (presumably
 * srp_rport_set_state()). The nested switches encode the allowed
 * transitions:
 * - RUNNING may be entered from any state except LOST;
 * - BLOCKED may only be entered from RUNNING;
 * - FAIL_FAST may be entered from any state except LOST;
 * - LOST may be entered from any state.
 * On an allowed transition the new state is committed and 0 returned;
 * otherwise -EINVAL.
 */
switch (new_state) { case SRP_RPORT_RUNNING: switch (old_state) { case SRP_RPORT_LOST: goto invalid; default: break;
} break; case SRP_RPORT_BLOCKED: switch (old_state) { case SRP_RPORT_RUNNING: break; default: goto invalid;
} break; case SRP_RPORT_FAIL_FAST: switch (old_state) { case SRP_RPORT_LOST: goto invalid; default: break;
} break; case SRP_RPORT_LOST: break;
}
rport->state = new_state; return 0;
invalid: return -EINVAL;
}
/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 *
 * NOTE(review): truncated fragment — only the local declarations are
 * visible in this chunk; the rest of the body lies elsewhere. "staticvoid"
 * below is missing a space (extraction artifact) and will not compile
 * as-is; fix when the full function is in view.
 */ staticvoid srp_reconnect_work(struct work_struct *work)
{ struct srp_rport *rport = container_of(to_delayed_work(work), struct srp_rport, reconnect_work); struct Scsi_Host *shost = rport_to_shost(rport); int delay, res;
/*
 * __rport_fail_io_fast() - transition an rport into the fast-I/O-fail state.
 * @rport: SRP target port to fail over.
 *
 * scsi_block_targets() must have been called before this function is
 * called to guarantee that no .queuecommand() calls are in progress.
 *
 * Fix: the original read "staticvoid" (missing space between the storage
 * class and the return type), which does not compile.
 */
static void __rport_fail_io_fast(struct srp_rport *rport)
{
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct srp_internal *i;

	lockdep_assert_held(&rport->mutex);

	/* Bail out if the transition to FAIL_FAST is not allowed. */
	if (srp_rport_set_state(rport, SRP_RPORT_FAIL_FAST))
		return;

	/* Involve the LLD if possible to terminate all I/O on the rport. */
	i = to_srp_internal(shost->transportt);
	if (i->f->terminate_rport_io)
		i->f->terminate_rport_io(rport);
}
/**
 * rport_fast_io_fail_timedout() - fast I/O failure timeout handler
 * @work: Work structure used for scheduling this operation.
 *
 * Logs the expiry and, if the rport is still blocked, fails all I/O fast
 * under the rport mutex.
 *
 * Fix: the original read "staticvoid" (missing space between the storage
 * class and the return type), which does not compile.
 */
static void rport_fast_io_fail_timedout(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
		struct srp_rport, fast_io_fail_work);
	struct Scsi_Host *shost = rport_to_shost(rport);

	pr_info("fast_io_fail_tmo expired for SRP %s / %s.\n",
		dev_name(&rport->dev), dev_name(&shost->shost_gendev));

	/* Only fail I/O fast if the port is still in the blocked state. */
	mutex_lock(&rport->mutex);
	if (rport->state == SRP_RPORT_BLOCKED)
		__rport_fail_io_fast(rport);
	mutex_unlock(&rport->mutex);
}
/**
 * rport_dev_loss_timedout() - device loss timeout handler
 * @work: Work structure used for scheduling this operation.
 *
 * NOTE(review): truncated fragment — only the declarations and the log
 * message are visible; the teardown logic and closing brace lie outside
 * this chunk. "staticvoid" below is missing a space (extraction artifact)
 * and will not compile as-is.
 */ staticvoid rport_dev_loss_timedout(struct work_struct *work)
{ struct srp_rport *rport = container_of(to_delayed_work(work), struct srp_rport, dev_loss_work); struct Scsi_Host *shost = rport_to_shost(rport); struct srp_internal *i = to_srp_internal(shost->transportt);
pr_info("dev_loss_tmo expired for SRP %s / %s.\n",
dev_name(&rport->dev), dev_name(&shost->shost_gendev));
/**
 * srp_start_tl_fail_timers() - start the transport layer failure timers
 * @rport: SRP target port.
 *
 * Start the transport layer fast I/O failure and device loss timers. Do not
 * modify a timer that was already started.
 */
void srp_start_tl_fail_timers(struct srp_rport *rport)
{
	/* Serialize against concurrent rport state changes. */
	mutex_lock(&rport->mutex);
	__srp_start_tl_fail_timers(rport);
	mutex_unlock(&rport->mutex);
}
EXPORT_SYMBOL(srp_start_tl_fail_timers);
/**
 * srp_reconnect_rport() - reconnect to an SRP target port
 * @rport: SRP target port.
 *
 * Blocks SCSI command queueing before invoking reconnect() such that
 * queuecommand() won't be invoked concurrently with reconnect() from outside
 * the SCSI EH. This is important since a reconnect() implementation may
 * reallocate resources needed by queuecommand().
 *
 * Notes:
 * - This function neither waits until outstanding requests have finished nor
 *   tries to abort these. It is the responsibility of the reconnect()
 *   function to finish outstanding commands before reconnecting to the
 *   target port.
 * - It is the responsibility of the caller to ensure that the resources
 *   reallocated by the reconnect() function won't be used while this
 *   function is in progress. One possible strategy is to invoke this
 *   function from the context of the SCSI EH thread only. Another possible
 *   strategy is to lock the rport mutex inside each SCSI LLD callback that
 *   can be invoked by the SCSI EH (the scsi_host_template.eh_*() functions
 *   and also the scsi_host_template.queuecommand() function).
 *
 * NOTE(review): truncated fragment — the "out:" label targeted by the gotos
 * below, the final return and the closing brace lie outside this chunk.
 * "elseif" appears twice below with a missing space (extraction artifact)
 * and will not compile as-is; fix when the full function is in view.
 */ int srp_reconnect_rport(struct srp_rport *rport)
{ struct Scsi_Host *shost = rport_to_shost(rport); struct srp_internal *i = to_srp_internal(shost->transportt); struct scsi_device *sdev; int res;
res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; if (rport->state != SRP_RPORT_FAIL_FAST && rport->state != SRP_RPORT_LOST) /* * sdev state must be SDEV_TRANSPORT_OFFLINE, transition * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() * later is ok though, scsi_internal_device_unblock_nowait() * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
*/
scsi_block_targets(shost, &shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res); if (res == 0) {
cancel_delayed_work(&rport->fast_io_fail_work);
cancel_delayed_work(&rport->dev_loss_work);
rport->failed_reconnects = 0;
srp_rport_set_state(rport, SRP_RPORT_RUNNING);
scsi_target_unblock(&shost->shost_gendev, SDEV_RUNNING); /* * If the SCSI error handler has offlined one or more devices, * invoking scsi_target_unblock() won't change the state of * these devices into running so do that explicitly.
*/
shost_for_each_device(sdev, shost) {
mutex_lock(&sdev->state_mutex); if (sdev->sdev_state == SDEV_OFFLINE)
sdev->sdev_state = SDEV_RUNNING;
mutex_unlock(&sdev->state_mutex);
}
} elseif (rport->state == SRP_RPORT_RUNNING) { /* * srp_reconnect_rport() has been invoked with fast_io_fail * and dev_loss off. Mark the port as failed and start the TL * failure timers if these had not yet been started.
*/
__rport_fail_io_fast(rport);
__srp_start_tl_fail_timers(rport);
} elseif (rport->state != SRP_RPORT_BLOCKED) {
scsi_target_unblock(&shost->shost_gendev,
SDEV_TRANSPORT_OFFLINE);
}
mutex_unlock(&rport->mutex);
/**
 * srp_timed_out() - SRP transport intercept of the SCSI timeout EH
 * @scmd: SCSI command.
 *
 * If a timeout occurs while an rport is in the blocked state, ask the SCSI
 * EH to continue waiting (SCSI_EH_RESET_TIMER). Otherwise let the SCSI core
 * handle the timeout (SCSI_EH_NOT_HANDLED).
 *
 * Note: This function is called from soft-IRQ context and with the request
 * queue lock held.
 *
 * NOTE(review): truncated fragment — only the local declarations are
 * visible; the decision logic and closing brace lie outside this chunk.
 */ enum scsi_timeout_action srp_timed_out(struct scsi_cmnd *scmd)
{ struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; struct srp_internal *i = to_srp_internal(shost->transportt); struct srp_rport *rport = shost_to_rport(shost);
/**
 * srp_rport_add - add a SRP remote port to the device hierarchy
 * @shost: scsi host the remote port is connected to.
 * @ids: The port id for the remote port.
 *
 * Publishes a port to the rest of the system.
 *
 * NOTE(review): truncated fragment — only the allocation is visible; the
 * device initialization/registration and the closing brace lie outside this
 * chunk.
 */ struct srp_rport *srp_rport_add(struct Scsi_Host *shost, struct srp_rport_identifiers *ids)
{ struct srp_rport *rport; struct device *parent = &shost->shost_gendev; struct srp_internal *i = to_srp_internal(shost->transportt); int id, ret;
rport = kzalloc(sizeof(*rport), GFP_KERNEL); if (!rport) return ERR_PTR(-ENOMEM);
/**
 * srp_remove_host - tear down a Scsi_Host's SRP data structures
 * @shost: Scsi Host that is torn down
 *
 * Removes all SRP remote ports for a given Scsi_Host.
 * Must be called just before scsi_remove_host for SRP HBAs.
 */
void srp_remove_host(struct Scsi_Host *shost)
{
	/* Delete every child rport of this host's generic device. */
	device_for_each_child(&shost->shost_gendev, NULL, do_srp_rport_del);
}
EXPORT_SYMBOL_GPL(srp_remove_host);
/**
 * srp_stop_rport_timers - stop the transport layer recovery timers
 * @rport: SRP remote port for which to stop the timers.
 *
 * Must be called after srp_remove_host() and scsi_remove_host(). The caller
 * must hold a reference on the rport (rport->dev) and on the SCSI host
 * (rport->dev.parent).
 *
 * NOTE(review): truncated fragment — the closing brace (and presumably the
 * cancellation of the delayed work items) lies outside this chunk.
 */ void srp_stop_rport_timers(struct srp_rport *rport)
{
mutex_lock(&rport->mutex); if (rport->state == SRP_RPORT_BLOCKED)
__rport_fail_io_fast(rport);
srp_rport_set_state(rport, SRP_RPORT_LOST);
mutex_unlock(&rport->mutex);
/*
 * srp_transport_init() - register the SRP transport classes at module init.
 *
 * NOTE(review): truncated fragment — the remaining registrations, the
 * "unregister_host_class" unwinding label targeted by the goto below, and
 * the closing brace lie outside this chunk.
 */
static __init int srp_transport_init(void)
{ int ret;
ret = transport_class_register(&srp_host_class); if (ret) return ret;
ret = transport_class_register(&srp_rport_class); if (ret) goto unregister_host_class;
/*
 * NOTE(review): the following trailing text is not C code — it is a German
 * website disclaimer that leaked into the file during extraction and should
 * be removed from the source. Preserved here (translated to English) inside
 * a comment so the file remains parseable:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor the
 * quality of the information provided is guaranteed. Note: the colored
 * syntax rendering and the measurement are still experimental."
 */