/* Module parameter "eh_deadline": default SCSI error-handling deadline in
 * seconds, backed by shost_eh_deadline; readable by all, writable by root
 * (S_IRUGO|S_IWUSR) via /sys/module. */
module_param_named(eh_deadline, shost_eh_deadline, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(eh_deadline, "SCSI EH timeout in seconds (should be between 0 and 2^31-1)");
/**
 * scsi_host_set_state - Take the given host through the host state model.
 * @shost: scsi host to change the state of.
 * @state: state to change to.
 *
 * Returns zero on success or an error if the requested transition is
 * illegal.  (The original comment said "zero if unsuccessful", which
 * contradicts the code below: the success paths return 0.)
 **/
int scsi_host_set_state(struct Scsi_Host *shost, enum scsi_host_state state)
{ enum scsi_host_state oldstate = shost->shost_state;
	/* Requesting the current state is always a legal no-op. */
if (state == oldstate) return 0;
	/* Validate the requested transition against the host state machine;
	 * each target state enumerates the states it may be entered from. */
switch (state) { case SHOST_CREATED: /* There are no legal states that come back to * created. This is the manually initialised start
* state */ goto illegal;
case SHOST_RUNNING: switch (oldstate) { case SHOST_CREATED: case SHOST_RECOVERY: break; default: goto illegal;
} break;
case SHOST_RECOVERY: switch (oldstate) { case SHOST_RUNNING: break; default: goto illegal;
} break;
case SHOST_CANCEL: switch (oldstate) { case SHOST_CREATED: case SHOST_RUNNING: case SHOST_CANCEL_RECOVERY: break; default: goto illegal;
} break;
case SHOST_DEL: switch (oldstate) { case SHOST_CANCEL: case SHOST_DEL_RECOVERY: break; default: goto illegal;
} break;
case SHOST_CANCEL_RECOVERY: switch (oldstate) { case SHOST_CANCEL: case SHOST_RECOVERY: break; default: goto illegal;
} break;
case SHOST_DEL_RECOVERY: switch (oldstate) { case SHOST_CANCEL_RECOVERY: break; default: goto illegal;
} break;
}
	/* Transition accepted: commit the new state. */
shost->shost_state = state; return 0;
	/* NOTE(review): the "illegal:" label targeted by the gotos above,
	 * its error return (-EINVAL upstream), and the function's closing
	 * brace are missing from this chunk — verify against the full file. */
/* NOTE(review): fragment of a host teardown path (presumably
 * scsi_remove_host); the enclosing function's signature and the
 * declaration of "flags" are not visible in this chunk. */
/* * New SCSI devices cannot be attached anymore because of the SCSI host * state so drop the tag set refcnt. Wait until the tag set refcnt drops * to zero because .exit_cmd_priv implementations may need the host * pointer.
*/
kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
wait_for_completion(&shost->tagset_freed);
	/* Under the host lock: move the host to SHOST_DEL; if that transition
	 * is rejected the host must be in a recovery state, in which case the
	 * SHOST_DEL_RECOVERY transition has to succeed (BUG otherwise). */
spin_lock_irqsave(shost->host_lock, flags); if (scsi_host_set_state(shost, SHOST_DEL))
BUG_ON(scsi_host_set_state(shost, SHOST_DEL_RECOVERY));
spin_unlock_irqrestore(shost->host_lock, flags);
/** * scsi_add_host_with_dma - add a scsi host with dma device * @shost: scsi host pointer to add * @dev: a struct device of type scsi class * @dma_dev: dma device for the host * * Note: You rarely need to worry about this unless you're in a * virtualised host environments, so use the simpler scsi_add_host() * function instead. * * Return value: * 0 on success / != 0 for error
**/ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, struct device *dma_dev)
{ conststruct scsi_host_template *sht = shost->hostt; int error = -EINVAL;
/* * Increase usage count temporarily here so that calling * scsi_autopm_put_host() will trigger runtime idle if there is * nothing else preventing suspending the device.
*/
pm_runtime_get_noresume(&shost->shost_gendev);
pm_runtime_set_active(&shost->shost_gendev);
pm_runtime_enable(&shost->shost_gendev);
device_enable_async_suspend(&shost->shost_gendev);
error = device_add(&shost->shost_gendev); if (error) goto out_disable_runtime_pm;
/* * Any host allocation in this function will be freed in * scsi_host_dev_release().
*/
out_del_dev:
device_del(&shost->shost_dev);
out_del_gendev: /* * Host state is SHOST_RUNNING so we have to explicitly release * ->shost_dev.
*/
put_device(&shost->shost_dev);
device_del(&shost->shost_gendev);
out_disable_runtime_pm:
device_disable_async_suspend(&shost->shost_gendev);
pm_runtime_disable(&shost->shost_gendev);
pm_runtime_set_suspended(&shost->shost_gendev);
pm_runtime_put_noidle(&shost->shost_gendev);
kref_put(&shost->tagset_refcnt, scsi_mq_free_tags);
fail: return error;
}
EXPORT_SYMBOL(scsi_add_host_with_dma);
/* NOTE(review): fragment of a host release function (presumably
 * scsi_host_dev_release); its signature and the declaration of "parent"
 * are not visible in this chunk. */
/* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
rcu_barrier();
	/* Tear down the error-handling resources that were created for this
	 * host: TMF workqueue, EH kthread, and the general work queue. */
if (shost->tmf_work_q)
destroy_workqueue(shost->tmf_work_q); if (shost->ehandler)
kthread_stop(shost->ehandler); if (shost->work_q)
destroy_workqueue(shost->work_q);
if (shost->shost_state == SHOST_CREATED) { /* * Free the shost_dev device name and remove the proc host dir * here if scsi_host_{alloc,put}() have been called but neither * scsi_host_add() nor scsi_remove_host() has been called. * This avoids that the memory allocated for the shost_dev * name as well as the proc dir structure are leaked.
*/
scsi_proc_hostdir_rm(shost->hostt);
kfree(dev_name(&shost->shost_dev));
}
kfree(shost->shost_data);
	/* Return the host number to the IDA so it can be reused. */
ida_free(&host_index_ida, shost->host_no);
	/* The parent reference is only dropped for hosts that progressed past
	 * SHOST_CREATED — presumably it is taken during host add; confirm
	 * against the full file. */
if (shost->shost_state != SHOST_CREATED)
put_device(parent);
kfree(shost);
}
/** * scsi_host_alloc - register a scsi host adapter instance. * @sht: pointer to scsi host template * @privsize: extra bytes to allocate for driver * * Note: * Allocate a new Scsi_Host and perform basic initialization. * The host is not published to the scsi midlayer until scsi_add_host * is called. * * Return value: * Pointer to a new Scsi_Host
**/ struct Scsi_Host *scsi_host_alloc(conststruct scsi_host_template *sht, int privsize)
{ struct Scsi_Host *shost; int index;
index = ida_alloc(&host_index_ida, GFP_KERNEL); if (index < 0) {
kfree(shost); return NULL;
}
shost->host_no = index;
shost->dma_channel = 0xff;
/* These three are default values which can be overridden */
shost->max_channel = 0;
shost->max_id = 8;
shost->max_lun = 8;
/* Give each shost a default transportt */
shost->transportt = &blank_transport_template;
/* * All drivers right now should be able to handle 12 byte * commands. Every so often there are requests for 16 byte * commands, but individual low-level drivers need to certify that * they actually do something sensible with such commands.
*/
shost->max_cmd_len = 12;
shost->hostt = sht;
shost->this_id = sht->this_id;
shost->can_queue = sht->can_queue;
shost->sg_tablesize = sht->sg_tablesize;
shost->sg_prot_tablesize = sht->sg_prot_tablesize;
shost->cmd_per_lun = sht->cmd_per_lun;
shost->no_write_same = sht->no_write_same;
shost->host_tagset = sht->host_tagset;
shost->queuecommand_may_block = sht->queuecommand_may_block;
if (sht->supported_mode == MODE_UNKNOWN) /* means we didn't set it ... default to INITIATOR */
shost->active_mode = MODE_INITIATOR; else
shost->active_mode = sht->supported_mode;
if (sht->max_host_blocked)
shost->max_host_blocked = sht->max_host_blocked; else
shost->max_host_blocked = SCSI_DEFAULT_HOST_BLOCKED;
/* * If the driver imposes no hard sector transfer limit, start at * machine infinity initially.
*/ if (sht->max_sectors)
shost->max_sectors = sht->max_sectors; else
shost->max_sectors = SCSI_DEFAULT_MAX_SECTORS;
shost->tmf_work_q = alloc_workqueue("scsi_tmf_%d",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
1, shost->host_no); if (!shost->tmf_work_q) {
shost_printk(KERN_WARNING, shost, "failed to create tmf workq\n"); goto fail;
} if (scsi_proc_hostdir_add(shost->hostt) < 0) goto fail; return shost;
fail: /* * Host state is still SHOST_CREATED and that is enough to release * ->shost_gendev. scsi_host_dev_release() will free * dev_name(&shost->shost_dev).
*/
put_device(&shost->shost_gendev);
	/* NOTE(review): fragment of a class_find_device() match callback
	 * (presumably __scsi_host_match); its signature and the declarations
	 * of "p", "dev" and "hostnum" are not visible in this chunk.
	 * Matches when the host's number equals the requested one. */
p = class_to_shost(dev); return p->host_no == *hostnum;
}
/** * scsi_host_lookup - get a reference to a Scsi_Host by host no * @hostnum: host number to locate * * Return value: * A pointer to located Scsi_Host or NULL. * * The caller must do a scsi_host_put() to drop the reference * that scsi_host_get() took. The put_device() below dropped * the reference from class_find_device().
**/ struct Scsi_Host *scsi_host_lookup(unsignedint hostnum)
{ struct device *cdev; struct Scsi_Host *shost = NULL;
if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
(*count)++;
returntrue;
}
/**
 * scsi_host_busy - Return the host busy counter
 * @shost: Pointer to Scsi_Host to inc.
 **/
int scsi_host_busy(struct Scsi_Host *shost)
{ int cnt = 0;
	/* NOTE(review): the remainder of scsi_host_busy (presumably a busy-tag
	 * iteration that increments cnt, followed by "return cnt;") is missing
	 * from this chunk — verify against the complete file. */
/**
 * scsi_queue_work - Queue work to the Scsi_Host workqueue.
 * @shost: Pointer to Scsi_Host.
 * @work: Work to queue for execution.
 *
 * Return value:
 * 	1 - work queued for execution
 * 	0 - work is already queued
 * 	-EINVAL - work queue doesn't exist
 **/
int scsi_queue_work(struct Scsi_Host *shost, struct work_struct *work)
{ if (unlikely(!shost->work_q)) {
shost_printk(KERN_ERR, shost, "ERROR: Scsi host '%s' attempted to queue scsi-work, " "when no workqueue created.\n", shost->hostt->name);
dump_stack();
	/* NOTE(review): the -EINVAL return for the no-workqueue case and the
	 * queue_work() call that produces the 1/0 return values documented
	 * above are missing from this chunk — verify against the full file. */
/** * scsi_host_complete_all_commands - Terminate all running commands * @shost: Scsi Host on which commands should be terminated * @status: Status to be set for the terminated commands * * There is no protection against modification of the number * of outstanding commands. It is the responsibility of the * caller to ensure that concurrent I/O submission and/or * completion is stopped when calling this function.
*/ void scsi_host_complete_all_commands(struct Scsi_Host *shost, enum scsi_host_status status)
{
blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
&status);
}
EXPORT_SYMBOL_GPL(scsi_host_complete_all_commands);
/**
 * scsi_host_busy_iter - Iterate over all busy commands
 * @shost: Pointer to Scsi_Host.
 * @fn: Function to call on each busy command
 * @priv: Data pointer passed to @fn
 *
 * If locking against concurrent command completions is required
 * it has to be provided by the caller
 **/
void scsi_host_busy_iter(struct Scsi_Host *shost, bool (*fn)(struct scsi_cmnd *, void *), void *priv)
{ struct scsi_host_busy_iter_data iter_data = {
.fn = fn,
.priv = priv,
};
	/* NOTE(review): the tag-set iteration call that consumes iter_data and
	 * the function's closing brace are missing from this chunk — verify
	 * against the complete file. */
/*
 * NOTE(review): trailing boilerplate from a web page (a German disclaimer),
 * not part of this source file.  English translation of the original text:
 * "The information on this website has been compiled carefully to the best
 * of our knowledge.  However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.  Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */