/* Time after which channel-path status may be outdated. */ staticunsignedlong chp_info_expires;
staticstruct work_struct cfg_work;
/* Wait queue for configure completion events. */ static DECLARE_WAIT_QUEUE_HEAD(cfg_wait_queue);
/* Set vary state for given chpid. */
static void set_chp_logically_online(struct chp_id chpid, int onoff)
{
	chpid_to_chp(chpid)->state = onoff;
}
/* On success return 0 if channel-path is varied offline, 1 if it is varied
* online. Return -ENODEV if channel-path is not registered. */ int chp_get_status(struct chp_id chpid)
{ return (chpid_to_chp(chpid) ? chpid_to_chp(chpid)->state : -ENODEV);
}
/**
 * chp_get_sch_opm - return opm for subchannel
 * @sch: subchannel
 *
 * Calculate and return the operational path mask (opm) based on the chpids
 * used by the subchannel and the status of the associated channel-paths.
 */
u8 chp_get_sch_opm(struct subchannel *sch)
{
	struct chp_id chpid;
	int opm = 0;
	int i;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		/* Shift previous bits up; bit 0 of this round is path i. */
		opm <<= 1;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (chp_get_status(chpid) != 0)
			opm |= 1;
	}
	return opm;
}
EXPORT_SYMBOL_GPL(chp_get_sch_opm);
/**
 * chp_is_registered - check if a channel-path is registered
 * @chpid: channel-path ID
 *
 * Return non-zero if a channel-path with the given chpid is registered,
 * zero otherwise.
 */
int chp_is_registered(struct chp_id chpid)
{
	return chpid_to_chp(chpid) != NULL;
}
/* * Function: s390_vary_chpid * Varies the specified chpid online or offline
 */ staticint s390_vary_chpid(struct chp_id chpid, int on)
/*
 * NOTE(review): this function is truncated in this chunk -- the text breaks
 * off right after the local declarations below, and the next function's
 * kernel-doc comment follows immediately.  The body (and the closing '}')
 * must be recovered from version control.  Also note "staticint" is missing
 * a space and will not compile as-is.
 */
{ char dbf_text[15]; int status;
/**
 * chp_update_desc - update channel-path description
 * @chp: channel-path
 *
 * Update the channel-path description of the specified channel-path
 * including channel measurement related information.
 * Return zero on success, non-zero otherwise.
 */
int chp_update_desc(struct channel_path *chp)
{
	int rc;

	rc = chsc_determine_fmt0_channel_path_desc(chp->chpid, &chp->desc);
	if (rc)
		return rc;

	/*
	 * Fetching the following data is optional. Not all machines or
	 * hypervisors implement the required chsc commands.
	 */
	chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
	chsc_determine_fmt3_channel_path_desc(chp->chpid, &chp->desc_fmt3);
	chsc_get_channel_measurement_chars(chp);

	return 0;
}
/**
 * chp_new - register a new channel-path
 * @chpid: channel-path ID
 *
 * Create and register data structure representing new channel-path. Return
 * zero on success, non-zero otherwise.
 */
int chp_new(struct chp_id chpid)
{
	struct channel_subsystem *css = css_by_id(chpid.cssid);
	struct channel_path *chp;
	int ret = 0;

	mutex_lock(&css->mutex);
	if (chp_is_registered(chpid))
		goto out;
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp) {
		ret = -ENOMEM;
		goto out;
	}
	/* fill in status, etc. */
	chp->chpid = chpid;
	chp->state = 1;
	chp->dev.parent = &css->device;
	chp->dev.groups = chp_attr_groups;
	chp->dev.release = chp_release;
	mutex_init(&chp->lock);

	/* Obtain channel path description and fill it in. */
	ret = chp_update_desc(chp);
	if (ret)
		goto out_free;
	if ((chp->desc.flags & 0x80) == 0) {
		ret = -ENODEV;
		goto out_free;
	}
	dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
	/* make it known to the system */
	ret = device_register(&chp->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register chp%x.%02x: %d\n",
			      chpid.cssid, chpid.id, ret);
		/* device_register() failed: drop the reference it took. */
		put_device(&chp->dev);
		goto out;
	}
	if (css->cm_enabled) {
		ret = chp_add_cmg_attr(chp);
		if (ret) {
			device_unregister(&chp->dev);
			goto out;
		}
	}
	css->chps[chpid.id] = chp;
	goto out;
out_free:
	kfree(chp);
out:
	mutex_unlock(&css->mutex);
	return ret;
}
/** * chp_get_chp_desc - return newly allocated channel-path description * @chpid: channel-path ID * * On success return a newly allocated copy of the channel-path description * data associated with the given channel-path ID. Return %NULL on error.
*/ struct channel_path_desc_fmt0 *chp_get_chp_desc(struct chp_id chpid)
{ struct channel_path *chp; struct channel_path_desc_fmt0 *desc;
chp = chpid_to_chp(chpid); if (!chp) return NULL;
desc = kmalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return NULL;
/**
 * chp_process_crw - process channel-path status change
 * @crw0: channel report-word to handler
 * @crw1: second channel-report word (always NULL)
 * @overflow: crw overflow indication
 *
 * Handle channel-report-words indicating that the status of a channel-path
 * has changed.
 */
static void chp_process_crw(struct crw *crw0, struct crw *crw1,
			    int overflow)
{
	struct chp_id chpid;

	if (overflow) {
		/* CRWs were lost: re-evaluate everything. */
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	/*
	 * Check for solicited machine checks. These are
	 * created by reset channel path and need not be
	 * handled here.
	 */
	if (crw0->slct) {
		CIO_CRW_EVENT(2, "solicited machine check for "
			      "channel path %02X\n", crw0->rsid);
		return;
	}
	chp_id_init(&chpid);
	chpid.id = crw0->rsid;
	switch (crw0->erc) {
	case CRW_ERC_IPARM: /* Path has come. */
	case CRW_ERC_INIT:
		chp_new(chpid);
		chsc_chp_online(chpid);
		break;
	case CRW_ERC_PERRI: /* Path has gone. */
	case CRW_ERC_PERRN:
		chsc_chp_offline(chpid);
		break;
	default:
		CIO_CRW_EVENT(2, "Don't know how to handle erc=%x\n",
			      crw0->erc);
	}
}
/*
 * Return the path mask bit of the first path in @ssd that matches the
 * chpid (and, where valid, the full link address) of @link, or 0 if no
 * path matches.
 */
int chp_ssd_get_mask(struct chsc_ssd_info *ssd, struct chp_link *link)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &link->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & link->fla_mask) != link->fla))
			continue;
		return mask;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(chp_ssd_get_mask);
/* Force chp_info refresh on next call to info_update(). */
static void info_expire(void)
{
	mutex_lock(&info_lock);
	/* Move the expiry into the past so the next update is forced. */
	chp_info_expires = jiffies - 1;
	mutex_unlock(&info_lock);
}
/* Ensure that chp_info is up-to-date. Return zero on success. */
static int info_update(void)
{
	int rc;

	mutex_lock(&info_lock);
	rc = 0;
	if (time_after(jiffies, chp_info_expires)) {
		/* Data is too old, update. */
		rc = sclp_chp_read_info(&chp_info);
		if (!rc)
			chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
	}
	mutex_unlock(&info_lock);

	return rc;
}
/** * chp_info_get_status - retrieve configure status of a channel-path * @chpid: channel-path ID * * On success, return 0 for standby, 1 for configured, 2 for reserved, * 3 for not recognized. Return negative error code on error.
 */ int chp_info_get_status(struct chp_id chpid)
/*
 * NOTE(review): this function is truncated in this chunk -- only the local
 * declarations below survive and the next function starts immediately
 * after.  The body (presumably consulting chp_info under info_lock, given
 * the helpers above) and the closing '}' must be recovered from version
 * control.
 */
{ int rc; int bit;
/* Set configure task for chpid. */
static void cfg_set_task(struct chp_id chpid, enum cfg_task_t cfg)
{
	chp_cfg_task[chpid.cssid][chpid.id] = cfg;
}
/* Fetch the first configure task. Set chpid accordingly. */ staticenum cfg_task_t chp_cfg_fetch_task(struct chp_id *chpid)
{ enum cfg_task_t t = cfg_none;
chp_id_for_each(chpid) {
t = cfg_get_task(*chpid); if (t != cfg_none) break;
}
return t;
}
/* Perform one configure/deconfigure request. Reschedule work function until
 * last request. */
static void cfg_func(struct work_struct *work)
{
	struct chp_id chpid;
	enum cfg_task_t t;
	int rc;

	spin_lock(&cfg_lock);
	t = chp_cfg_fetch_task(&chpid);
	spin_unlock(&cfg_lock);

	switch (t) {
	case cfg_configure:
		rc = sclp_chp_configure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_configure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_online(chpid);
		}
		break;
	case cfg_deconfigure:
		rc = sclp_chp_deconfigure(chpid);
		if (rc)
			CIO_MSG_EVENT(2, "chp: sclp_chp_deconfigure(%x.%02x)="
				      "%d\n", chpid.cssid, chpid.id, rc);
		else {
			info_expire();
			chsc_chp_offline(chpid);
		}
		break;
	case cfg_none:
		/* Get updated information after last change. */
		info_update();
		wake_up_interruptible(&cfg_wait_queue);
		return;
	}
	/* Only clear the task if it was not overwritten meanwhile. */
	spin_lock(&cfg_lock);
	if (t == cfg_get_task(chpid))
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
	/* Process the next pending request, if any. */
	schedule_work(&cfg_work);
}
/**
 * chp_cfg_schedule - schedule chpid configuration request
 * @chpid: channel-path ID
 * @configure: Non-zero for configure, zero for deconfigure
 *
 * Schedule a channel-path configuration/deconfiguration request.
 */
void chp_cfg_schedule(struct chp_id chpid, int configure)
{
	CIO_MSG_EVENT(2, "chp_cfg_sched%x.%02x=%d\n", chpid.cssid, chpid.id,
		      configure);
	spin_lock(&cfg_lock);
	cfg_set_task(chpid, configure ? cfg_configure : cfg_deconfigure);
	spin_unlock(&cfg_lock);
	schedule_work(&cfg_work);
}
/**
 * chp_cfg_cancel_deconfigure - cancel chpid deconfiguration request
 * @chpid: channel-path ID
 *
 * Cancel an active channel-path deconfiguration request if it has not yet
 * been performed.
 */
void chp_cfg_cancel_deconfigure(struct chp_id chpid)
{
	CIO_MSG_EVENT(2, "chp_cfg_cancel:%x.%02x\n", chpid.cssid, chpid.id);
	spin_lock(&cfg_lock);
	if (cfg_get_task(chpid) == cfg_deconfigure)
		cfg_set_task(chpid, cfg_none);
	spin_unlock(&cfg_lock);
}
/* Register the CRW handler and any channel-paths already present. */
static int __init chp_init(void)
{
	struct chp_id chpid;
	int state, ret;

	ret = crw_register_handler(CRW_RSC_CPATH, chp_process_crw);
	if (ret)
		return ret;
	INIT_WORK(&cfg_work, cfg_func);
	/* No usable status info: nothing to register yet, but not fatal. */
	if (info_update())
		return 0;
	/* Register available channel-paths. */
	chp_id_for_each(&chpid) {
		state = chp_info_get_status(chpid);
		if (state == CHP_STATUS_CONFIGURED ||
		    state == CHP_STATUS_STANDBY)
			chp_new(chpid);
	}
	/*
	 * NOTE(review): the chunk under review broke off after the loop; the
	 * function's trailing "return 0;" and closing brace were missing and
	 * have been restored here.  Verify against version control.
	 */
	return 0;
}
/*
 * NOTE(review): residual extraction text follows; it is unrelated to this
 * driver and was left as bare prose (breaking compilation).  Translated
 * from German and preserved as a comment:
 * "The information on this website was carefully compiled to the best of
 * our knowledge.  However, no guarantee is given of the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */