/* Two directives were fused onto one line, leaving SEI_RS_CHPID undefined. */
#define SEI_VF_FLA	0xc0	/* VF flag for Full Link Address */
#define SEI_RS_CHPID	0x4	/* 4 in RS field indicates CHPID */
/*
 * Blocking notifier chain for CHSC events; consumers attach and detach
 * via chsc_notifier_register()/chsc_notifier_unregister() below.
 */
static BLOCKING_NOTIFIER_HEAD(chsc_notifiers);
/* Attach @nb to the chsc_notifiers blocking notifier chain. */
int chsc_notifier_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&chsc_notifiers, nb);
}
EXPORT_SYMBOL(chsc_notifier_register);
/* Detach @nb from the chsc_notifiers blocking notifier chain. */
int chsc_notifier_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&chsc_notifiers, nb);
}
EXPORT_SYMBOL(chsc_notifier_unregister);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
	case 0x0106:		/* "Wrong Channel Parm" for the op 0x003d */
		return -EOPNOTSUPP;
	case 0x000b:
	case 0x0107:		/* "Channel busy" for the op 0x003d */
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	case 0x0108:		/* "HW limit exceeded" for the op 0x003d */
		return -EUSERS;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{ struct chsc_ssd_area *ssd_area; unsignedlong flags; int ccode; int ret; int i; int mask;
ccode = chsc(ssd_area); /* Check response. */ if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY; goto out;
}
ret = chsc_error_from_response(ssd_area->response.code); if (ret != 0) {
CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
schid.ssid, schid.sch_no,
ssd_area->response.code); goto out;
} if (!ssd_area->sch_valid) {
ret = -ENODEV; goto out;
} /* Copy data */
ret = 0;
memset(ssd, 0, sizeof(struct chsc_ssd_info)); if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
(ssd_area->st != SUBCHANNEL_TYPE_MSG)) goto out;
ssd->path_mask = ssd_area->path_mask;
ssd->fla_valid_mask = ssd_area->fla_valid_mask; for (i = 0; i < 8; i++) {
mask = 0x80 >> i; if (ssd_area->path_mask & mask) {
chp_id_init(&ssd->chpid[i]);
ssd->chpid[i].id = ssd_area->chpid[i];
} if (ssd_area->fla_valid_mask & mask)
ssd->fla[i] = ssd_area->fla[i];
}
out:
spin_unlock_irqrestore(&chsc_page_lock, flags); return ret;
}
/** * chsc_ssqd() - store subchannel QDIO data (SSQD) * @schid: id of the subchannel on which SSQD is performed * @ssqd: request and response block for SSQD * * Returns 0 on success.
*/ int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
memset(ssqd, 0, sizeof(*ssqd));
ssqd->request.length = 0x0010;
ssqd->request.code = 0x0024;
ssqd->first_sch = schid.sch_no;
ssqd->last_sch = schid.sch_no;
ssqd->ssid = schid.ssid;
sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
link->chpid.id);
CIO_TRACE_EVENT( 2, dbf_txt); if (link->fla != 0) {
sprintf(dbf_txt, "fla%x", link->fla);
CIO_TRACE_EVENT( 2, dbf_txt);
} /* Wait until previous actions have settled. */
css_wait_for_slow_path(); /* * I/O resources may have become accessible. * Scan through all subchannels that may be concerned and * do a validation on those. * The more information we have (info), the less scanning * will we have to do.
*/
for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
css_schedule_reprobe();
}
CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x, iq=%02x)\n",
sei_area->rs, sei_area->rsid, sei_area->ccdf[0]);
/* Ignore NULL Link Incident Records. */ if (lir->iq.null) return;
/* Inform user that a link requires maintenance actions because it has * become degraded or not operational. Note that this log message is
* the primary intention behind a Link Incident Record. */
/* Handle notification-type 2 (PCI) store-event-information content. */
static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}
/* Dispatch notification-type 0 store-event-information by content code. */
static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 3: /* ap config changed */
		chsc_process_sei_ap_cfg_chg(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	case 15: /* FCES event notification */
		chsc_process_sei_fces_event(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}
while (true) {
memset(sei, 0, sizeof(*sei));
sei->request.length = 0x0010;
sei->request.code = 0x000e; if (!ntsm_unsupported)
sei->ntsm = ntsm;
if (chsc(sei)) break;
if (sei->response.code != 0x0001) {
CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
sei->response.code, sei->ntsm);
if (sei->response.code == 3 && sei->ntsm) { /* Fallback for old firmware. */
ntsm_unsupported = 1; continue;
} break;
}
CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt); switch (sei->nt) { case 0:
chsc_process_sei_nt0(&sei->u.nt0_area); break; case 2:
chsc_process_sei_nt2(&sei->u.nt2_area); break; default:
CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt); break;
}
if (!(sei->u.nt0_area.flags & 0x80)) break;
}
}
/* * Handle channel subsystem related CRWs. * Use store event information to find out what's going on. * * Note: Access to sei_page is serialized through machine check handler * thread, so no need for locking.
*/ staticvoid chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{ struct chsc_sei *sei = sei_page;
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	if (!on) {
		/* Vary offline: redo path verification on affected devices. */
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);
		return 0;
	}
	/* Try to update the channel path description, then reverify paths. */
	chp_update_desc(chp);
	for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
				   NULL, &chpid);
	css_schedule_reprobe();
	return 0;
}
/* Remove channel-measurement attributes from every registered channel path. */
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}
/*
 * Add channel-measurement attributes to every registered channel path.
 * On failure, roll back the attributes added so far and return the error.
 */
static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	/* Undo the attributes added before the failing chpid. */
	while (i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}
for (i = 0; i < CSS_NUM_CUB_PAGES; i++)
secm_area->cub[i] = (__force dma64_t)virt_to_dma32(css->cub[i]); for (i = 0; i < CSS_NUM_ECUB_PAGES; i++)
secm_area->ecub[i] = virt_to_dma64(css->ecub[i]);
secm_area->operation_code = enable ? 0 : 1;
ccode = chsc(secm_area); if (ccode > 0) {
ret = (ccode == 3) ? -ENODEV : -EBUSY; goto out;
}
switch (secm_area->response.code) { case 0x0102: case 0x0103:
ret = -EINVAL; break; default:
ret = chsc_error_from_response(secm_area->response.code);
} if (ret != 0)
CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
secm_area->response.code);
out:
spin_unlock_irqrestore(&chsc_page_lock, flags); return ret;
}
/*
 * Allocate the channel-utilization (CUB) and extended CUB pages.
 * On -ENOMEM, pages allocated so far are left in place; the caller
 * releases them via cub_free() (see the chsc_secm() out path).
 */
static int cub_alloc(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i < CSS_NUM_CUB_PAGES; i++) {
		css->cub[i] = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub[i])
			return -ENOMEM;
	}
	for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) {
		css->ecub[i] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!css->ecub[i])
			return -ENOMEM;
	}
	return 0;
}
/*
 * Free the CUB and extended CUB pages and clear the pointers so a
 * repeated call is harmless (free_page() ignores NULL/0).
 */
static void cub_free(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i < CSS_NUM_CUB_PAGES; i++) {
		free_page((unsigned long)css->cub[i]);
		css->cub[i] = NULL;
	}
	for (i = 0; i < CSS_NUM_ECUB_PAGES; i++) {
		free_page((unsigned long)css->ecub[i]);
		css->ecub[i] = NULL;
	}
}
/*
 * Enable or disable channel measurement for @css.  Allocates the
 * measurement pages on first enable, toggles SECM, and keeps the
 * sysfs cmg attributes in sync.  Returns 0 on success or a negative
 * errno; the measurement pages are released whenever measurement
 * ends up disabled.
 */
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret = 0;

	if (enable && !css->cm_enabled) {
		ret = cub_alloc(css);
		if (ret)
			goto out;
	}
	ret = __chsc_do_secm(css, enable);
	if (ret)
		goto out;
	css->cm_enabled = enable;
	if (!css->cm_enabled) {
		chsc_remove_cmg_attr(css);
		goto out;
	}
	ret = chsc_add_cmg_attr(css);
	if (ret) {
		/* Attribute creation failed: back out the enable. */
		__chsc_do_secm(css, 0);
		css->cm_enabled = 0;
	}
out:
	if (!css->cm_enabled)
		cub_free(css);
	return ret;
}
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, int c, int m, void *page)
{ struct chsc_scpd *scpd_area; int ccode, ret;
if ((rfmt == 1 || rfmt == 0) && c == 1 &&
!css_general_characteristics.fcs) return -EINVAL; if ((rfmt == 2) && !css_general_characteristics.cib) return -EINVAL; if ((rfmt == 3) && !css_general_characteristics.util_str) return -EINVAL;
/** * chsc_scud() - Store control-unit description. * @cu: number of the control-unit * @esm: 8 1-byte endpoint security mode values * @esm_valid: validity mask for @esm * * Interface to retrieve information about the endpoint security * modes for up to 8 paths of a control unit. * * Returns 0 on success.
*/ int chsc_scud(u16 cu, u64 *esm, u8 *esm_valid)
{ struct chsc_scud *scud = chsc_page; int ret;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.