/* * Function: cio_cancel * Issues a "Cancel Subchannel" on the specified subchannel * Note: We don't need any fancy intparms and flags here * since xsch is executed synchronously. * Only for common I/O internal use as for now.
*/ int
cio_cancel (struct subchannel *sch)
{ int ccode;
switch (ccode) { case 0: /* success */ /* Update information in scsw. */ if (cio_update_schib(sch)) return -ENODEV; return 0; case 1: /* status pending */ return -EBUSY; case 2: /* not applicable */ return -EINVAL; default: /* not oper */ return -ENODEV;
}
}
EXPORT_SYMBOL_GPL(cio_cancel);
/**
 * cio_cancel_halt_clear - Cancel running I/O by performing cancel, halt
 * and clear ordinally if subchannel is valid.
 * @sch: subchannel on which to perform the cancel_halt_clear operation
 * @iretry: the number of the times remained to retry the next operation
 *
 * This should be called repeatedly since halt/clear are asynchronous
 * operations. We do one try with cio_cancel, three tries with cio_halt,
 * 255 tries with cio_clear. The caller should initialize @iretry with
 * the value 255 for its first call to this, and keep using the same
 * @iretry in the subsequent calls until it gets a non -EBUSY return.
 *
 * Returns 0 if device now idle, -ENODEV for device not operational,
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending), and -EIO if out of retries.
 */
int cio_cancel_halt_clear(struct subchannel *sch, int *iretry)
{
	int ret;

	if (cio_update_schib(sch))
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) &&
	    !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		/* cancel (xsch) only exists for command mode, not transport mode. */
		if (!scsw_is_tm(&sch->schib.scsw)) {
			ret = cio_cancel(sch);
			if (ret != -EINVAL)
				return ret;
		}
		/*
		 * Cancel io unsuccessful or not applicable (transport mode).
		 * Continue with asynchronous instructions.
		 */
		*iretry = 3;	/* 3 halt retries. */
	}
	/* Stage 2: halt io. */
	if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) {
		if (*iretry) {
			*iretry -= 1;
			ret = cio_halt(sch);
			/* halt started: report -EBUSY so the caller retries later. */
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* Halt io unsuccessful. */
		*iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (*iretry) {
		*iretry -= 1;
		ret = cio_clear(sch);
		/* clear started: report -EBUSY so the caller retries later. */
		return (ret == 0) ? -EBUSY : ret;
	}
	/* Function was unsuccessful */
	return -EIO;
}
EXPORT_SYMBOL_GPL(cio_cancel_halt_clear);
/*
 * cio_commit_config - apply configuration to the subchannel
 *
 * Copies the desired configuration bits into a local schib, issues msch
 * and verifies the result, retrying up to 5 times. Returns 0 on success,
 * -ENODEV if the subchannel vanished, -EBUSY/-EAGAIN if retries were
 * exhausted, or the msch error code on a program check.
 */
int cio_commit_config(struct subchannel *sch)
{
	int ccode, retry, ret = 0;
	struct schib schib;
	struct irb irb;

	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			/* Re-read and verify the change actually took effect. */
			if (stsch(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			/* msch accepted, but the effective configuration does
			 * not (yet) match the request — retry. */
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			ret = -EBUSY;
			/* Consume the pending status, then retry the msch. */
			if (tsch(sch->schid, &irb))
				return ret;
			break;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cio_commit_config);
/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
	struct schib schib;

	/*
	 * Validate the stored schib BEFORE committing it: the previous code
	 * copied an invalid schib into sch->schib and then returned -EACCES,
	 * contradicting the documented "-ENODEV otherwise" contract that
	 * callers (e.g. cio_cancel, cio_cancel_halt_clear) rely on.
	 */
	if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	memcpy(&sch->schib, &schib, sizeof(schib));
	return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);
/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	int ret;

	/*
	 * Request an enabled subchannel with the subchannel's ISC and the
	 * given interruption parameter. Previously @intparm was unused and
	 * config.ena was never set, so the commit below could not actually
	 * enable anything.
	 */
	sch->config.ena = 1;
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;

	ret = cio_commit_config(sch);
	if (ret == -EIO) {
		/*
		 * Got a program check in msch. Try without
		 * the concurrent sense bit the next time.
		 */
		sch->config.csense = 0;
		ret = cio_commit_config(sch);
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);
/** * cio_disable_subchannel - disable a subchannel. * @sch: subchannel to disable
*/ int cio_disable_subchannel(struct subchannel *sch)
{ int ret;
/*
 * Use cio_tsch to update the subchannel status and call the interrupt handler
 * if status had been pending. Called with the subchannel's lock held.
 */
void cio_tsch(struct subchannel *sch)
{
	struct irb *irb;
	int irq_context;

	irb = this_cpu_ptr(&cio_irb);
	/* Store interrupt response block to lowcore. */
	if (tsch(sch->schid, irb) != 0)
		/* Not status pending or not operational. */
		return;
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	/* Call interrupt handler with updated status. */
	irq_context = in_interrupt();
	if (!irq_context) {
		/*
		 * Not running in interrupt context: fake it so the handler
		 * executes with bottom halves disabled, and unwind in the
		 * matching order after the call.
		 */
		local_bh_disable();
		irq_enter();
	}
	kstat_incr_irq_this_cpu(IO_INTERRUPT);
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		/* No driver handler attached: just account the interrupt. */
		inc_irq_stat(IRQIO_CIO);
	if (!irq_context) {
		irq_exit();
		_local_bh_enable();
	}
}
init_subchannel_id(&schid); if (console_irq != -1) { /* VM provided us with the irq number of the console. */
schid.sch_no = console_irq; if (stsch(schid, &schib) != 0 ||
(schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) return -1;
console_devno = schib.pmcw.dev;
} elseif (console_devno != -1) { /* At least the console device number is known. */
for_each_subchannel(cio_test_for_console, NULL);
} return console_irq;
}
/* Tell whether @schid refers to the console subchannel (0 if no console). */
int cio_is_console(struct subchannel_id schid)
{
	return console_sch ? schid_equal(&schid, &console_sch->schid) : 0;
}
/* Register the early (console) subchannel, if any, with the css core. */
void cio_register_early_subchannels(void)
{
	int ret;

	if (!console_sch)
		return;

	ret = css_register_subchannel(console_sch);
	if (ret)
		/* Registration failed: drop the device reference. */
		put_device(&console_sch->dev);
}
#endif /* CONFIG_CCW_CONSOLE */
/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;
	/*
	 * NOTE(review): the remainder of this function body is missing from
	 * this copy — the orb is never initialized, no start instruction is
	 * issued, and cc is never set or returned. Restore the body from the
	 * reference implementation; do not ship as-is.
	 */
/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	/* Only valid while the subchannel runs in transport mode (tm.b set). */
	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);
	switch (cc) {
	case 0:
	case 2:
		/* Executed, or not applicable: treat both as success. */
		return 0;
	case 1:
		/* Status pending. */
		return -EBUSY;
	default:
		/* Not operational. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL_GPL(cio_tm_intrg);
Messung V0.5
¤ Dauer der Verarbeitung: 0.14 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.