// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM * Copyright (C) 2001 Paul Mackerras <paulus@au.ibm.com>, IBM * Copyright (C) 2004 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp. * Copyright (C) 2004 IBM Corporation * * Additional Author(s): * Ryan S. Arnold <rsa@us.ibm.com>
*/
/*
 * Wait this long per iteration while trying to push buffered data to the
 * hypervisor before allowing the tty to complete a close operation.
 */
#define HVC_CLOSE_WAIT (HZ/100) /* 1/10 of a second */

/*
 * These sizes are most efficient for vio, because they are the
 * native transfer size. We could make them selectable in the
 * future to better deal with backends that want other buffer sizes.
 */
#define N_OUTBUF	16
#define N_INBUF		16

/* dynamic list of hvc_struct instances */
static LIST_HEAD(hvc_structs);

/*
 * Protect the list of hvc_struct instances from inserts and removals during
 * list traversal.
 */
static DEFINE_MUTEX(hvc_structs_mutex);
/*
 * This value is used to assign a tty->index value to a hvc_struct based
 * upon order of exposure via hvc_probe(), when we can not match it to
 * a console candidate registered with hvc_instantiate().
 */
static int last_hvc = -1;
/*
 * Do not call this function with either the hvc_structs_mutex or the hvc_struct
 * lock held.  If successful, this function increments the kref reference
 * count against the target hvc_struct so it should be released when finished.
 *
 * NOTE(review): truncated fragment — only the signature and the local
 * declarations survive in this copy; the lookup body that walks hvc_structs
 * is missing.  The fused tokens "staticstruct" and "unsignedlong" are
 * preserved as found; restore this function from the upstream source.
 */
staticstruct hvc_struct *hvc_get_by_index(int index)
{
	struct hvc_struct *hp;
	unsignedlong flags;
/*
 * Wait for the console to flush before writing more to it.  This sleeps.
 *
 * Returns the result of the waiting __hvc_flush() call for this adapter's
 * vterm.
 */
static int hvc_flush(struct hvc_struct *hp)
{
	return __hvc_flush(hp->ops, hp->vtermno, true);
}
/* * Initial console vtermnos for console API usage prior to full console * initialization. Any vty adapter outside this range will not have usable * console interfaces but can still be used as a tty device. This has to be * static because kmalloc will not work during early console init.
*/ staticconststruct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES]; static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
{[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
/* * Console APIs, NOT TTY. These APIs are available immediately when * hvc_console_setup() finds adapters.
*/
staticvoid hvc_console_print(struct console *co, constchar *b, unsigned count)
{ char c[N_OUTBUF] __ALIGNED__; unsigned i = 0, n = 0; int r, donecr = 0, index = co->index;
/* Console access attempt outside of acceptable console range. */ if (index >= MAX_NR_HVC_CONSOLES) return;
/* This console adapter was removed so it is not usable. */ if (vtermnos[index] == -1) return;
while (count > 0 || i > 0) { if (count > 0 && i < sizeof(c)) { if (b[n] == '\n' && !donecr) {
c[i++] = '\r';
donecr = 1;
} else {
c[i++] = b[n++];
donecr = 0;
--count;
}
} else {
r = cons_ops[index]->put_chars(vtermnos[index], c, i); if (r <= 0) { /* throw away characters on error
* but spin in case of -EAGAIN */ if (r != -EAGAIN) {
i = 0;
} else {
hvc_console_flush(cons_ops[index],
vtermnos[index]);
}
} elseif (r > 0) {
i -= r; if (i > 0)
memmove(c, c+r, i);
}
}
}
hvc_console_flush(cons_ops[index], vtermnos[index]);
}
/*
 * NOTE(review): truncated fragment of hvc_console_device() — the rest of the
 * body (returning the tty driver for this console index) is missing from
 * this copy.  The fused token "staticstruct" is preserved as found; restore
 * from the upstream source.
 */
staticstruct tty_driver *hvc_console_device(struct console *c, int *index)
{
	if (vtermnos[c->index] == -1)
		return NULL;
/*
 * Early console initialization.  Precedes driver initialization.
 *
 * (1) we are first, and the user specified another driver
 * -- index will remain -1
 * (2) we are first and the user specified no driver
 * -- index will be set to 0, then we will fail setup.
 * (3) we are first and the user specified our driver
 * -- index will be set to user specified driver, and we will fail
 * (4) we are after driver, and this initcall will register us
 * -- if the user didn't specify a driver then the console will match
 *
 * Note that for cases 2 and 3, we will match later when the io driver
 * calls hvc_instantiate() and call register again.
 */
static int __init hvc_console_init(void)
{
	register_console(&hvc_console);
	return 0;
}
console_initcall(hvc_console_init);
/*
 * Register the hvc console if @index is the console index the user asked
 * for and the console is not already registered.
 */
static void hvc_check_console(int index)
{
	/* Already registered, bail out */
	if (console_is_registered(&hvc_console))
		return;

	/* If this index is what the user requested, then register
	 * now (setup won't fail at this point).  It's ok to just
	 * call register again if previously .setup failed.
	 */
	if (index == hvc_console.index)
		register_console(&hvc_console);
}
/*
 * hvc_instantiate() is an early console discovery method which locates
 * consoles prior to the vio subsystem discovering them.  Hotplugged
 * vty adapters do NOT get an hvc_instantiate() callback since they
 * appear after early console init.
 *
 * Returns 0 on success, or -1 if @index is out of range, already bound to
 * a vterm, or already claimed by a tty.
 */
int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
{
	struct hvc_struct *hp;

	if (index < 0 || index >= MAX_NR_HVC_CONSOLES)
		return -1;

	if (vtermnos[index] != -1)
		return -1;

	/* make sure no tty has been registered in this index */
	hp = hvc_get_by_index(index);
	if (hp) {
		tty_port_put(&hp->port);
		return -1;
	}

	vtermnos[index] = vtermno;
	cons_ops[index] = ops;

	/* check if we need to re-register the kernel console */
	hvc_check_console(index);

	return 0;
}
EXPORT_SYMBOL_GPL(hvc_instantiate);
/*
 * Wake the sleeping khvcd thread.
 *
 * NOTE: hvc_kicked must be set before waking the task — khvcd re-checks
 * this flag after being scheduled (see the khvcd loop below), so setting
 * it first guarantees the kick is not lost.  Do not reorder.
 */
void hvc_kick(void)
{
	hvc_kicked = 1;
	wake_up_process(hvc_task);
}
EXPORT_SYMBOL_GPL(hvc_kick);
/*
 * The TTY interface won't be used until after the vio layer has exposed the vty
 * adapter to the kernel.
 *
 * NOTE(review): damaged region — hvc_open() below is missing its final
 * "return rc;" and closing brace; the lines that follow this block belong to
 * the middle of hvc_close().  Fused tokens ("staticint", "unsignedlong") are
 * preserved as found; restore this area from the upstream source.
 */
staticint hvc_open(struct tty_struct *tty, struct file * filp)
{
	struct hvc_struct *hp = tty->driver_data;
	unsignedlong flags;
	int rc = 0;

	spin_lock_irqsave(&hp->port.lock, flags);
	/* Check and then increment for fast path open. */
	if (hp->port.count++ > 0) {
		spin_unlock_irqrestore(&hp->port.lock, flags);
		hvc_kick();
		return 0;
	} /* else count == 0 */
	spin_unlock_irqrestore(&hp->port.lock, flags);

	tty_port_tty_set(&hp->port, tty);

	if (hp->ops->notifier_add)
		rc = hp->ops->notifier_add(hp, hp->data);

	/*
	 * If the notifier fails we return an error.  The tty layer
	 * will call hvc_close() after a failed open but we don't want to clean
	 * up there so we'll clean up here and clear out the previously set
	 * tty fields and return the kref reference.
	 */
	if (rc) {
		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
	} else {
		/* We are ready... raise DTR/RTS */
		if (C_BAUD(tty))
			if (hp->ops->dtr_rts)
				hp->ops->dtr_rts(hp, true);
		tty_port_set_initialized(&hp->port, true);
	}

	/* Force wakeup of the polling thread */
	hvc_kick();
/*
 * NOTE(review): orphaned fragments — the block below is the tail of
 * hvc_close() (its header and opening lock acquisition were lost in this
 * copy), followed by the tail of hvc_hangup() whose header is also missing.
 * Code tokens are preserved as found; restore from the upstream source.
 */
	if (--hp->port.count == 0) {
		spin_unlock_irqrestore(&hp->port.lock, flags);
		/* We are done with the tty pointer now. */
		tty_port_tty_set(&hp->port, NULL);

		if (!tty_port_initialized(&hp->port))
			return;

		if (C_HUPCL(tty))
			if (hp->ops->dtr_rts)
				hp->ops->dtr_rts(hp, false);

		if (hp->ops->notifier_del)
			hp->ops->notifier_del(hp, hp->data);

		/* cancel pending tty resize work */
		cancel_work_sync(&hp->tty_resize);

		/*
		 * Chain calls chars_in_buffer() and returns immediately if
		 * there is no buffered data otherwise sleeps on a wait queue
		 * waking periodically to check chars_in_buffer().
		 */
		tty_wait_until_sent(tty, HVC_CLOSE_WAIT);
		tty_port_set_initialized(&hp->port, false);
	} else {
		if (hp->port.count < 0)
			printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
				hp->vtermno, hp->port.count);
		spin_unlock_irqrestore(&hp->port.lock, flags);
	}
}

/* NOTE(review): tail of hvc_hangup() begins here, header missing. */
	/* cancel pending tty resize work */
	cancel_work_sync(&hp->tty_resize);

	spin_lock_irqsave(&hp->port.lock, flags);

	/*
	 * The N_TTY line discipline has problems such that in a close vs
	 * open->hangup case this can be called after the final close so prevent
	 * that from happening for now.
	 */
	if (hp->port.count <= 0) {
		spin_unlock_irqrestore(&hp->port.lock, flags);
		return;
	}

	if (hp->ops->notifier_hangup)
		hp->ops->notifier_hangup(hp, hp->data);
}
/*
 * Push buffered characters whether they were just recently buffered or waiting
 * on a blocked hypervisor.  Call this function with hp->lock held.
 *
 * Returns the backend put_chars() result: 0 is returned for 0/-EAGAIN
 * ("retry later", buffer kept, wakeup requested); other errors discard the
 * buffer; a positive value is the byte count consumed.
 */
static int hvc_push(struct hvc_struct *hp)
{
	int n;

	n = hp->ops->put_chars(hp->vtermno, hp->outbuf, hp->n_outbuf);
	if (n <= 0) {
		if (n == 0 || n == -EAGAIN) {
			hp->do_wakeup = 1;
			return 0;
		}
		/* throw away output on error; this happens when
		   there is no session connected to the vterm. */
		hp->n_outbuf = 0;
	} else
		hp->n_outbuf -= n;
	if (hp->n_outbuf > 0)
		memmove(hp->outbuf, hp->outbuf + n, hp->n_outbuf);
	else
		hp->do_wakeup = 1;

	/* restored: the copy fell off the end of this non-void function */
	return n;
}
/*
 * This is actually a contract between the driver and the tty layer outlining
 * how much write room the driver can guarantee will be sent OR BUFFERED.  This
 * driver MUST honor the return value.
 *
 * NOTE(review): truncated fragment — only the signature and first declaration
 * survive; the room computation and return are missing from this copy.  The
 * fused token "staticunsignedint" is preserved as found.
 */
staticunsignedint hvc_write_room(struct tty_struct *tty)
{
	struct hvc_struct *hp = tty->driver_data;
/* * timeout will vary between the MIN and MAX values defined here. By default * and during console activity we will use a default MIN_TIMEOUT of 10. When * the console is idle, we increase the timeout value on each pass through * msleep until we reach the max. This may be noticeable as a brief (average * one second) delay on the console before the console responds to input when * there has been no input for some time.
*/ #define MIN_TIMEOUT (10) #define MAX_TIMEOUT (2000) static u32 timeout = MIN_TIMEOUT;
/* * Maximum number of bytes to get from the console driver if hvc_poll is * called from driver (and can't sleep). Any more than this and we break * and start polling with khvcd. This value was derived from an OpenBMC * console with the OPAL driver that results in about 0.25ms interrupts off * latency.
*/ #define HVC_ATOMIC_READ_MAX 128
/*
 * NOTE(review): orphaned fragment — these lines are the interior of
 * __hvc_poll(); the function header, local declarations, and the initial
 * write-push section were lost in this copy (the locals poll_mask, flags,
 * tty, buf, count, n, i, read_total, written_total and the labels out/bail
 * are declared/defined in the missing parts).  Fused "elseif" tokens are
 * preserved as found; restore from the upstream source.
 */
	/* Reschedule us if still some write pending */
	if (hp->n_outbuf > 0) {
		poll_mask |= HVC_POLL_WRITE;
		/* If hvc_push() was not able to write, sleep a few msecs */
		timeout = (written_total) ? 0 : MIN_TIMEOUT;
	}

	if (may_sleep) {
		spin_unlock_irqrestore(&hp->lock, flags);
		cond_resched();
		spin_lock_irqsave(&hp->lock, flags);
	}

	/* No tty attached, just skip */
	tty = tty_port_tty_get(&hp->port);
	if (tty == NULL)
		goto bail;

	/* Now check if we can get data (are we throttled ?) */
	if (tty_throttled(tty))
		goto out;

	/* If we aren't notifier driven and aren't throttled, we always
	 * request a reschedule
	 */
	if (!hp->irq_requested)
		poll_mask |= HVC_POLL_READ;

 read_again:
	/* Read data if any */
	count = tty_buffer_request_room(&hp->port, N_INBUF);

	/* If flip is full, just reschedule a later read */
	if (count == 0) {
		poll_mask |= HVC_POLL_READ;
		goto out;
	}

	n = hp->ops->get_chars(hp->vtermno, buf, count);
	if (n <= 0) {
		/* Hangup the tty when disconnected from host */
		if (n == -EPIPE) {
			spin_unlock_irqrestore(&hp->lock, flags);
			tty_hangup(tty);
			spin_lock_irqsave(&hp->lock, flags);
		} elseif ( n == -EAGAIN ) {
			/*
			 * Some back-ends can only ensure a certain min
			 * num of bytes read, which may be > 'count'.
			 * Let the tty clear the flip buff to make room.
			 */
			poll_mask |= HVC_POLL_READ;
		}
		goto out;
	}

	for (i = 0; i < n; ++i) {
#ifdef CONFIG_MAGIC_SYSRQ
		if (hp->index == hvc_console.index) {
			/* Handle the SysRq Hack */
			/* XXX should support a sequence */
			if (buf[i] == '\x0f') { /* ^O */
				/* if ^O is pressed again, reset
				 * sysrq_pressed and flip ^O char */
				sysrq_pressed = !sysrq_pressed;
				if (sysrq_pressed)
					continue;
			} elseif (sysrq_pressed) {
				handle_sysrq(buf[i]);
				sysrq_pressed = 0;
				continue;
			}
		}
#endif /* CONFIG_MAGIC_SYSRQ */
		tty_insert_flip_char(&hp->port, buf[i], 0);
	}
	read_total += n;

	if (may_sleep) {
		/* Keep going until the flip is full */
		spin_unlock_irqrestore(&hp->lock, flags);
		cond_resched();
		spin_lock_irqsave(&hp->lock, flags);
		goto read_again;
	} elseif (read_total < HVC_ATOMIC_READ_MAX) {
		/* Break and defer if it's a large read in atomic */
		goto read_again;
	}
/*
 * Poll a single adapter without sleeping; thin wrapper that invokes
 * __hvc_poll() in its non-sleeping mode and reports its poll mask.
 */
int hvc_poll(struct hvc_struct *hp)
{
	return __hvc_poll(hp, false);
}
EXPORT_SYMBOL_GPL(hvc_poll);
/**
 * __hvc_resize() - Update terminal window size information.
 * @hp: HVC console pointer
 * @ws: Terminal window size structure
 *
 * Records @ws in the hvc structure of @hp, then queues the deferred tty
 * resize update.
 *
 * Locking: none taken here; the caller MUST hold hp->lock.
 */
void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
{
	hp->ws = ws;
	schedule_work(&hp->tty_resize);
}
EXPORT_SYMBOL_GPL(__hvc_resize);
/*
 * This kthread is either polling or interrupt driven.  This is determined by
 * calling hvc_poll() who determines whether a console adapter support
 * interrupts.
 *
 * NOTE(review): damaged copy — the final "return 0;" and closing brace are
 * missing, and the idle back-off that grows 'timeout' toward MAX_TIMEOUT
 * (described in the comment above the MIN/MAX_TIMEOUT defines) appears to
 * have been lost from the sleep branch.  Fused tokens ("staticint",
 * "unsignedlong") are preserved as found; restore from the upstream source.
 */
staticint khvcd(void *unused)
{
	int poll_mask;
	struct hvc_struct *hp;

	set_freezable();
	do {
		poll_mask = 0;
		hvc_kicked = 0;
		try_to_freeze();
		wmb();
		if (!cpus_are_in_xmon()) {
			mutex_lock(&hvc_structs_mutex);
			list_for_each_entry(hp, &hvc_structs, next) {
				poll_mask |= __hvc_poll(hp, true);
				cond_resched();
			}
			mutex_unlock(&hvc_structs_mutex);
		} else
			poll_mask |= HVC_POLL_READ;
		if (hvc_kicked)
			continue;
		set_current_state(TASK_INTERRUPTIBLE);
		if (!hvc_kicked) {
			if (poll_mask == 0)
				schedule();
			else {
				unsignedlong j_timeout;

				/*
				 * We don't use msleep_interruptible otherwise
				 * "kick" will fail to wake us up
				 */
				j_timeout = msecs_to_jiffies(timeout) + 1;
				schedule_timeout_interruptible(j_timeout);
			}
		}
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());
/*
 * NOTE(review): orphaned fragments — the index-allocation code below is the
 * interior of hvc_alloc() (its header, allocation, and the code around it
 * were lost), and the kref/vhangup code after it is the tail of hvc_remove()
 * whose header is also missing.  Code tokens are preserved as found; restore
 * from the upstream source.
 */
	/*
	 * find index to use:
	 * see if this vterm id matches one registered for console.
	 */
	for (i=0; i < MAX_NR_HVC_CONSOLES; i++)
		if (vtermnos[i] == hp->vtermno &&
		    cons_ops[i] == hp->ops)
			break;

	if (i >= MAX_NR_HVC_CONSOLES) {
		/* find 'empty' slot for console */
		for (i = 0; i < MAX_NR_HVC_CONSOLES && vtermnos[i] != -1; i++) {
		}

		/* no matching slot, just use a counter */
		if (i == MAX_NR_HVC_CONSOLES)
			i = ++last_hvc + MAX_NR_HVC_CONSOLES;
	}

	hp->index = i;
	if (i < MAX_NR_HVC_CONSOLES) {
		cons_ops[i] = ops;
		vtermnos[i] = vtermno;
	}

/* NOTE(review): tail of hvc_remove() begins here, header missing. */
	/*
	 * We 'put' the instance that was grabbed when the kref instance
	 * was initialized using kref_init().  Let the last holder of this
	 * kref cause it to be removed, which will probably be the tty_vhangup
	 * below.
	 */
	tty_port_put(&hp->port);

	/*
	 * This function call will auto chain call hvc_hangup.
	 */
	if (tty) {
		tty_vhangup(tty);
		tty_kref_put(tty);
	}
}
EXPORT_SYMBOL_GPL(hvc_remove);
/* Driver initialization: called as soon as someone uses hvc_alloc(). */
/*
 * NOTE(review): truncated fragment — hvc_init() continues past this point
 * in the original file (driver setup, kthread creation, error unwinding);
 * only the allocation prologue survives here.  The fused token "staticint"
 * is preserved as found.
 */
staticint hvc_init(void)
{
	struct tty_driver *drv;
	int err;

	/* We need more than hvc_count adapters due to hotplug additions. */
	drv = tty_alloc_driver(HVC_ALLOC_TTY_ADAPTERS, TTY_DRIVER_REAL_RAW |
			TTY_DRIVER_RESET_TERMIOS);
	if (IS_ERR(drv)) {
		err = PTR_ERR(drv);
		goto out;
	}
/*
 * NOTE(review): the trailing text that followed this point was a German
 * website disclaimer accidentally appended to the file ("The information on
 * this web page was carefully compiled to the best of our knowledge.
 * However, neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Remark: the syntax colouring and the
 * measurement are still experimental.").  It is not part of this source
 * file and should be deleted entirely.
 */