// SPDX-License-Identifier: GPL-2.0+ /* * Driver for AMBA serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Copyright 1999 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd. * Copyright (C) 2010 ST-Ericsson SA * * This is a generic driver for ARM AMBA-type serial ports. They * have a lot of 16550-like features, but are not register compatible. * Note that although they do have CTS, DCD and DSR inputs, they do * not have an RI input, nor do they have DTR or RTS outputs. If * required, these have to be supplied via some other means (eg, GPIO) * and hooked into this driver.
*/
/* There is by now at least one vendor with differing details, so handle it */ struct vendor_data { const u16 *reg_offset; unsignedint ifls; unsignedint fr_busy; unsignedint fr_dsr; unsignedint fr_cts; unsignedint fr_ri; unsignedint inv_fr; bool access_32b; bool oversampling; bool dma_threshold; bool cts_event_workaround; bool always_enabled; bool fixed_options;
if (uap->port.iotype == UPIO_MEM32)
writel_relaxed(val, addr); else
writew_relaxed(val, addr);
}
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 *
 * NOTE(review): this copy of the function appears truncated - the loop
 * body never actually hands the character to the TTY layer, the sysrq
 * handling that the "int sysrq" local suggests is absent, and neither
 * the loop's closing brace nor a "return fifotaken" statement is
 * visible. Confirm against the upstream source before relying on it.
 */
staticint pl011_fifo_to_tty(struct uart_amba_port *uap)
{ unsignedint ch, fifotaken; int sysrq;
u16 status;
u8 flag;
/* Bounded drain: take at most 256 characters per call. */
for (fifotaken = 0; fifotaken != 256; fifotaken++) {
/* Stop as soon as the flag register reports the RX FIFO empty. */
status = pl011_read(uap, REG_FR); if (status & UART01x_FR_RXFE) break;
/* Take chars from the FIFO and update status */
ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
flag = TTY_NORMAL;
uap->port.icount.rx++;
/* Error bits arrive alongside the data in the upper bits of REG_DR. */
if (unlikely(ch & UART_DR_ERROR)) { if (ch & UART011_DR_BE) {
/* A break also sets framing/parity bits; don't double-count them. */
ch &= ~(UART011_DR_FE | UART011_DR_PE);
uap->port.icount.brk++; if (uart_handle_break(&uap->port)) continue;
} elseif (ch & UART011_DR_PE) {
uap->port.icount.parity++;
} elseif (ch & UART011_DR_FE) {
uap->port.icount.frame++;
} if (ch & UART011_DR_OE)
uap->port.icount.overrun++;
/* Drop status bits the user has not asked to see. */
ch &= uap->port.read_status_mask;
/* Map the surviving error bit to the TTY flag for this character. */
if (ch & UART011_DR_BE)
flag = TTY_BREAK; elseif (ch & UART011_DR_PE)
flag = TTY_PARITY; elseif (ch & UART011_DR_FE)
flag = TTY_FRAME;
}
/* * All the DMA operation mode stuff goes inside this ifdef. * This assumes that you have a generic DMA device interface, * no custom DMA interfaces are supported.
*/ #ifdef CONFIG_DMA_ENGINE
/* * Some DMA controllers provide information on their capabilities. * If the controller does, check for suitable residue processing * otherwise assume all is well.
*/ if (dma_get_slave_caps(chan, &caps) == 0) { if (caps.residue_granularity ==
DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
dma_release_channel(chan);
dev_info(uap->port.dev, "RX DMA disabled - no residue processing\n"); return;
}
}
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
uap->dmarx.auto_poll_rate = false; if (plat && plat->dma_rx_poll_enable) { /* Set poll rate if specified. */ if (plat->dma_rx_poll_rate) {
uap->dmarx.auto_poll_rate = false;
uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
} else { /* * 100 ms defaults to poll rate if not * specified. This will be adjusted with * the baud rate at set_termios.
*/
uap->dmarx.auto_poll_rate = true;
uap->dmarx.poll_rate = 100;
} /* 3 secs defaults poll_timeout if not specified. */ if (plat->dma_rx_poll_timeout)
uap->dmarx.poll_timeout =
plat->dma_rx_poll_timeout; else
uap->dmarx.poll_timeout = 3000;
} elseif (!plat && dev->of_node) {
uap->dmarx.auto_poll_rate =
of_property_read_bool(dev->of_node, "auto-poll"); if (uap->dmarx.auto_poll_rate) {
u32 x;
/* * If TX DMA was disabled, it means that we've stopped the DMA for * some reason (eg, XOFF received, or we want to send an X-char.) * * Note: we need to be careful here of a potential race between DMA * and the rest of the driver - if the driver disables TX DMA while * a TX buffer completing, we must update the tx queued status to * get further refills (hence we check dmacr).
*/ if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
kfifo_is_empty(&tport->xmit_fifo)) {
uap->dmatx.queued = false;
uart_port_unlock_irqrestore(&uap->port, flags); return;
}
if (pl011_dma_tx_refill(uap) <= 0) /* * We didn't queue a DMA buffer for some reason, but we * have data pending to be sent. Re-enable the TX IRQ.
*/
pl011_start_tx_pio(uap);
uart_port_unlock_irqrestore(&uap->port, flags);
}
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 *
 * NOTE(review): in this copy, dmatx->dma and dmatx->len are passed to
 * dmaengine_prep_slave_single() but nothing visible in this span ever
 * assigns them - the expected copy of kfifo data into the DMA bounce
 * buffer and the dma_map_single() call are missing. This fragment
 * looks incomplete; verify against the upstream source.
 */
staticint pl011_dma_tx_refill(struct uart_amba_port *uap)
{ struct pl011_dmatx_data *dmatx = &uap->dmatx; struct dma_chan *chan = dmatx->chan; struct dma_device *dma_dev = chan->device; struct dma_async_tx_descriptor *desc; struct tty_port *tport = &uap->port.state->port; unsignedint count;
/*
 * Try to avoid the overhead involved in using DMA if the
 * transaction fits in the first half of the FIFO, by using
 * the standard interrupt handling. This ensures that we
 * issue a uart_write_wakeup() at the appropriate time.
 */
count = kfifo_len(&tport->xmit_fifo); if (count < (uap->fifosize >> 1)) {
uap->dmatx.queued = false; return 0;
}
/*
 * Bodge: don't send the last character by DMA, as this
 * will prevent XON from notifying us to restart DMA.
 */
count -= 1;
/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */ if (count > PL011_DMA_BUFFER_SIZE)
count = PL011_DMA_BUFFER_SIZE;
desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) {
dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE);
uap->dmatx.queued = false;
/*
 * If DMA cannot be used right now, we complete this
 * transaction via IRQ and let the TTY layer retry.
 */
dev_dbg(uap->port.dev, "TX DMA busy\n"); return -EBUSY;
}
/* Some data to go along to the callback */
desc->callback = pl011_dma_tx_callback;
desc->callback_param = uap;
/* All errors should happen at prepare time */
dmaengine_submit(desc);
/* Fire the DMA transaction */
dma_dev->device_issue_pending(chan);
/*
 * Now we know that DMA will fire, so advance the ring buffer
 * with the stuff we just dispatched.
 */
uart_xmit_advance(&uap->port, count);
/* Wake up writers once the fifo drops below the wakeup watermark. */
if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
uart_write_wakeup(&uap->port);
return 1;
}
/* * We received a transmit interrupt without a pending X-char but with * pending characters. * Locking: called with port lock held and IRQs disabled. * Returns: * false if we want to use PIO to transmit * true if we queued a DMA buffer
*/ staticbool pl011_dma_tx_irq(struct uart_amba_port *uap)
{ if (!uap->using_tx_dma) returnfalse;
/* * If we already have a TX buffer queued, but received a * TX interrupt, it will be because we've just sent an X-char. * Ensure the TX DMA is enabled and the TX IRQ is disabled.
*/ if (uap->dmatx.queued) {
uap->dmacr |= UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC); returntrue;
}
/* * We don't have a TX buffer queued, so try to queue one. * If we successfully queued a buffer, mask the TX IRQ.
*/ if (pl011_dma_tx_refill(uap) > 0) {
uap->im &= ~UART011_TXIM;
pl011_write(uap->im, uap, REG_IMSC); returntrue;
} returnfalse;
}
/*
 * Stop the DMA transmit (eg, due to received XOFF).
 * Locking: called with port lock held and IRQs disabled.
 */
staticinlinevoid pl011_dma_tx_stop(struct uart_amba_port *uap)
{
	/* Nothing to do unless a DMA transfer is actually in flight. */
	if (!uap->dmatx.queued)
		return;

	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
}
/* * Try to start a DMA transmit, or in the case of an XON/OFF * character queued for send, try to get that character out ASAP. * Locking: called with port lock held and IRQs disabled. * Returns: * false if we want the TX IRQ to be enabled * true if we have a buffer queued
*/ staticinlinebool pl011_dma_tx_start(struct uart_amba_port *uap)
{
u16 dmacr;
if (!uap->using_tx_dma) returnfalse;
if (!uap->port.x_char) { /* no X-char, try to push chars out in DMA mode */ bool ret = true;
/* * We have an X-char to send. Disable DMA to prevent it loading * the TX fifo, and then see if we can stuff it into the FIFO.
*/
dmacr = uap->dmacr;
uap->dmacr &= ~UART011_TXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) { /* * No space in the FIFO, so enable the transmit interrupt * so we know when there is space. Note that once we've * loaded the character, we should just re-enable DMA.
*/ returnfalse;
}
/* Start the RX DMA job */
dbuf = uap->dmarx.use_buf_b ?
&uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;
desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK); /* * If the DMA engine is busy and cannot prepare a * channel, no big deal, the driver will fall back * to interrupt mode as a result of this error code.
*/ if (!desc) {
uap->dmarx.running = false;
dmaengine_terminate_all(rxchan); return -EBUSY;
}
/* Some data to go along to the callback */
desc->callback = pl011_dma_rx_callback;
desc->callback_param = uap;
dmarx->cookie = dmaengine_submit(desc);
dma_async_issue_pending(rxchan);
/*
 * Push the characters from a completed (or timed-out) RX DMA buffer
 * into the TTY layer, optionally followed by whatever is still sitting
 * in the RX FIFO.
 *
 * This is called when either the DMA job is complete, or the FIFO
 * timeout interrupt occurred. This must be called with the port
 * spinlock uap->port.lock held.
 */
staticvoid pl011_dma_rx_chars(struct uart_amba_port *uap,
			      u32 pending, bool use_buf_b, bool readfifo)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmabuf *dbuf;
	int dmataken = 0;
	int dma_count = 0;
	u32 fifotaken = 0;	/* only used for vdbg() */

	dbuf = use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a;

	if (uap->dmarx.poll_rate) {
		/* Part of the buffer was already consumed by polling. */
		dmataken = dbuf->len - dmarx->last_residue;
		/* Recalculate what is still pending. */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	if (pending) {
		/*
		 * First take all chars in the DMA pipe, then look in the
		 * FIFO. Note that tty_insert_flip_string() takes as many
		 * chars as it can.
		 */
		dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken,
						   pending);
		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset last_residue for the next RX DMA poll. */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = dbuf->len;

	/*
	 * Only continue with the FIFO once every DMA char has been taken;
	 * error conditions only ever show up in the FIFO (they raise an
	 * immediate interrupt and stop the DMA job), never in the DMA
	 * buffer itself.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags first. */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);
		fifotaken = pl011_fifo_to_tty(uap);
	}

	dev_vdbg(uap->port.dev, "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
}
/* * Pause the transfer so we can trust the current counter, * do this before we pause the PL011 block, else we may * overflow the FIFO.
*/ if (dmaengine_pause(rxchan))
dev_err(uap->port.dev, "unable to pause DMA transfer\n");
dmastat = rxchan->device->device_tx_status(rxchan,
dmarx->cookie, &state); if (dmastat != DMA_PAUSED)
dev_err(uap->port.dev, "unable to pause DMA transfer\n");
/* Disable RX DMA - incoming data will wait in the FIFO */
uap->dmacr &= ~UART011_RXDMAE;
pl011_write(uap->dmacr, uap, REG_DMACR);
uap->dmarx.running = false;
pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE); /* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
/* * This will take the chars we have so far and insert * into the framework.
*/
pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
/* Switch buffer & re-trigger DMA job */
dmarx->use_buf_b = !dmarx->use_buf_b; if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev, "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
}
/* * This completion interrupt occurs typically when the * RX buffer is totally stuffed but no timeout has yet * occurred. When that happens, we just want the RX * routine to flush out the secondary DMA buffer while * we immediately trigger the next DMA job.
*/
uart_port_lock_irq(&uap->port); /* * Rx data can be taken by the UART interrupts during * the DMA irq handler. So we check the residue here.
*/
rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
pending = dbuf->len - state.residue;
BUG_ON(pending > PL011_DMA_BUFFER_SIZE); /* Then we terminate the transfer - we now know our residue */
dmaengine_terminate_all(rxchan);
uap->dmarx.running = false;
dmarx->use_buf_b = !lastbuf;
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
uart_unlock_and_check_sysrq(&uap->port); /* * Do this check after we picked the DMA chars so we don't * get some IRQ immediately from RX.
*/ if (ret) {
dev_dbg(uap->port.dev, "could not retrigger RX DMA job fall back to interrupt mode\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
}
}
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 */
staticinlinevoid pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	if (uap->using_rx_dma) {
		/* FIXME. Just disable the DMA enable */
		uap->dmacr &= ~UART011_RXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
/* * Timer handler for Rx DMA polling. * Every polling, It checks the residue in the dma buffer and transfer * data to the tty. Also, last_residue is updated for the next polling.
*/ staticvoid pl011_dma_rx_poll(struct timer_list *t)
{ struct uart_amba_port *uap = timer_container_of(uap, t, dmarx.timer); struct tty_port *port = &uap->port.state->port; struct pl011_dmarx_data *dmarx = &uap->dmarx; struct dma_chan *rxchan = uap->dmarx.chan; unsignedlong flags; unsignedint dmataken = 0; unsignedint size = 0; struct pl011_dmabuf *dbuf; int dma_count; struct dma_tx_state state;
/* * If no data is received in poll_timeout, the driver will fall back * to interrupt mode. We will retrigger DMA at the first interrupt.
*/ if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
> uap->dmarx.poll_timeout) {
uart_port_lock_irqsave(&uap->port, &flags);
pl011_dma_rx_stop(uap);
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
uart_port_unlock_irqrestore(&uap->port, flags);
/* The DMA buffer is now the FIFO the TTY subsystem can use */
uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
uap->using_tx_dma = true;
if (!uap->dmarx.chan) goto skip_rx;
/* Allocate and map DMA RX buffers */
ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE); if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n", "RX buffer A", ret); goto skip_rx;
}
ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b,
DMA_FROM_DEVICE); if (ret) {
dev_err(uap->port.dev, "failed to init DMA %s: %d\n", "RX buffer B", ret);
pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a,
DMA_FROM_DEVICE); goto skip_rx;
}
uap->using_rx_dma = true;
skip_rx: /* Turn on DMA error (RX/TX will be enabled on demand) */
uap->dmacr |= UART011_DMAONERR;
pl011_write(uap->dmacr, uap, REG_DMACR);
/* * ST Micro variants has some specific dma burst threshold * compensation. Set this to 16 bytes, so burst will only * be issued above/below 16 bytes.
*/ if (uap->vendor->dma_threshold)
pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
uap, REG_ST_DMAWM);
if (uap->using_rx_dma) { if (pl011_dma_rx_trigger_dma(uap))
dev_dbg(uap->port.dev, "could not trigger initial RX DMA job, fall back to interrupt mode\n"); if (uap->dmarx.poll_rate) {
timer_setup(&uap->dmarx.timer, pl011_dma_rx_poll, 0);
mod_timer(&uap->dmarx.timer,
jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
uap->dmarx.last_jiffies = jiffies;
}
}
}
staticvoid pl011_dma_shutdown(struct uart_amba_port *uap)
{ if (!(uap->using_tx_dma || uap->using_rx_dma)) return;
/* Disable RX and TX DMA */ while (pl011_read(uap, REG_FR) & uap->vendor->fr_busy)
cpu_relax();
if (uap->using_tx_dma) { /* In theory, this should already be done by pl011_dma_flush_buffer */
dmaengine_terminate_all(uap->dmatx.chan); if (uap->dmatx.queued) {
dma_unmap_single(uap->dmatx.chan->device->dev,
uap->dmatx.dma, uap->dmatx.len,
DMA_TO_DEVICE);
uap->dmatx.queued = false;
}
uart_port_unlock(&uap->port);
tty_flip_buffer_push(&uap->port.state->port); /* * If we were temporarily out of DMA mode for a while, * attempt to switch back to DMA mode again.
*/ if (pl011_dma_rx_available(uap)) { if (pl011_dma_rx_trigger_dma(uap)) {
dev_dbg(uap->port.dev, "could not trigger RX DMA job fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
pl011_write(uap->im, uap, REG_IMSC);
} else { #ifdef CONFIG_DMA_ENGINE /* Start Rx DMA poll */ if (uap->dmarx.poll_rate) {
uap->dmarx.last_jiffies = jiffies;
uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
mod_timer(&uap->dmarx.timer,
jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
} #endif
}
}
uart_port_lock(&uap->port);
}
staticbool pl011_tx_char(struct uart_amba_port *uap, unsignedchar c, bool from_irq)
{ if (unlikely(!from_irq) &&
pl011_read(uap, REG_FR) & UART01x_FR_TXFF) returnfalse; /* unable to transmit character */
if (port->status & UPSTAT_AUTORTS) { /* We need to disable auto-RTS if we want to turn RTS off */
pl011_assign_bit(mctrl & TIOCM_RTS, &cr, UART011_CR_RTSEN);
}
pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR); /* * There is no way to clear TXIM as this is "ready to transmit IRQ", so * we simply mask it. start_tx() will unmask it. * * Note we can race with start_tx(), and if the race happens, the * polling user might get another interrupt just after we clear it. * But it should be OK and can happen even w/o the race, e.g. * controller immediately got some new data and raised the IRQ. * * And whoever uses polling routines assumes that it manages the device * (including tx queue), so we're also fine with start_tx()'s caller * side.
*/
pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
REG_IMSC);
}
/* * Save interrupts enable mask, and enable RX interrupts in case if * the interrupt is used for NMI entry.
*/
uap->im = pl011_read(uap, REG_IMSC);
pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);
if (dev_get_platdata(uap->port.dev)) { struct amba_pl011_data *plat;
plat = dev_get_platdata(uap->port.dev); if (plat->init)
plat->init();
} return 0;
}
/*
 * Write the line-control value, handling variants where LCRH is split
 * into separate RX and TX registers.
 */
staticvoid pl011_write_lcr_h(struct uart_amba_port *uap, unsignedint lcr_h)
{
	int i;

	pl011_write(lcr_h, uap, REG_LCRH_RX);

	if (!pl011_split_lcrh(uap))
		return;

	/*
	 * Wait 10 PCLKs before writing the LCRH_TX register; generate the
	 * delay by writing a read-only register ten times.
	 */
	for (i = 0; i < 10; ++i)
		pl011_write(0xff, uap, REG_MIS);

	pl011_write(lcr_h, uap, REG_LCRH_TX);
}
/* * Enable interrupts, only timeouts when using DMA * if initial RX DMA job failed, start in interrupt mode * as well.
*/ staticvoid pl011_enable_interrupts(struct uart_amba_port *uap)
{ unsignedlong flags; unsignedint i;
uart_port_lock_irqsave(&uap->port, &flags);
/* Clear out any spuriously appearing RX interrupts */
pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
/* * RXIS is asserted only when the RX FIFO transitions from below * to above the trigger threshold. If the RX FIFO is already * full to the threshold this can't happen and RXIS will now be * stuck off. Drain the RX FIFO explicitly to fix this:
*/ for (i = 0; i < uap->fifosize * 2; ++i) { if (pl011_read(uap, REG_FR) & UART01x_FR_RXFE) break;
val = pl011_read(uap, lcrh);
val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
pl011_write(val, uap, lcrh);
}
/* * disable the port. It should not disable RTS and DTR. * Also RTS and DTR state should be preserved to restore * it during startup().
*/ staticvoid pl011_disable_uart(struct uart_amba_port *uap)
{ unsignedint cr;
if ((port->rs485.flags & SER_RS485_ENABLED && uap->rs485_tx_state != OFF))
pl011_rs485_tx_stop(uap);
free_irq(uap->port.irq, uap);
pl011_disable_uart(uap);
/* * Shut down the clock producer
*/
clk_disable_unprepare(uap->clk); /* Optionally let pins go into sleep states */
pinctrl_pm_select_sleep_state(port->dev);
if (dev_get_platdata(uap->port.dev)) { struct amba_pl011_data *plat;
plat = dev_get_platdata(uap->port.dev); if (plat->exit)
plat->exit();
}
if (uap->port.ops->flush_buffer)
uap->port.ops->flush_buffer(port);
}
switch (termios->c_cflag & CSIZE) { case CS5:
lcr_h = UART01x_LCRH_WLEN_5; break; case CS6:
lcr_h = UART01x_LCRH_WLEN_6; break; case CS7:
lcr_h = UART01x_LCRH_WLEN_7; break; default: // CS8
lcr_h = UART01x_LCRH_WLEN_8; break;
} if (termios->c_cflag & CSTOPB)
lcr_h |= UART01x_LCRH_STP2; if (termios->c_cflag & PARENB) {
lcr_h |= UART01x_LCRH_PEN; if (!(termios->c_cflag & PARODD))
lcr_h |= UART01x_LCRH_EPS; if (termios->c_cflag & CMSPAR)
lcr_h |= UART011_LCRH_SPS;
} if (uap->fifosize > 1)
lcr_h |= UART01x_LCRH_FEN;
bits = tty_get_frame_size(termios->c_cflag);
uart_port_lock_irqsave(port, &flags);
/* * Update the per-port timeout.
*/
uart_update_timeout(port, termios->c_cflag, baud);
/* * Calculate the approximated time it takes to transmit one character * with the given baud rate. We use this as the poll interval when we * wait for the tx queue to empty.
*/
uap->rs485_tx_drain_interval = ns_to_ktime(DIV_ROUND_UP(bits * NSEC_PER_SEC, baud));
pl011_setup_status_masks(port, termios);
if (UART_ENABLE_MS(port, termios->c_cflag))
pl011_enable_ms(port);
if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
old_cr = pl011_read(uap, REG_CR);
if (termios->c_cflag & CRTSCTS) { if (old_cr & UART011_CR_RTS)
old_cr |= UART011_CR_RTSEN;
if (uap->vendor->oversampling) { if (baud > port->uartclk / 16)
old_cr |= ST_UART011_CR_OVSFACT; else
old_cr &= ~ST_UART011_CR_OVSFACT;
}
/* * Workaround for the ST Micro oversampling variants to * increase the bitrate slightly, by lowering the divisor, * to avoid delayed sampling of start bit at high speeds, * else we see data corruption.
*/ if (uap->vendor->oversampling) { if (baud >= 3000000 && baud < 3250000 && quot > 1)
quot -= 1; elseif (baud > 3250000 && quot > 2)
quot -= 2;
} /* Set baud rate */
pl011_write(quot & 0x3f, uap, REG_FBRD);
pl011_write(quot >> 6, uap, REG_IBRD);
/* * ----------v----------v----------v----------v----- * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER * REG_FBRD & REG_IBRD. * ----------^----------^----------^----------^-----
*/
pl011_write_lcr_h(uap, lcr_h);
/* * Receive was disabled by pl011_disable_uart during shutdown. * Need to reenable receive if you need to use a tty_driver * returns from tty_find_polling_driver() after a port shutdown.
*/
old_cr |= UART011_CR_RXE;
pl011_write(old_cr, uap, REG_CR);
/*
 * Configure/autoconfigure the port.
 */
staticvoid pl011_config_port(struct uart_port *port, int flags)
{
	if (!(flags & UART_CONFIG_TYPE))
		return;

	port->type = PORT_AMBA;
}
/*
 * Verify the new serial_struct (for TIOCSSERIAL).
 *
 * Returns 0 if the requested settings are acceptable for this port,
 * -EINVAL otherwise.
 */
staticint pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* Only the AMBA port type (or autodetect) is acceptable. */
	if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
		return -EINVAL;
	/* The IRQ must be a valid interrupt number. */
	if (ser->irq < 0 || ser->irq >= irq_get_nr_irqs())
		return -EINVAL;
	/* Reject implausibly low base baud rates. */
	if (ser->baud_base < 9600)
		return -EINVAL;
	/* The I/O memory base must match the probed port. */
	if (port->mapbase != (unsignedlong)ser->iomem_base)
		return -EINVAL;

	return 0;
}
staticint pl011_console_setup(struct console *co, char *options)
{ struct uart_amba_port *uap; int baud = 38400; int bits = 8; int parity = 'n'; int flow = 'n'; int ret;
/* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support.
*/ if (co->index >= UART_NR)
co->index = 0;
uap = amba_ports[co->index]; if (!uap) return -ENODEV;
/* Allow pins to be muxed in and configured */
pinctrl_pm_select_default_state(uap->port.dev);
ret = clk_prepare(uap->clk); if (ret) return ret;
uap->console_line_ended = true;
if (dev_get_platdata(uap->port.dev)) { struct amba_pl011_data *plat;
plat = dev_get_platdata(uap->port.dev); if (plat->init)
plat->init();
}
/** * pl011_console_match - non-standard console matching * @co: registering console * @name: name from console command line * @idx: index from console command line * @options: ptr to option string from console command line * * Only attempts to match console command lines of the form: * console=pl011,mmio|mmio32,<addr>[,<options>] * console=pl011,0x<addr>[,<options>] * This form is used to register an initial earlycon boot console and * replace it with the amba_console at pl011 driver init. * * Performs console setup for a match (as required by interface) * If no <options> are specified, then assume the h/w is already setup. * * Returns 0 if console matches; otherwise non-zero to use default matching
*/ staticint pl011_console_match(struct console *co, char *name, int idx, char *options)
{ enum uart_iotype iotype;
resource_size_t addr; int i;
/* * Systems affected by the Qualcomm Technologies QDF2400 E44 erratum * have a distinct console name, so make sure we check for that. * The actual implementation of the erratum occurs in the probe * function.
*/ if ((strcmp(name, "qdf2400_e44") != 0) && (strcmp(name, "pl011") != 0)) return -ENODEV;
if (uart_parse_earlycon(options, &iotype, &addr, &options)) return -ENODEV;
if (iotype != UPIO_MEM && iotype != UPIO_MEM32) return -ENODEV;
/* try to match the port specified on the command line */ for (i = 0; i < ARRAY_SIZE(amba_ports); i++) { struct uart_port *port;
/*
 * NOTE(review): the following is stray non-code text (German website
 * boilerplate) accidentally pasted into this source file; it does not
 * belong to the driver and should be removed upstream. English
 * translation, kept for reference:
 * "The information on this website was carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax rendering and the measurement are still
 * experimental."
 */