/* Adjust RTS polarity in case it's driven in software */ if (stm32_usart_tx_empty(port))
stm32_usart_rs485_rts_disable(port); else
stm32_usart_rs485_rts_enable(port);
/* Return true when data is pending (in pio mode), and false when no data is pending. */ staticbool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
{ struct stm32_port *stm32_port = to_stm32_port(port); conststruct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
*sr = readl_relaxed(port->membase + ofs->isr); /* Get pending characters in RDR or FIFO */ if (*sr & USART_SR_RXNE) { /* Get all pending characters from the RDR or the FIFO when using interrupts */ if (!stm32_usart_rx_dma_started(stm32_port)) returntrue;
/* Handle only RX data errors when using DMA */ if (*sr & USART_SR_ERR_MASK) returntrue;
}
while (stm32_usart_pending_rx_pio(port, &sr)) {
sr |= USART_SR_DUMMY_RX;
flag = TTY_NORMAL;
/* * Status bits has to be cleared before reading the RDR: * In FIFO mode, reading the RDR will pop the next data * (if any) along with its status bits into the SR. * Not doing so leads to misalignement between RDR and SR, * and clear status bits of the next rx data. * * Clear errors flags for stm32f7 and stm32h7 compatible * devices. On stm32f4 compatible devices, the error bit is * cleared by the sequence [read SR - read DR].
*/ if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
writel_relaxed(sr & USART_SR_ERR_MASK,
port->membase + ofs->icr);
c = stm32_usart_get_char_pio(port);
port->icount.rx++;
size++; if (sr & USART_SR_ERR_MASK) { if (sr & USART_SR_ORE) {
port->icount.overrun++;
} elseif (sr & USART_SR_PE) {
port->icount.parity++;
} elseif (sr & USART_SR_FE) { /* Break detection if character is null */ if (!c) {
port->icount.brk++; if (uart_handle_break(port)) continue;
} else {
port->icount.frame++;
}
}
sr &= port->read_status_mask;
if (sr & USART_SR_PE) {
flag = TTY_PARITY;
} elseif (sr & USART_SR_FE) { if (!c)
flag = TTY_BREAK; else
flag = TTY_FRAME;
}
}
if (uart_prepare_sysrq_char(port, c)) continue;
uart_insert_char(port, sr, USART_SR_ORE, c, flag);
}
/* * Apply rdr_mask on buffer in order to mask parity bit. * This loop is useless in cs8 mode because DMA copies only * 8 bits and already ignores parity bit.
*/ if (!(stm32_port->rdr_mask == (BIT(8) - 1))) for (i = 0; i < dma_size; i++)
*(dma_start + i) &= stm32_port->rdr_mask;
/* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */ if (stm32_port->rx_dma_state.residue > stm32_port->last_res) { /* Conditional first part: from last_res to end of DMA buffer */
dma_size = stm32_port->last_res;
stm32_usart_push_buffer_dma(port, dma_size);
size = dma_size;
}
/* Push current DMA transaction in the pending queue */
ret = dma_submit_error(dmaengine_submit(desc)); if (ret) {
dmaengine_terminate_sync(stm32_port->rx_ch);
stm32_port->rx_dma_busy = false; return ret;
}
staticbool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
{ /* * We cannot use the function "dmaengine_tx_status" to know the * status of DMA. This function does not show if the "dma complete" * callback of the DMA transaction has been called. So we prefer * to use "tx_dma_busy" flag to prevent dual DMA transaction at the * same time.
*/ return stm32_port->tx_dma_busy;
}
/* Let's see if we have pending data to send */
uart_port_lock_irqsave(port, &flags);
stm32_usart_transmit_chars(port);
uart_port_unlock_irqrestore(port, flags);
}
/* Check that TDR is empty before filling FIFO */ if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) break;
if (!uart_fifo_get(port, &ch)) break;
writel_relaxed(ch, port->membase + ofs->tdr);
}
/* rely on TXE irq (mask or unmask) for sending remaining data */ if (kfifo_is_empty(&tport->xmit_fifo))
stm32_usart_tx_interrupt_disable(port); else
stm32_usart_tx_interrupt_enable(port);
}
/* * Set "tx_dma_busy" flag. This flag will be released when * dmaengine_terminate_async will be called. This flag helps * transmit_chars_dma not to start another DMA transaction * if the callback of the previous is not yet called.
*/
stm32port->tx_dma_busy = true;
/* Push current DMA TX transaction in the pending queue */ /* DMA no yet started, safe to free resources */
ret = dma_submit_error(dmaengine_submit(desc)); if (ret) {
dev_err(port->dev, "DMA failed with error code: %d\n", ret);
stm32_usart_tx_dma_terminate(stm32port); goto fallback_err;
}
if (port->x_char) { /* dma terminate may have been called in case of dma pause failure */
stm32_usart_tx_dma_pause(stm32_port);
/* Check that TDR is empty before filling FIFO */
ret =
readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
isr,
(isr & USART_SR_TXE),
10, 1000); if (ret)
dev_warn(port->dev, "1 character may be erased\n");
if (!stm32_port->hw_flow_control &&
port->rs485.flags & SER_RS485_ENABLED &&
(sr & USART_SR_TC)) {
stm32_usart_tc_interrupt_disable(port);
stm32_usart_rs485_rts_disable(port);
ret = IRQ_HANDLED;
}
if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG) {
writel_relaxed(USART_ICR_RTOCF,
port->membase + ofs->icr);
ret = IRQ_HANDLED;
}
if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) { /* Clear wake up flag and disable wake up interrupt */
writel_relaxed(USART_ICR_WUCF,
port->membase + ofs->icr);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE); if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
pm_wakeup_event(tport->tty->dev, 0);
ret = IRQ_HANDLED;
}
/* * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request * line has been masked by HW and rx data are stacking in FIFO.
*/ if (!stm32_port->throttled) { if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_started(stm32_port)) ||
((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_started(stm32_port))) {
uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port); if (size)
tty_flip_buffer_push(tport);
ret = IRQ_HANDLED;
}
}
if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
uart_port_lock(port);
stm32_usart_transmit_chars(port);
uart_port_unlock(port);
ret = IRQ_HANDLED;
}
/* Receiver timeout irq for DMA RX */ if (stm32_usart_rx_dma_started(stm32_port) && !stm32_port->throttled) {
uart_port_lock(port);
size = stm32_usart_receive_chars(port, false);
uart_unlock_and_check_sysrq(port); if (size)
tty_flip_buffer_push(tport);
ret = IRQ_HANDLED;
}
/* dma terminate may have been called in case of dma pause failure */
stm32_usart_tx_dma_pause(stm32_port);
stm32_usart_rs485_rts_disable(port);
}
/* There are probably characters waiting to be transmitted. */ staticvoid stm32_usart_start_tx(struct uart_port *port)
{ struct tty_port *tport = &port->state->port;
/* Nothing queued and no XON/XOFF character pending: nothing to start. */
if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) return;
/*
 * NOTE(review): "stm32_port" is not declared in this function; elsewhere in
 * this file it is obtained via to_stm32_port(port) — confirm against the
 * original source. Terminating the TX DMA channel from a start_tx hook also
 * looks out of place (it resembles a stop_tx/flush path); verify this body
 * was not spliced in from another function during extraction.
 */
if (stm32_port->tx_ch)
stm32_usart_tx_dma_terminate(stm32_port);
}
/* Throttle the remote when input buffer is about to overflow. */ staticvoid stm32_usart_throttle(struct uart_port *port)
{ struct stm32_port *stm32_port = to_stm32_port(port); conststruct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsignedlong flags;
uart_port_lock_irqsave(port, &flags);
/* * Pause DMA transfer, so the RX data gets queued into the FIFO. * Hardware flow control is triggered when RX FIFO is full.
 */
stm32_usart_rx_dma_pause(stm32_port);
/* Mask the RX interrupt(s) so no further characters are drained in PIO mode. */
stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq); if (stm32_port->cr3_irq)
stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
/*
 * NOTE(review): this function appears truncated here — the expected
 * "stm32_port->throttled = true;" and uart_port_unlock_irqrestore()
 * tail is missing before the next definition starts; the lock taken
 * above is never visibly released. Confirm against the original file.
 */
/* Unthrottle the remote, the input buffer can now accept data. */ staticvoid stm32_usart_unthrottle(struct uart_port *port)
{ struct stm32_port *stm32_port = to_stm32_port(port); conststruct stm32_usart_offsets *ofs = &stm32_port->info->ofs; unsignedlong flags;
uart_port_lock_irqsave(port, &flags);
/* Re-enable the RX interrupt(s) that throttle() masked. */
stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq); if (stm32_port->cr3_irq)
stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);
stm32_port->throttled = false;
/* * Switch back to DMA mode (resume DMA). * Hardware flow control is stopped when FIFO is not full any more.
 */ if (stm32_port->rx_ch)
stm32_usart_rx_dma_start_or_resume(port);
/*
 * NOTE(review): truncated — the uart_port_unlock_irqrestore() tail and the
 * closing brace are missing before the next fragment starts; the lock taken
 * above is never visibly released. Confirm against the original file.
 */
if (stm32_usart_tx_dma_started(stm32_port))
stm32_usart_tx_dma_terminate(stm32_port);
if (stm32_port->tx_ch)
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
/* Disable modem control interrupts */
stm32_usart_disable_ms(port);
val = USART_CR1_TXEIE | USART_CR1_TE;
val |= stm32_port->cr1_irq | USART_CR1_RE;
val |= BIT(cfg->uart_enable_bit); if (stm32_port->fifoen)
val |= USART_CR1_FIFOEN;
if (cflag & PARENB) {
bits++;
cr1 |= USART_CR1_PCE;
}
/* * Word length configuration: * CS8 + parity, 9 bits word aka [M1:M0] = 0b01 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00 * M0 and M1 already cleared by cr1 initialization.
*/ if (bits == 9) {
cr1 |= USART_CR1_M0;
} elseif ((bits == 7) && cfg->has_7bits_data) {
cr1 |= USART_CR1_M1;
} elseif (bits != 8) {
dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
, bits);
cflag &= ~CSIZE;
cflag |= CS8;
termios->c_cflag = cflag;
bits = 8; if (cflag & PARENB) {
bits++;
cr1 |= USART_CR1_M0;
}
}
if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
(stm32_port->fifoen &&
stm32_port->rxftcfg >= 0))) { if (cflag & CSTOPB)
bits = bits + 3; /* 1 start bit + 2 stop bits */ else
bits = bits + 2; /* 1 start bit + 1 stop bit */
/* RX timeout irq to occur after last stop bit + bits */
stm32_port->cr1_irq = USART_CR1_RTOIE;
writel_relaxed(bits, port->membase + ofs->rtor);
cr2 |= USART_CR2_RTOEN; /* * Enable fifo threshold irq in two cases, either when there is no DMA, or when * wake up over usart, from low power until the DMA gets re-enabled by resume.
*/
stm32_port->cr3_irq = USART_CR3_RXFTIE;
}
/* * The USART supports 16 or 8 times oversampling. * By default we prefer 16 times oversampling, so that the receiver * has a better tolerance to clock deviations. * 8 times oversampling is only used to achieve higher speeds.
*/ if (usartdiv < 16) {
oversampling = 8;
cr1 |= USART_CR1_OVER8;
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
} else {
oversampling = 16;
cr1 &= ~USART_CR1_OVER8;
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
}
if (FIELD_FIT(USART_BRR_MASK, brr)) { if (ofs->presc != UNDEF_REG) {
port->uartclk = uart_clk_pres;
writel_relaxed(presc, port->membase + ofs->presc);
} elseif (presc) { /* We need a prescaler but we don't have it (STM32F4, STM32F7) */
dev_err(port->dev, "unable to set baudrate, input clock is too high");
} break;
} elseif (presc == USART_PRESC_MAX) { /* Even with prescaler and brr at max value we can't set baudrate */
dev_err(port->dev, "unable to set baudrate, input clock is too high"); break;
}
}
/* Characters to ignore */
port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR)
port->ignore_status_mask = USART_SR_PE | USART_SR_FE; if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |= USART_SR_FE; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support).
*/ if (termios->c_iflag & IGNPAR)
port->ignore_status_mask |= USART_SR_ORE;
}
/* Ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= USART_SR_DUMMY_RX;
if (stm32_port->rx_ch) { /* * Setup DMA to collect only valid data and enable error irqs. * This also enables break reception when using DMA.
*/
cr1 |= USART_CR1_PEIE;
cr3 |= USART_CR3_EIE;
cr3 |= USART_CR3_DMAR;
cr3 |= USART_CR3_DDRE;
}
/* Configure wake up from low power on start bit detection */ if (stm32_port->wakeup_src) {
cr3 &= ~USART_CR3_WUS_MASK;
cr3 |= USART_CR3_WUS_START_BIT;
}
for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) { if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes) break;
} if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
stm32_usart_get_thresh_value(fifo_size, i), fifo_size);
stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0); if (IS_ERR(stm32port->gpios)) {
ret = PTR_ERR(stm32port->gpios); goto err_clk;
}
/* * Both CTS/RTS gpios and "st,hw-flow-ctrl" (deprecated) or "uart-has-rtscts" * properties should not be specified.
*/ if (stm32port->hw_flow_control) { if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
ret = -EINVAL; goto err_clk;
}
}
ret = dmaengine_slave_config(stm32port->tx_ch, &config); if (ret < 0) {
dev_err(dev, "tx dma channel config failed\n");
stm32_usart_of_dma_tx_remove(stm32port, pdev); return ret;
}
return 0;
}
/*
 * Platform driver probe: look up the port from DT, request optional RX/TX
 * DMA channels (falling back to interrupt mode on any non-deferral error),
 * initialize the port, and wire up wakeup support when requested.
 *
 * NOTE(review): this function is visibly truncated — the error labels
 * referenced by the gotos below (err_dma_rx, err_dma_tx, err_deinit_port)
 * and the final registration/return are not in this chunk.
 */
staticint stm32_usart_serial_probe(struct platform_device *pdev)
{ struct stm32_port *stm32port; int ret;
stm32port = stm32_usart_of_get_port(pdev); if (!stm32port) return -ENODEV;
/* Match data carries the per-SoC register layout/config. */
stm32port->info = of_device_get_match_data(&pdev->dev); if (!stm32port->info) return -EINVAL;
/* RX DMA is optional; only -EPROBE_DEFER is propagated to the core. */
stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx"); if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) return -EPROBE_DEFER;
/* Fall back in interrupt mode for any non-deferral error */ if (IS_ERR(stm32port->rx_ch))
stm32port->rx_ch = NULL;
/* Same policy for the optional TX DMA channel. */
stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx"); if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER; goto err_dma_rx;
} /* Fall back in interrupt mode for any non-deferral error */ if (IS_ERR(stm32port->tx_ch))
stm32port->tx_ch = NULL;
ret = stm32_usart_init_port(stm32port, pdev); if (ret) goto err_dma_tx;
/* Optional wake-up source: make the device wakeup-capable and arm its irq. */
if (stm32port->wakeup_src) {
device_set_wakeup_capable(&pdev->dev, true);
ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq); if (ret) goto err_deinit_port;
}
/* DMA channel setup may still fail; release and fall back per direction. */
if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) { /* Fall back in interrupt mode */
dma_release_channel(stm32port->rx_ch);
stm32port->rx_ch = NULL;
}
if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) { /* Fall back in interrupt mode */
dma_release_channel(stm32port->tx_ch);
stm32port->tx_ch = NULL;
}
if (!stm32port->rx_ch)
dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n"); if (!stm32port->tx_ch)
dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");
/* Restore interrupt state */
writel_relaxed(old_cr1, port->membase + ofs->cr1);
if (locked)
uart_port_unlock_irqrestore(port, flags);
}
/*
 * Console setup callback: validate the requested port index, make sure the
 * port is already mapped, and parse any "baud,parity,bits,flow" options
 * string supplied on the kernel command line.
 *
 * NOTE(review): truncated — the expected tail (presumably a call to
 * uart_set_options() with the parsed values, and the closing brace) is not
 * in this chunk. Confirm against the original file.
 */
staticint stm32_usart_console_setup(struct console *co, char *options)
{ struct stm32_port *stm32port; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n';
if (co->index >= STM32_MAX_PORTS) return -ENODEV;
stm32port = &stm32_ports[co->index];
/* * This driver does not support early console initialization * (use ARM early printk support instead), so we only expect * this to be called during the uart port registration when the * driver gets probed and the port should be mapped at that point.
 */ if (stm32port->port.mapbase == 0 || !stm32port->port.membase) return -ENXIO;
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
if (!stm32_port->wakeup_src || !tty_port_initialized(tport)) return 0;
/* * Enable low-power wake-up and wake-up irq if argument is set to * "enable", disable low-power wake-up and wake-up irq otherwise
*/ if (enable) {
stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
mctrl_gpio_enable_irq_wake(stm32_port->gpios);
/* * When DMA is used for reception, it must be disabled before * entering low-power mode and re-enabled when exiting from * low-power mode.
*/ if (stm32_port->rx_ch) {
uart_port_lock_irqsave(port, &flags); /* Poll data from DMA RX buffer if any */ if (!stm32_usart_rx_dma_pause(stm32_port))
size += stm32_usart_receive_chars(port, true);
stm32_usart_rx_dma_terminate(stm32_port);
uart_unlock_and_check_sysrq_irqrestore(port, flags); if (size)
tty_flip_buffer_push(tport);
}
/* Poll data from RX FIFO if any */
stm32_usart_receive_chars(port, false);
} else { if (stm32_port->rx_ch) {
ret = stm32_usart_rx_dma_start_or_resume(port); if (ret) return ret;
}
mctrl_gpio_disable_irq_wake(stm32_port->gpios);
stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
}
if (device_may_wakeup(dev) || device_wakeup_path(dev)) {
ret = stm32_usart_serial_en_wakeup(port, true); if (ret) return ret;
}
/* * When "no_console_suspend" is enabled, keep the pinctrl default state * and rely on bootloader stage to restore this state upon resume. * Otherwise, apply the idle or sleep states depending on wakeup * capabilities.
*/ if (console_suspend_enabled || !uart_console(port)) { if (device_may_wakeup(dev) || device_wakeup_path(dev))
pinctrl_pm_select_idle_state(dev); else
pinctrl_pm_select_sleep_state(dev);
}
The information on this web page has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.