/* esdACC Status Register bits. Unused bits not documented. */
/* NOTE(review): by naming, ES/EP/BS presumably mean error-warning,
 * error-passive and bus-off status — verify against the esdACC FPGA
 * register documentation.
 */
#define ACC_REG_STATUS_MASK_STATUS_ES BIT(17)
#define ACC_REG_STATUS_MASK_STATUS_EP BIT(18)
#define ACC_REG_STATUS_MASK_STATUS_BS BIT(19)

/* esdACC Overview Module BM_IRQ_Mask register related defines */
/* Two bit wide command masks to mask or unmask a single core IRQ */
#define ACC_BM_IRQ_UNMASK BIT(0)
#define ACC_BM_IRQ_MASK (ACC_BM_IRQ_UNMASK << 1)

/* Command to unmask all IRQ sources. Created by shifting
 * and oring the two bit wide ACC_BM_IRQ_UNMASK 16 times
 * (binary 01 repeated in all 16 two-bit command fields).
 */
#define ACC_BM_IRQ_UNMASK_ALL 0x55555555U
/* Convert timestamp from esdACC time stamp ticks to ns
 *
 * The conversion factor ts2ns from time stamp counts to ns is basically
 *	ts2ns = NSEC_PER_SEC / timestamp_frequency
 *
 * We handle here only a fixed timestamp frequency of 80MHz. The
 * resulting ts2ns factor would be 12.5.
 *
 * At the end we multiply by 12 and add the half of the HW timestamp
 * to get a multiplication by 12.5. This way any overflow is
 * avoided until ktime_t itself overflows.
 */
/* Integer division truncates 12.5 down to 12; the missing 0.5 per tick
 * is added back via the ">> ACC_TS_80MHZ_SHIFT" (divide by 2) term.
 */
#define ACC_TS_FACTOR (NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
#define ACC_TS_80MHZ_SHIFT 1
/* Depending on esdACC feature NEW_PSC enable the new prescaler
 * or adjust core_frequency according to the implicit division by 2.
 */
/* NOTE(review): fragment of a larger function — `ov` is declared in the
 * surrounding (not visible) scope.
 */
if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
	/* FPGA supports the new prescaler: switch it on in the mode reg. */
	acc_ov_set_bits(ov, ACC_OV_OF_MODE,
			ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
} else {
	/* Old prescaler divides the clock by 2 implicitly; account for
	 * that in the frequency used for bit timing calculations.
	 */
	ov->core_frequency /= 2;
}
/* Retry to enter RESET mode if out of sync. */
/* Driver state says the controller should already be stopped; warn
 * loudly (this indicates a state-tracking bug) and force the hardware
 * and the software state back in sync.
 */
if (priv->can.state != CAN_STATE_STOPPED) {
	netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
		    __func__, can_get_state_str(priv->can.state));
	acc_resetmode_enter(core);
	priv->can.state = CAN_STATE_STOPPED;
}
/* Let the CAN core layer decide whether this skb must be dropped
 * (invalid frames etc.); nothing more to do for such frames.
 */
if (can_dev_dropped_skb(netdev, skb))
	return NETDEV_TX_OK;

/* Access core->tx_fifo_tail only once because it may be changed
 * from the interrupt level.
 */
fifo_usage = tx_fifo_head - core->tx_fifo_tail;
if (fifo_usage < 0)
	fifo_usage += core->tx_fifo_size;	/* wrap around of the ring */

/* A full ring here means the queue was not stopped in time — that is
 * a driver bug, so log it and push back on the networking stack.
 */
if (fifo_usage >= core->tx_fifo_size - 1) {
	netdev_err(core->netdev, "BUG: TX ring full when queue awake!\n");
	netif_stop_queue(netdev);
	return NETDEV_TX_BUSY;
}

/* Stop the queue one slot early so that one free slot always remains
 * (the check above treats size - 1 as "full").
 */
if (fifo_usage == core->tx_fifo_size - 2)
	netif_stop_queue(netdev);

/* Build the esdACC DLC word: classic CAN DLC plus RTR and
 * single-shot (one-shot mode) flag bits.
 */
acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
if (cf->can_id & CAN_RTR_FLAG)
	acc_dlc |= ACC_DLC_RTR_FLAG;
if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
	acc_dlc |= ACC_DLC_SSTX_FLAG;
/* Sanity check: on an idle TX path the hardware FIFO head (TFHW) is
 * expected to match both the software head and tail indices; warn if
 * the bookkeeping has diverged.
 */
if (hw_fifo_head != priv->core->tx_fifo_head ||
    hw_fifo_head != priv->core->tx_fifo_tail) {
	netdev_warn(netdev, "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
		    priv->core->tx_fifo_tail,
		    priv->core->tx_fifo_head,
		    tx_fifo_status);
}
/* NOTE(review): this brace closes an enclosing scope that is not
 * visible in this chunk.
 */
}
acc_resetmode_leave(priv->core);

/* To leave the bus-off state the esdACC controller begins
 * here a grace period where it counts 128 "idle conditions" (each
 * of 11 consecutive recessive bits) on the bus as required
 * by the CAN spec.
 *
 * During this time the TX FIFO may still contain already
 * aborted "zombie" frames that are only drained from the FIFO
 * at the end of the grace period.
 *
 * To not to interfere with this drain process we don't
 * call netif_wake_queue() here. When the controller reaches
 * the error-active state again, it informs us about that
 * with an acc_bmmsg_errstatechange message. Then
 * netif_wake_queue() is called from
 * handle_core_msg_errstatechange() instead.
 */
/* NOTE(review): this break leaves a switch/loop whose header is not
 * visible in this chunk.
 */
break;
/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
/* NOTE(review): the following pair repeats the two writes above
 * verbatim. This looks like an overlap artifact of how this chunk was
 * extracted (the two pairs probably belong to two different functions
 * in the original file) — confirm against the full source before
 * assuming an intentional double write.
 */
/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
acc_write32(priv->core, ACC_CORE_OF_BTR, btr);
/* lost_cnt may be 0 if not supported by esdACC version */
/* Account at least one lost frame even when the hardware cannot
 * report an exact count.
 */
if (msg->lost_cnt) {
	stats->rx_errors += msg->lost_cnt;
	stats->rx_over_errors += msg->lost_cnt;
} else {
	stats->rx_errors++;
	stats->rx_over_errors++;
}

skb = alloc_can_err_skb(core->netdev, &cf);
if (!skb)
	return;

/* Always call can_change_state() to update the state
 * even if alloc_can_err_skb() may have failed.
 * can_change_state() can cope with a NULL cf pointer.
 */
/* NOTE(review): the "if (!skb) return;" above contradicts this
 * comment — on allocation failure can_change_state() is never reached.
 * Most likely two separate functions (overrun handling and error-state
 * handling) were merged by the chunk extraction; verify against the
 * full source.
 */
can_change_state(core->netdev, cf, tx_state, rx_state);
}
/**
 * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
 *
 * @ov: overview module structure
 * @cores: array of core structures
 *
 * This function handles all interrupts pending for the overview module and the
 * CAN cores of the esdACC FPGA.
 *
 * It examines for all cores (the overview module core and the CAN cores)
 * the bmfifo.irq_cnt and compares it with the previously saved
 * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA
 * updates the bmfifo.irq_cnt values by DMA.
 *
 * The pending interrupts are masked by writing to the IRQ mask register at
 * ACC_OV_OF_BM_IRQ_MASK. This register has for each core a two bit command
 * field evaluated as follows:
 *
 * Define, bit pattern: meaning
 * 00: no action
 * ACC_BM_IRQ_UNMASK, 01: unmask interrupt
 * ACC_BM_IRQ_MASK, 10: mask interrupt
 * 11: no action
 *
 * For each CAN core with a pending IRQ handle_core_interrupt() handles all
 * busmaster messages from the message FIFO. The last handled message (FIFO
 * index) is written to the CAN core to acknowledge its handling.
 *
 * Last step is to unmask all interrupts in the FPGA using
 * ACC_BM_IRQ_UNMASK_ALL.
 *
 * Return:
 *	IRQ_HANDLED, if card generated an interrupt that was handled
 *	IRQ_NONE, if the interrupt is not ours
 */
irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
{
	u32 irqmask;
	int i;

	/* First we look for whom interrupts are pending, card/overview
	 * or any of the cores. Two bits in irqmask are used for each;
	 * Each two bit field is set to ACC_BM_IRQ_MASK if an IRQ is
	 * pending.
	 */
	irqmask = 0U;
	/* Bits 1:0 belong to the overview module itself. */
	if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
		irqmask |= ACC_BM_IRQ_MASK;
		ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
	}

	/* Core i uses the two-bit field at bit position 2 * (i + 1). */
	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
			irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
			core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
		}
	}

	/* Nothing pending anywhere: the interrupt is not ours. */
	if (!irqmask)
		return IRQ_NONE;

	/* At second we tell the card we're working on them by writing irqmask,
	 * call handle_{ov|core}_interrupt and then acknowledge the
	 * interrupts by writing irq_cnt:
	 */
	acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);
	if (irqmask & ACC_BM_IRQ_MASK) {
		/* handle_ov_interrupt(); - no use yet. */
		acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
			       ov->bmfifo.local_irq_cnt);
	}

	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
			handle_core_interrupt(core);
			/* NOTE(review): an overview-module offset
			 * (ACC_OV_OF_BM_IRQ_COUNTER) is used with the
			 * per-core acc_write32() accessor here, while
			 * every other per-core access in this chunk uses
			 * an ACC_CORE_OF_* offset. Verify this is the
			 * intended register.
			 */
			acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
				    core->bmfifo.local_irq_cnt);
		}
	}
	/* NOTE(review): the function tail (the documented "unmask all
	 * interrupts" step and the IRQ_HANDLED return) is not visible in
	 * this chunk.
	 */
/*
 * NOTE(review): the German text that followed here is website disclaimer
 * boilerplate accidentally pasted into the source, not part of the driver.
 * Translated: "The information on this website has been carefully compiled
 * to the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 * It should be removed from the file.
 */