if (priv->txdescphys)
dma_unmap_single(priv->device, priv->txdescphys,
priv->txdescmem, DMA_TO_DEVICE);
}
/* This function resets the SGDMA controller and clears the * descriptor memory used for transmits and receives.
*/ void sgdma_reset(struct altera_tse_private *priv)
{ /* Initialize descriptor memory to 0 */
memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);
/* transmits buffer through SGDMA. Returns number of buffers * transmitted, 0 if not possible. * * tx_lock is held by the caller
*/ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{ struct sgdma_descrip __iomem *descbase =
(struct sgdma_descrip __iomem *)priv->tx_dma_desc;
if (rxstatus) {
csrwr8(0, desc, sgdma_descroffs(status));
rxbuffer = dequeue_rx(priv); if (rxbuffer == NULL)
netdev_info(priv->dev, "sgdma rx and rx queue empty!\n");
/* Clear control */
csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control)); /* clear status */
csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));
/* kick the rx sgdma after reaping this descriptor */
sgdma_async_read(priv);
} else { /* If the SGDMA indicated an end of packet on recv, * then it's expected that the rxstatus from the * descriptor is non-zero - meaning a valid packet * with a nonzero length, or an error has been * indicated. if not, then all we can do is signal * an error and return no packet received. Most likely * there is a system design error, or an error in the * underlying kernel (cache or cache management problem)
*/
netdev_err(priv->dev, "SGDMA RX Error Info: %x, %x, %x\n",
sts, csrrd8(desc, sgdma_descroffs(status)),
rxstatus);
}
} elseif (sts == 0) {
sgdma_async_read(priv);
}
return rxstatus;
}
/* Private functions */ staticvoid sgdma_setup_descrip(struct sgdma_descrip __iomem *desc, struct sgdma_descrip __iomem *ndesc,
dma_addr_t ndesc_phys,
dma_addr_t raddr,
dma_addr_t waddr,
u16 length, int generate_eop, int rfixed, int wfixed)
{ /* Clear the next descriptor as not owned by hardware */
/* If hardware is busy, don't restart async read. * if status register is 0 - meaning initial state, restart async read, * probably for the first time when populating a receive buffer. * If read status indicate not busy and a status, restart the async * DMA read.
*/ staticint sgdma_async_read(struct altera_tse_private *priv)
{ struct sgdma_descrip __iomem *descbase =
(struct sgdma_descrip __iomem *)priv->rx_dma_desc;
/* Removes the first entry of @list into @entry, or sets @entry to NULL
 * when the list is empty. A statement macro (not a function) because the
 * entry type and link member vary per call site.
 */
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)
/* Points @entry at the first entry of @list without removing it, or sets
 * @entry to NULL when the list is empty. Statement macro for the same
 * reason as list_remove_head.
 */
#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)
/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}
/* adds a tse_buffer to the tail of an rx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}
/* dequeues a tse_buffer from the transmit buffer list, otherwise * returns NULL if empty. * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list.
*/ staticstruct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{ struct tse_buffer *buffer = NULL;
list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh); return buffer;
}
/* dequeues a tse_buffer from the receive buffer list, otherwise * returns NULL if empty * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list.
*/ staticstruct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{ struct tse_buffer *buffer = NULL;
list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); return buffer;
}
/* dequeues a tse_buffer from the receive buffer list, otherwise * returns NULL if empty * assumes the caller is managing and holding a mutual exclusion * primitive to avoid simultaneous pushes/pops to the list while the * head is being examined.
*/ staticstruct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{ struct tse_buffer *buffer = NULL;
list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh); return buffer;
}
/* check and return rx sgdma status without polling; nonzero means the
 * rx DMA engine is still busy.
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
	       & SGDMA_STSREG_BUSY;
}
/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 * (polls the status register up to 100 times with 1us delays).
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;

	/* if DMA is busy, wait for current transaction to finish */
	while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
		& SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
	    & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}
/* NOTE(review): the following text is non-source residue from a web
 * syntax-highlighting/measurement tool and is not part of the driver.
 * Preserved here (translated from German) inside a comment so it no
 * longer breaks compilation:
 * "Measurement V0.5 — processing time: 0.25 seconds (preprocessed).
 * The information on this website was carefully compiled to the best of
 * our knowledge; however, no guarantee is given for the completeness,
 * correctness, or quality of the information provided.
 * Note: the syntax colouring and the timing measurement are still
 * experimental."
 */