/*
 * RX HW/SW interaction overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * There are 2 types of RX communication channels between driver and NIC.
 * 1) RX Free Fifo - RXF - holds descriptors of empty buffers to accept
 *    incoming traffic. This Fifo is filled by SW and is read by HW. Each
 *    descriptor holds info about the buffer's location, size and ID. An ID
 *    field is used to identify a buffer when it's returned with data via the
 *    RXD Fifo (see below).
 * 2) RX Data Fifo - RXD - holds descriptors of full buffers. This Fifo is
 *    filled by HW and is read by SW. Each descriptor holds status and ID.
 * HW pops a descriptor from the RXF Fifo, stores the ID, fills the buffer
 * with incoming data, moves it into host memory via DMA, builds a new RXD
 * descriptor with the same ID, pushes it into the RXD Fifo and raises an
 * interrupt to indicate new RX data.
 *
 * The current NIC configuration (registers + firmware) makes the NIC use 2
 * RXF Fifos. One holds 1.5K packets and the other 26K packets. Depending on
 * the incoming packet size, HW decides which RXF Fifo to pop a buffer from.
 * When the packet is filled with data, HW builds a new RXD descriptor for it
 * and pushes it into the single RXD Fifo.
 *
 * RX SW Data Structures
 * ~~~~~~~~~~~~~~~~~~~~~
 * skb db - used to keep track of all skbs owned by SW and their dma
 * addresses. For the RX case, ownership lasts from allocating a new empty skb
 * for RXF until accepting the full skb from RXD and passing it to the OS.
 * Each RXF Fifo has its own skb db. Implemented as an array with a bitmask.
 * fifo - keeps info about the fifo's size and location, relevant HW
 * registers, usage and skb db. Each RXD and RXF Fifo has its own fifo
 * structure. Implemented as a simple struct.
 *
 * RX SW Execution Flow
 * ~~~~~~~~~~~~~~~~~~~~
 * Upon initialization (ifconfig up) the driver creates RX fifos and
 * initializes relevant registers. At the end of the init phase, the driver
 * enables interrupts. The NIC sees that there are no RXF buffers and raises
 * the RD_INTR interrupt; the isr fills skbs and Rx begins.
 *
 * The driver has two receive operation modes:
 * NAPI - interrupt-driven mixed with polling
 * interrupt-driven only
 *
 * The interrupt-driven-only flow is as follows. When a buffer is ready, HW
 * raises an interrupt and the isr is called. The isr collects all available
 * packets (bdx_rx_receive), refills skbs (bdx_rx_alloc_skbs) and exits.
 *
 * Rx buffer allocation note
 * ~~~~~~~~~~~~~~~~~~~~~~~~~
 * The driver takes care to feed only as many RxF descriptors that the
 * corresponding amount of RxD descriptors cannot fill the entire RxD fifo.
 * The main reason is the lack of an overflow check in Bordeaux for the RxD
 * fifo free/used size.
 * FIXME: this is NOT fully implemented, more work should be done.
 */
#define bdx_enable_interrupts(priv) \ do { WRITE_REG(priv, regIMR, IR_RUN); } while (0) #define bdx_disable_interrupts(priv) \ do { WRITE_REG(priv, regIMR, 0); } while (0)
/** * bdx_fifo_init - create TX/RX descriptor fifo for host-NIC communication. * @priv: NIC private structure * @f: fifo to initialize * @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB * @reg_CFG0: offsets of registers relative to base address * @reg_CFG1: offsets of registers relative to base address * @reg_RPTR: offsets of registers relative to base address * @reg_WPTR: offsets of registers relative to base address * * 1K extra space is allocated at the end of the fifo to simplify * processing of descriptors that wraps around fifo's end * * Returns 0 on success, negative value on failure *
*/ staticint
/* NOTE(review): the body below does not match the bdx_fifo_init contract
 * documented above -- after computing memsz it tests an undeclared 'isr'
 * variable and never allocates the fifo or returns a value. This looks like
 * a mis-merge with bdx_isr_extra()'s PCI-E fault reporting; the original
 * fifo allocation body must be restored before this file can build. */
bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
u16 reg_CFG0, u16 reg_CFG1, u16 reg_RPTR, u16 reg_WPTR)
{
/* fifo byte size scales as FIFO_SIZE * 2^fsz_type (0-4KB .. 3-32KB) */
u16 memsz = FIFO_SIZE * (1 << fsz_type);
if (isr & IR_PCIE_LINK)
netdev_err(priv->ndev, "PCI-E Link Fault\n");
if (isr & IR_PCIE_TOUT)
netdev_err(priv->ndev, "PCI-E Time Out\n");
}
/** * bdx_isr_napi - Interrupt Service Routine for Bordeaux NIC * @irq: interrupt number * @dev: network device * * Return IRQ_NONE if it was not our interrupt, IRQ_HANDLED - otherwise * * It reads ISR register to know interrupt reasons, and proceed them one by one. * Reasons of interest are: * RX_DESC - new packet has arrived and RXD fifo holds its descriptor * RX_FREE - number of free Rx buffers in RXF fifo gets low * TX_FREE - packet was transmited and RXF fifo holds its descriptor
*/
ENTER;
isr = (READ_REG(priv, regISR) & IR_RUN); if (unlikely(!isr)) {
bdx_enable_interrupts(priv); return IRQ_NONE; /* Not our interrupt */
}
if (isr & IR_EXTRA)
bdx_isr_extra(priv, isr);
if (isr & (IR_RX_DESC_0 | IR_TX_FREE_0)) { if (likely(napi_schedule_prep(&priv->napi))) {
__napi_schedule(&priv->napi);
RET(IRQ_HANDLED);
} else { /* NOTE: we get here if intr has slipped into window * between these lines in bdx_poll: * bdx_enable_interrupts(priv); * return 0; * currently intrs are disabled (since we read ISR), * and we have failed to register next poll. * so we read the regs to trigger chip
* and allow further interrupts. */
READ_REG(priv, regTXF_WPTR_0);
READ_REG(priv, regRXD_WPTR_0);
}
}
bdx_enable_interrupts(priv);
RET(IRQ_HANDLED);
}
/*
 * bdx_poll - NAPI poll callback: reclaims TX resources, then receives up to
 * @budget packets from the RXD fifo.
 * NOTE(review): the function body is truncated here -- the expected
 * napi_complete()/re-enable-interrupts tail and the closing brace are
 * missing from this chunk; restore them before building.
 */
staticint bdx_poll(struct napi_struct *napi, int budget)
{ struct bdx_priv *priv = container_of(napi, struct bdx_priv, napi); int work_done;
ENTER;
/* reclaim TXF descriptors first so tx level is refreshed */
bdx_tx_cleanup(priv);
work_done = bdx_rx_receive(priv, &priv->rxd_fifo0, budget); if ((work_done < budget) ||
(priv->napi_stop++ >= 30)) {
DBG("rx poll is done. backing to isr-driven\n");
/* from time to time we exit to let NAPI layer release
* device lock and allow waiting tasks (eg rmmod) to advance) */
priv->napi_stop = 0;
/** * bdx_fw_load - loads firmware to NIC * @priv: NIC private structure * * Firmware is loaded via TXD fifo, so it must be initialized first. * Firware must be loaded once per NIC not per PCI device provided by NIC (NIC * can have few of them). So all drivers use semaphore register to choose one * that will actually load FW to NIC.
*/
staticint bdx_fw_load(struct bdx_priv *priv)
{ conststruct firmware *fw = NULL; int master, i; int rc;
ENTER;
/* the board whose regINIT_SEMAPHORE read returns non-zero becomes the
 * "master" and is the one PCI function that actually pushes the image;
 * the firmware blob is streamed through the TXD fifo */
master = READ_REG(priv, regINIT_SEMAPHORE); if (!READ_REG(priv, regINIT_STATUS) && master) {
rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev); if (rc) goto out;
bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
mdelay(100);
/* poll up to 200 * 2ms for the NIC to report INIT done */
} for (i = 0; i < 200; i++) { if (READ_REG(priv, regINIT_STATUS)) {
rc = 0; goto out;
}
mdelay(2);
}
rc = -EIO;
out: if (master)
WRITE_REG(priv, regINIT_SEMAPHORE, 1);
/* NOTE(review): everything from here to RET(0) appears mis-merged from
 * bdx_hw_reset()'s numbered reset sequence; bdx_fw_load would normally
 * release the firmware and return rc here. Restore before building. */
/* 7. reset queue */
WRITE_REG(priv, regRST_QU, 1); /* 8. reset port */
WRITE_REG(priv, regRST_PORT, 1); /* 9. zero all read and write pointers */ for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR); for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
WRITE_REG(priv, i, 0); /* 10. unset port disable */
WRITE_REG(priv, regDIS_PORT, 0); /* 11. unset queue disable */
WRITE_REG(priv, regDIS_QU, 0); /* 12. unset queue reset */
WRITE_REG(priv, regRST_QU, 0); /* 13. unset port reset */
WRITE_REG(priv, regRST_PORT, 0); /* 14. enable Rx */ /* skipped: will be done later */ /* 15. save MAC (obsolete) */ for (i = regTXD_WPTR_0; i <= regTXF_RPTR_3; i += 0x10)
DBG("%x = %x\n", i, READ_REG(priv, i) & TXF_WPTR_WR_PTR);
RET(0);
}
/**
 * bdx_reset - dispatch to the reset routine matching the hardware type
 * @priv: NIC private structure
 *
 * Device id 0x3009 needs the full HW reset; every other device takes the
 * SW reset path.
 */
static int bdx_reset(struct bdx_priv *priv)
{
	ENTER;
	if (priv->pdev->device == 0x3009)
		RET(bdx_hw_reset(priv));
	RET(bdx_sw_reset(priv));
}
/** * bdx_close - Disables a network interface * @ndev: network interface device structure * * Returns 0, this is not allowed to fail * * The close entry point is called when an interface is de-activated * by the OS. The hardware is still under the drivers control, but * needs to be disabled. A global MAC reset is issued to stop the * hardware, and all transmit and receive resources are freed.
**/ staticint bdx_close(struct net_device *ndev)
{ struct bdx_priv *priv = NULL;
/** * bdx_open - Called when a network interface is made active * @ndev: network interface device structure * * Returns 0 on success, negative value on failure * * The open entry point is called when a network interface is made * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the watchdog timer is started, * and the stack is notified that the interface is ready.
**/ staticint bdx_open(struct net_device *ndev)
{ struct bdx_priv *priv; int rc;
ENTER;
priv = netdev_priv(ndev);
bdx_reset(priv); if (netif_running(ndev))
netif_stop_queue(priv->ndev);
case BDX_OP_WRITE:
error = bdx_range_check(priv, data[1]); if (error < 0) return error;
WRITE_REG(priv, data[1], data[2]);
DBG("write_reg(0x%x, 0x%x)\n", data[1], data[2]); break;
default:
RET(-EOPNOTSUPP);
} return 0;
}
/**
 * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
 * @ndev: network device
 * @vid: VLAN vid
 * @enable: enable or disable vlan
 *
 * Passes VLAN filter table to hardware
 */
static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
{
	struct bdx_priv *priv = netdev_priv(ndev);
	u32 reg, bit, val;

	ENTER;
	DBG2("vid=%d value=%d\n", (int)vid, enable);
	if (unlikely(vid >= 4096)) {
		/* message fixed: the check rejects vid == 4096 as well */
		pr_err("invalid VID: %u (>= 4096)\n", vid);
		RET();
	}
	/* one bit per vid, 32 vids per 32-bit filter register */
	reg = regVLAN_0 + (vid / 32) * 4;
	/* unsigned constant: 1 << 31 on a signed int is undefined behavior */
	bit = 1U << (vid % 32);
	val = READ_REG(priv, reg);
	DBG2("reg=%x, val=%x, bit=%d\n", reg, val, bit);
	if (enable)
		val |= bit;
	else
		val &= ~bit;
	DBG2("new val %x\n", val);
	WRITE_REG(priv, reg, val);
	RET();
}
/**
 * bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
 * @ndev: network device
 * @proto: unused
 * @vid: VLAN vid to add
 */
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	/* delegate to the common helper with the enable flag set */
	__bdx_vlan_rx_vid(ndev, vid, 1);
	return 0;
}
/**
 * bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
 * @ndev: network device
 * @proto: unused
 * @vid: VLAN vid to kill
 */
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	/* delegate to the common helper with the enable flag cleared */
	__bdx_vlan_rx_vid(ndev, vid, 0);
	return 0;
}
/**
 * bdx_change_mtu - Change the Maximum Transfer Unit
 * @ndev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
	ENTER;

	WRITE_ONCE(ndev->mtu, new_mtu);
	/* a running interface has to be bounced to apply the new size */
	if (netif_running(ndev)) {
		bdx_close(ndev);
		bdx_open(ndev);
	}
	RET(0);
}
/* FIXME: RXE(OFF) */ if (ndev->flags & IFF_PROMISC) {
rxf_val |= GMAC_RX_FILTER_PRM;
} elseif (ndev->flags & IFF_ALLMULTI) { /* set IMF to accept all multicast frmaes */ for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
} elseif (!netdev_mc_empty(ndev)) {
u8 hash; struct netdev_hw_addr *ha;
u32 reg, val;
/* set IMF to deny all multicast frames */ for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, 0); /* set PMF to deny all multicast frames */ for (i = 0; i < MAC_MCST_NUM; i++) {
WRITE_REG(priv, regRX_MAC_MCST0 + i * 8, 0);
WRITE_REG(priv, regRX_MAC_MCST1 + i * 8, 0);
}
/* use PMF to accept first MAC_MCST_NUM (15) addresses */ /* TBD: sort addresses and write them in ascending order * into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames throu IMF */ /* accept the rest of addresses throu IMF */
netdev_for_each_mc_addr(ha, ndev) {
hash = 0; for (i = 0; i < ETH_ALEN; i++)
hash ^= ha->addr[i];
reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
val = READ_REG(priv, reg);
val |= (1 << (hash % 32));
WRITE_REG(priv, reg, val);
}
/** * bdx_rx_init - initialize RX all related HW and SW resources * @priv: NIC private structure * * Returns 0 on success, negative value on failure * * It creates rxf and rxd fifos, update relevant HW registers, preallocate * skb for rx. It assumes that Rx is desabled in HW * funcs are grouped for better cache usage * * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be * filled and packets will be dropped by nic without getting into host or * cousing interrupt. Anyway, in that condition, host has no chance to process * all packets, but dropping in nic is cheaper, since it takes 0 cpu cycles
*/
/** * bdx_rx_alloc_skbs - fill rxf fifo with new skbs * @priv: nic's private structure * @f: RXF fifo that needs skbs * * It allocates skbs, build rxf descs and push it (rxf descr) into rxf fifo. * skb's virtual and physical addresses are stored in skb db. * To calculate free space, func uses cached values of RPTR and WPTR * When needed, it also updates RPTR and WPTR.
*/
/* TBD: do not update WPTR if no desc were written */
/** * bdx_rx_receive - receives full packets from RXD fifo and pass them to OS * NOTE: a special treatment is given to non-continuous descriptors * that start near the end, wraps around and continue at the beginning. a second * part is copied right after the first, and then descriptor is interpreted as * normal. fifo has an extra space to allow such operations * @priv: nic's private structure * @f: RXF fifo that needs skbs * @budget: maximum number of packets to receive
*/
/*
 * TX HW/SW interaction overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * There are 2 types of TX communication channels between driver and NIC.
 * 1) TX Free Fifo - TXF - holds ack descriptors for sent packets
 * 2) TX Data Fifo - TXD - holds descriptors of full buffers.
 *
 * Currently the NIC supports TSO, checksumming and gather DMA;
 * UFO and IP fragmentation are on the way.
 *
 * TX SW Data Structures
 * ~~~~~~~~~~~~~~~~~~~~~
 * txdb - used to keep track of all skbs owned by SW and their dma addresses.
 * For the TX case, ownership lasts from getting the packet via hard_xmit
 * until HW acknowledges it via TXF descriptors.
 * Implemented as a cyclic buffer.
 * fifo - keeps info about the fifo's size and location, relevant HW
 * registers, usage and skb db. Each TXD and TXF Fifo has its own fifo
 * structure. Implemented as a simple struct.
 *
 * TX SW Execution Flow
 * ~~~~~~~~~~~~~~~~~~~~
 * The OS calls the driver's hard_xmit method with a packet to send.
 * The driver creates DMA mappings, builds TXD descriptors and kicks HW
 * by updating TXD WPTR.
 * When a packet is sent, HW writes us a TXF descriptor and SW frees the
 * original skb.
 * To prevent TXD fifo overflow without reading HW registers every time,
 * SW deploys a "tx level" technique.
 * Upon start up, the tx level is initialized to the TXD fifo length.
 * For every sent packet, SW gets its TXD descriptor size
 * (from a precalculated array) and subtracts it from the tx level.
 * The size is also stored in the txdb. When a TXF ack arrives, SW fetches
 * the size of the original TXD descriptor from the txdb and adds it back to
 * the tx level.
 * When the tx level drops below a predefined threshold, the driver stops
 * the TX queue. When the tx level rises above that threshold, the tx queue
 * is enabled again.
 *
 * This technique avoids excessive reading of the RPTR and WPTR registers.
 * As our benchmarks show, it adds 1.5 Gbit/sec to the NIC's throughput.
 */
BDX_ASSERT(*pptr != db->rptr && /* expect either read */
*pptr != db->wptr); /* or write pointer */
BDX_ASSERT(*pptr < db->start || /* pointer has to be */
*pptr >= db->end); /* in range */
++*pptr; if (unlikely(*pptr == db->end))
*pptr = db->start;
}
/**
 * bdx_tx_db_inc_rptr - increment read pointer
 * @db: tx data base
 */
static inline void bdx_tx_db_inc_rptr(struct txdb *db)
{
	/* rptr == wptr means the db is empty; reading then is a bug */
	BDX_ASSERT(db->rptr == db->wptr);
	__bdx_tx_db_ptr_next(db, &db->rptr);
}
/**
 * bdx_tx_db_inc_wptr - increment write pointer
 * @db: tx data base
 */
static inline void bdx_tx_db_inc_wptr(struct txdb *db)
{
	__bdx_tx_db_ptr_next(db, &db->wptr);
	/* a write may never leave rptr == wptr: that state means "empty" */
	BDX_ASSERT(db->rptr == db->wptr);
}
/**
 * bdx_tx_db_init - creates and initializes tx db
 * @d: tx data base
 * @sz_type: size of tx fifo
 *
 * Returns 0 on success, error code otherwise
 */
static int bdx_tx_db_init(struct txdb *d, int sz_type)
{
	int memsz = FIFO_SIZE * (1 << (sz_type + 1));

	d->start = vmalloc(memsz);
	if (!d->start)
		return -ENOMEM;

	/*
	 * One slot is sacrificed so that "full" and "empty" can be told
	 * apart: rptr == wptr is reserved to mean the db is empty.
	 */
	d->size = memsz / sizeof(struct tx_map) - 1;
	d->end = d->start + d->size + 1;	/* just past the last element */

	/* a freshly created db starts out empty */
	d->rptr = d->start;
	d->wptr = d->start;
	return 0;
}
/** * bdx_tx_db_close - closes tx db and frees all memory * @d: tx data base
*/ staticvoid bdx_tx_db_close(struct txdb *d)
{
BDX_ASSERT(d == NULL);
/* sizes of tx desc (including padding if needed) as function
* of skb's frag number */ staticstruct {
u16 bytes;
u16 qwords; /* qword = 64 bit */
} txd_sizes[MAX_SKB_FRAGS + 1];
/** * bdx_tx_map_skb - creates and stores dma mappings for skb's data blocks * @priv: NIC private structure * @skb: socket buffer to map * @txdd: TX descriptor to use * * It makes dma mappings for skb's data blocks and writes them to PBL of * new tx descriptor. It also stores them in the tx db, so they could be * unmaped after data was sent. It is reponsibility of a caller to make * sure that there is enough space in the tx db. Last element holds pointer * to skb itself and marked with zero length
*/ staticinlinevoid
bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb, struct txd_desc *txdd)
{ struct txdb *db = &priv->txdb; struct pbl *pbl = &txdd->pbl[0]; int nr_frags = skb_shinfo(skb)->nr_frags; int i;
/*
 * init_txd_sizes - precalculate descriptor sizes for skbs with up to 16
 * frags. The frag count is later used as an index to fetch the correct
 * descriptor size instead of recomputing it for every packet.
 */
static void __init init_txd_sizes(void)
{
	int i;

	/* a txd with one phys buffer takes 7 lwords; every additional
	 * phys buffer adds 3 more */
	for (i = 0; i <= MAX_SKB_FRAGS; i++) {
		int lwords = 7 + 3 * i;

		if (lwords % 2)
			lwords++;	/* pad to an even lword count */
		txd_sizes[i].qwords = lwords / 2;
		txd_sizes[i].bytes = lwords * 4;
	}
}
/* bdx_tx_init - initialize all Tx related stuff.
* Namely, TXD and TXF fifos, database etc */ staticint bdx_tx_init(struct bdx_priv *priv)
{ if (bdx_fifo_init(priv, &priv->txd_fifo0.m, priv->txd_size,
regTXD_CFG0_0,
regTXD_CFG1_0, regTXD_RPTR_0, regTXD_WPTR_0)) goto err_mem; if (bdx_fifo_init(priv, &priv->txf_fifo0.m, priv->txf_size,
regTXF_CFG0_0,
regTXF_CFG1_0, regTXF_RPTR_0, regTXF_WPTR_0)) goto err_mem;
/* The TX db has to keep mappings for all packets sent (on TxD)
* and not yet reclaimed (on TxF) */ if (bdx_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size))) goto err_mem;
/** * bdx_tx_space - calculates available space in TX fifo * @priv: NIC private structure * * Returns available space in TX fifo in bytes
*/ staticinlineint bdx_tx_space(struct bdx_priv *priv)
{ struct txd_fifo *f = &priv->txd_fifo0; int fsize;
/** * bdx_tx_transmit - send packet to NIC * @skb: packet to send * @ndev: network device assigned to NIC * Return codes: * o NETDEV_TX_OK everything ok. * o NETDEV_TX_BUSY Cannot transmit packet, try later * Usually a bug, means queue start/stop flow control is broken in * the driver. Note: the driver must NOT put the skb in its DMA ring.
*/ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb, struct net_device *ndev)
{ struct bdx_priv *priv = netdev_priv(ndev); struct txd_fifo *f = &priv->txd_fifo0; int txd_checksum = 7; /* full checksum */ int txd_lgsnd = 0; int txd_vlan_id = 0; int txd_vtag = 0; int txd_mss = 0;
int nr_frags = skb_shinfo(skb)->nr_frags; struct txd_desc *txdd; int len; unsignedlong flags;
/* increment TXD write pointer. In case of fifo wrapping copy reminder of the descriptor
to the beginning */
f->m.wptr += txd_sizes[nr_frags].bytes;
len = f->m.wptr - f->m.memsz; if (unlikely(len >= 0)) {
f->m.wptr = len; if (len > 0) {
BDX_ASSERT(len > f->m.memsz);
memcpy(f->m.va, f->m.va + f->m.memsz, len);
}
}
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* finished with valid wptr */
priv->tx_level -= txd_sizes[nr_frags].bytes;
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); #ifdef BDX_DELAY_WPTR if (priv->tx_level > priv->tx_update_mark) { /* Force memory writes to complete before letting h/w know there are new descriptors to fetch. (might be needed on platforms like IA64)
wmb(); */
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
} else { if (priv->tx_noupd++ > BDX_NO_UPD_PACKETS) {
priv->tx_noupd = 0;
WRITE_REG(priv, f->m.reg_WPTR,
f->m.wptr & TXF_WPTR_WR_PTR);
}
} #else /* Force memory writes to complete before letting h/w know there are new descriptors to fetch. (might be needed on platforms like IA64)
wmb(); */
WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
/** * bdx_tx_cleanup - clean TXF fifo, run in the context of IRQ. * @priv: bdx adapter * * It scans TXF fifo for descriptors, frees DMA mappings and reports to OS * that those packets were sent
*/ staticvoid bdx_tx_cleanup(struct bdx_priv *priv)
{ struct txf_fifo *f = &priv->txf_fifo0; struct txdb *db = &priv->txdb; int tx_level = 0;
ENTER;
f->m.wptr = READ_REG(priv, f->m.reg_WPTR) & TXF_WPTR_MASK;
BDX_ASSERT(f->m.rptr >= f->m.memsz); /* started with valid rptr */
/* unmap all the fragments */ /* first has to come tx_maps containing dma */
BDX_ASSERT(db->rptr->len == 0); do {
BDX_ASSERT(db->rptr->addr.dma == 0);
dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
db->rptr->len, DMA_TO_DEVICE);
bdx_tx_db_inc_rptr(db);
} while (db->rptr->len > 0);
tx_level -= db->rptr->len; /* '-' koz len is negative */
/* now should come skb pointer - free it */
dev_consume_skb_irq(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
}
/* let h/w know which TXF descriptors were cleaned */
BDX_ASSERT((f->m.wptr & TXF_WPTR_WR_PTR) >= f->m.memsz);
WRITE_REG(priv, f->m.reg_RPTR, f->m.rptr & TXF_WPTR_WR_PTR);
/* We reclaimed resources, so in case the Q is stopped by xmit callback,
* we resume the transmission and use tx_lock to synchronize with xmit.*/
spin_lock(&priv->tx_lock);
priv->tx_level += tx_level;
BDX_ASSERT(priv->tx_level <= 0 || priv->tx_level > BDX_MAX_TX_LEVEL); #ifdef BDX_DELAY_WPTR if (priv->tx_noupd) {
priv->tx_noupd = 0;
WRITE_REG(priv, priv->txd_fifo0.m.reg_WPTR,
priv->txd_fifo0.m.wptr & TXF_WPTR_WR_PTR);
} #endif
/**
 * bdx_tx_free_skbs - frees all skbs from TXD fifo.
 * @priv: NIC private structure
 *
 * It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
 */
static void bdx_tx_free_skbs(struct bdx_priv *priv)
{
	struct txdb *db = &priv->txdb;

	ENTER;
	/* drain every outstanding entry: length-carrying entries hold a DMA
	 * mapping, zero-length entries hold the skb pointer itself */
	while (db->rptr != db->wptr) {
		if (db->rptr->len == 0)
			dev_kfree_skb(db->rptr->addr.skb);
		else
			dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
				       db->rptr->len, DMA_TO_DEVICE);
		bdx_tx_db_inc_rptr(db);
	}
	RET();
}
/**
 * bdx_tx_push_desc - push descriptor to TxD fifo
 * @priv: NIC private structure
 * @data: desc's data
 * @size: desc's size
 *
 * Pushes desc to TxD fifo and overlaps it if needed.
 * NOTE: this func does not check for available space. this is responsibility
 *       of the caller. Neither does it check that data size is smaller than
 *       fifo size.
 */
static void bdx_tx_push_desc(struct bdx_priv *priv, void *data, int size)
{
	struct txd_fifo *f = &priv->txd_fifo0;
	int tail = f->m.memsz - f->m.wptr;	/* room left before wrap */

	if (size == 0)
		return;

	if (size < tail) {
		/* fits in one piece */
		memcpy(f->m.va + f->m.wptr, data, size);
		f->m.wptr += size;
	} else {
		/* split: fill up to the fifo end, wrap the rest around */
		memcpy(f->m.va + f->m.wptr, data, tail);
		f->m.wptr = size - tail;
		memcpy(f->m.va, data + tail, f->m.wptr);
	}
	WRITE_REG(priv, f->m.reg_WPTR, f->m.wptr & TXF_WPTR_WR_PTR);
}
/**
 * bdx_tx_push_desc_safe - push descriptor to TxD fifo in a safe way
 * @priv: NIC private structure
 * @data: desc's data
 * @size: desc's size
 *
 * NOTE: this func does check for available space and, if necessary, waits for
 *       NIC to read existing data before writing new one.
 */
static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
{
	int spins = 0;

	ENTER;
	while (size > 0) {
		/*
		 * Subtract 8: a completely full fifo would end with
		 * rptr == wptr, which is indistinguishable from an empty
		 * one. Keeping a small gap avoids that ambiguous state.
		 */
		int avail = bdx_tx_space(priv) - 8;

		if (avail <= 0) {
			if (spins++ > 300) {	/* prevent endless loop */
				DBG("timeout while writing desc to TxD fifo\n");
				break;
			}
			udelay(50);	/* give hw a chance to clean fifo */
			continue;
		}
		avail = min(avail, size);
		DBG("about to push %d bytes starting %p size %d\n", avail,
		    data, size);
		bdx_tx_push_desc(priv, data, avail);
		size -= avail;
		data += avail;
	}
	RET();
}
/** * bdx_probe - Device Initialization Routine * @pdev: PCI device information struct * @ent: entry in bdx_pci_tbl * * Returns 0 on success, negative on failure * * bdx_probe initializes an adapter identified by a pci_dev structure. * The OS initialization, configuring of the adapter private structure, * and a hardware reset occur. * * functions and their order used as explained in * /usr/src/linux/Documentation/DMA-{API,mapping}.txt *
*/
/* TBD: netif_msg should be checked and implemented. I disable it for now */ staticint
bdx_probe(struct pci_dev *pdev, conststruct pci_device_id *ent)
{ struct net_device *ndev; struct bdx_priv *priv; unsignedlong pciaddr;
u32 regionSize; struct pci_nic *nic; int err, port;
ENTER;
nic = vmalloc(sizeof(*nic)); if (!nic)
RET(-ENOMEM);
/************** pci *****************/
err = pci_enable_device(pdev); if (err) /* it triggers interrupt, dunno why. */ goto err_pci; /* it's not a problem though */
/* these fields are used for info purposes only
* so we can have them same for all ports of the board */
ndev->if_port = port;
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
/* ndev->xmit_lock spinlock is not used. * Private priv->tx_lock is used for synchronization * between transmit and TX irq cleanup. In addition * set multicast list callback has to use priv->tx_lock.
*/ #ifdef BDX_LLTX
ndev->lltx = true; #endif /* MTU range: 60 - 16384 */
ndev->min_mtu = ETH_ZLEN;
ndev->max_mtu = BDX_MAX_MTU;
spin_lock_init(&priv->tx_lock);
/*bdx_hw_reset(priv); */ if (bdx_read_mac(priv)) {
pr_err("load MAC address failed\n");
err = -EFAULT; goto err_out_iomap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
err = register_netdev(ndev); if (err) {
pr_err("register_netdev failed\n"); goto err_out_free;
}
netif_carrier_off(ndev);
netif_stop_queue(ndev);
/*
 * NOTE(review): trailing non-code text, originally a German website
 * disclaimer appended by the extraction tool. Translated: "The information
 * on this web page was carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed. Remark: the colored syntax display
 * and the measurement are still experimental." This does not belong in the
 * source file and should be removed.
 */