/* Module info
*/
MODULE_AUTHOR("R.J.Dunlop ");
MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
MODULE_LICENSE("GPL");
/* Driver configuration and global parameters * ==========================================
*/
/* Number of ports (per card) and cards supported
*/ #define FST_MAX_PORTS 4 #define FST_MAX_CARDS 32
/* Default parameters for the link
*/ #define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is * useful
*/ #define FST_TXQ_DEPTH 16 /* This one is for the buffering * of frames on the way down to the card * so that we can keep the card busy * and maximise throughput
*/ #define FST_HIGH_WATER_MARK 12 /* Point at which we flow control * network layer
*/ #define FST_LOW_WATER_MARK 8 /* Point at which we remove flow * control from network layer
*/ #define FST_MAX_MTU 8000 /* Huge but possible */ #define FST_DEF_MTU 1500 /* Common sane value */
/* This information is derived in part from the FarSite FarSync Smc.h * file. Unfortunately various name clashes and the non-portability of the * bit field declarations in that file have meant that I have chosen to * recreate the information here. * * The SMC (Shared Memory Configuration) has a version number that is * incremented every time there is a significant change. This number can * be used to check that we have not got out of step with the firmware * contained in the .CDE files.
*/ #define SMC_VERSION 24
#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main * configuration structure
*/ #define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA * buffers
*/
#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */ #define LEN_SMALL_RX_BUFFER 256
#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */ #define NUM_RX_BUFFER 8
/* Interrupt retry time in milliseconds */ #define INT_RETRY_TIME 2
/* The Am186CH/CC processors support a SmartDMA mode using circular pools
 * of buffer descriptors. The structure is almost identical to that used
 * in the LANCE Ethernet controllers. Details available as PDF from the
 * AMD web site: https://www.amd.com/products/epd/processors/\
 *               2.16bitcont/3.am186cxfa/a21914/21914.pdf
 *
 * Fields are volatile because the card updates the descriptors
 * asynchronously through the shared memory window.
 */
struct txdesc {			/* Transmit descriptor */
	volatile u16 ladr;	/* Low order address of packet. This is a
				 * linear address in the Am186 memory space
				 */
	volatile u8 hadr;	/* High order address. Low 4 bits only, high 4
				 * bits must be zero
				 */
	volatile u8 bits;	/* Status and config */
	volatile u16 bcnt;	/* 2s complement of packet size in low 15 bits.
				 * Transmit terminal count interrupt enable in
				 * top bit.
				 */
	u16 unused;		/* Not used in Tx */
};
struct rxdesc {			/* Receive descriptor */
	volatile u16 ladr;	/* Low order address of packet */
	volatile u8 hadr;	/* High order address */
	volatile u8 bits;	/* Status and config */
	volatile u16 bcnt;	/* 2s complement of buffer size in low 15 bits.
				 * Receive terminal count interrupt enable in
				 * top bit.
				 */
	volatile u16 mcnt;	/* Message byte count (15 bits) */
};
/* Convert a length into the 15 bit 2's complement */ /* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */ /* Since we need to set the high bit to enable the completion interrupt this * can be made a lot simpler
*/ #define cnv_bcnt(len) (-(len))
/* Status and config bits for the above */ #define DMA_OWN 0x80 /* SmartDMA owns the descriptor */ #define TX_STP 0x02 /* Tx: start of packet */ #define TX_ENP 0x01 /* Tx: end of packet */ #define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */ #define RX_FRAM 0x20 /* Rx: framing error */ #define RX_OFLO 0x10 /* Rx: overflow error */ #define RX_CRC 0x08 /* Rx: CRC error */ #define RX_HBUF 0x04 /* Rx: buffer error */ #define RX_STP 0x02 /* Rx: start of packet */ #define RX_ENP 0x01 /* Rx: end of packet */
/* Interrupts from the card are caused by various events which are presented * in a circular buffer as several events may be processed on one physical int
*/ #define MAX_CIRBUFF 32
/* Circular buffer of interrupt event codes shared with the card.
 * Empty when rdindex == wrindex; both indices wrap at MAX_CIRBUFF.
 */
struct cirbuff {
	u8 rdindex;		/* read, then increment and wrap */
	u8 wrindex;		/* write, then increment and wrap */
	u8 evntbuff[MAX_CIRBUFF];
};
/* Interrupt event codes. * Where appropriate the two low order bits indicate the port number
*/ #define CTLA_CHG 0x18 /* Control signal changed */ #define CTLB_CHG 0x19 #define CTLC_CHG 0x1A #define CTLD_CHG 0x1B
/* Finally sling all the above together into the shared memory structure.
 * Sorry it's a hodge podge of arrays, structures and unused bits, it's been
 * evolving under NT for some time so I guess we're stuck with it.
 * The structure starts at offset SMC_BASE.
 * See farsync.h for some field values.
 *
 * NOTE(review): other code in this file reads fields (interruptHandshake,
 * interruptEvent, smcVersion, taskStatus, numberOfPorts, portConfig) that
 * do not appear below - this extract of the structure looks truncated;
 * confirm against the full source before relying on its layout.
 */
struct fst_shared {
	/* DMA descriptor rings */
	struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
	struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];

	/* Obsolete small buffers */
	u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
	u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];

	u16 txDescrIndex[FST_MAX_PORTS];	/* transmit descriptor ring index */
	u16 rxDescrIndex[FST_MAX_PORTS];	/* receive descriptor ring index */

	u16 portMailbox[FST_MAX_PORTS][2];	/* command, modifier */
	u16 cardMailbox[4];			/* Not used */

	/* Number of times the card thinks the host has
	 * missed an interrupt by not acknowledging
	 * within 2mS (I guess NT has problems)
	 */
	u32 interruptRetryCount;

	/* Driver private data used as an ID. We'll not
	 * use this as I'd rather keep such things
	 * in main memory rather than on the PCI bus
	 */
	u32 portHandle[FST_MAX_PORTS];

	/* Count of Tx underflows for stats */
	u32 transmitBufferUnderflow[FST_MAX_PORTS];

	/* Debounced V.24 control input status */
	u32 v24DebouncedSts[FST_MAX_PORTS];

	u32 endOfSmcSignature;	/* endOfSmcSignature MUST be the last member of
				 * the structure and marks the end of shared
				 * memory. Adapter code initializes it as
				 * END_SIG.
				 */
};
/* endOfSmcSignature value */ #define END_SIG 0x12345678
/* Mailbox values. (portMailbox) */ #define NOP 0 /* No operation */ #define ACK 1 /* Positive acknowledgement to PC driver */ #define NAK 2 /* Negative acknowledgement to PC driver */ #define STARTPORT 3 /* Start an HDLC port */ #define STOPPORT 4 /* Stop an HDLC port */ #define ABORTTX 5 /* Abort the transmitter for a port */ #define SETV24O 6 /* Set V24 outputs */
/* PLX Chip Register Offsets */ #define CNTRL_9052 0x50 /* Control Register */ #define CNTRL_9054 0x6c /* Control Register */
/* Larger buffers are positioned in memory at offset BFM_BASE.
 * These are the real Tx/Rx data buffers; LEN_TX_BUFFER/LEN_RX_BUFFER are
 * defined elsewhere in the full source.
 */
struct buf_window {
	u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
	u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
};
/* Calculate offset of a buffer object within the shared memory window */ #define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X))
/* Revert to the compiler's default structure packing; the directive was
 * mangled to "#pragmapack()" which is not a valid preprocessing directive.
 */
#pragma pack()
/* Device driver private information
 * =================================
 */
/* Per port (line or channel) information
 */
struct fst_port_info {
	struct net_device *dev;		/* Device struct - must be first */
	struct fst_card_info *card;	/* Card we're associated with */
	int index;			/* Port index on the card */
	int hwif;			/* Line hardware (lineInterface copy) */
	int run;			/* Port is running */
	int mode;			/* Normal or FarSync raw */
	int rxpos;			/* Next Rx buffer to use */
	int txpos;			/* Next Tx buffer to use */
	int txipos;			/* Next Tx buffer to check for free */
	int start;			/* Indication of start/stop to network */
	/* A sixteen entry transmit queue */
	int txqs;			/* index to get next buffer to tx */
	int txqe;			/* index to queue next packet */
	struct sk_buff *txq[FST_TXQ_DEPTH];	/* The queue */
	int rxqdepth;			/* NOTE(review): not referenced in this
					 * view - confirm use in full source
					 */
};
/* Per card information
 */
struct fst_card_info {
	char __iomem *mem;		/* Card memory mapped to kernel space */
	char __iomem *ctlmem;		/* Control memory for PCI cards */
	unsigned int phys_mem;		/* Physical memory window address */
	unsigned int phys_ctlmem;	/* Physical control memory address */
	unsigned int irq;		/* Interrupt request line number */
	unsigned int nports;		/* Number of serial ports */
	unsigned int type;		/* Type index of card */
	unsigned int state;		/* State of card */
	spinlock_t card_lock;		/* Lock for SMP access */
	unsigned short pci_conf;	/* PCI card config in I/O space */
	/* Per port info */
	struct fst_port_info ports[FST_MAX_PORTS];
	struct pci_dev *device;		/* Information about the pci device */
	int card_no;			/* Inst of the card on the system */
	int family;			/* TxP or TxU */
	int dmarx_in_progress;		/* Rx DMA channel busy flag */
	int dmatx_in_progress;		/* Tx DMA channel busy flag */
	unsigned long int_count;
	unsigned long int_time_ave;
	/* Host-side bounce buffers and their bus addresses for the PLX DMA */
	void *rx_dma_handle_host;
	dma_addr_t rx_dma_handle_card;
	void *tx_dma_handle_host;
	dma_addr_t tx_dma_handle_card;
	/* Context for the single in-flight Rx/Tx DMA transfer */
	struct sk_buff *dma_skb_rx;
	struct fst_port_info *dma_port_rx;
	struct fst_port_info *dma_port_tx;
	int dma_len_rx;
	int dma_len_tx;
	int dma_txpos;
	int dma_rxpos;
};
/* Convert an HDLC device pointer into a port info pointer and similar */ #define dev_to_port(D) (dev_to_hdlc(D)->priv) #define port_to_dev(P) ((P)->dev)
/* Shared memory window access macros * * We have a nice memory based structure above, which could be directly * mapped on i386 but might not work on other architectures unless we use * the readb,w,l and writeb,w,l macros. Unfortunately these macros take * physical offsets so we have to convert. The only saving grace is that * this should all collapse back to a simple indirection eventually.
*/ #define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
#define FST_WRB(C, E, B) (writeb((B), (C)->mem + WIN_OFFSET(E))) #define FST_WRW(C, E, W) (writew((W), (C)->mem + WIN_OFFSET(E))) #define FST_WRL(C, E, L) (writel((L), (C)->mem + WIN_OFFSET(E)))
/* Debug support
*/ #if FST_DEBUG
staticint fst_debug_mask = { FST_DEBUG };
/* Most common debug activity is to print something if the corresponding bit * is set in the debug mask. Note: this uses a non-ANSI extension in GCC to * support variable numbers of macro parameters. The inverted if prevents us * eating someone else's else clause.
*/ #define dbg(F, fmt, args...) \ do { \ if (fst_debug_mask & (F)) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0) #else #define dbg(F, fmt, args...) \ do { \ if (0) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0) #endif
/* Device Driver Work Queues * * So that we don't spend too much time processing events in the * Interrupt Service routine, we will declare a work queue per Card * and make the ISR schedule a task in the queue for later execution. * In the 2.4 Kernel we used to use the immediate queue for BH's * Now that they are gone, tasklets seem to be much better than work * queues.
*/
/* Record that a card has bottom-half work pending.
 * The queue is a 64-bit bitmask with one bit per card, so at most 64
 * cards can be flagged - held consistent under fst_work_q_lock.
 */
static void
fst_q_work_item(u64 *queue, int card_index)
{
	unsigned long flags;

	/* Grab the queue exclusively, set the card's bit, release */
	spin_lock_irqsave(&fst_work_q_lock, flags);
	*queue |= (u64)1 << card_index;
	spin_unlock_irqrestore(&fst_work_q_lock, flags);
}
/* Call the bottom half for each card with work waiting
*/ for (i = 0; i < FST_MAX_CARDS; i++) { if (work_txq & 0x01) { if (fst_card_array[i]) {
dbg(DBG_TX, "Calling tx bh for card %d\n", i);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_txq = work_txq >> 1;
}
}
/* Call the bottom half for each card with work waiting
*/ for (i = 0; i < FST_MAX_CARDS; i++) { if (work_intq & 0x01) { if (fst_card_array[i]) {
dbg(DBG_INTR, "Calling rx & tx bh for card %d\n", i);
do_bottom_half_rx(fst_card_array[i]);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_intq = work_intq >> 1;
}
}
/* Card control functions * ======================
*/ /* Place the processor in reset state * * Used to be a simple write to card control space but a glitch in the latest * AMD Am186CH processor means that we now have to do it by asserting and de- * asserting the PLX chip PCI Adapter Software Reset. Bit 30 in CNTRL register * at offset 9052_CNTRL. Note the updates for the TXU.
*/ staticinlinevoid
fst_cpureset(struct fst_card_info *card)
{ unsignedchar interrupt_line_register; unsignedint regval;
if (card->family == FST_FAMILY_TXU) { if (pci_read_config_byte
(card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
dbg(DBG_ASS, "Error in reading interrupt line register\n");
} /* Assert PLX software reset and Am186 hardware reset * and then deassert the PLX software reset but 186 still in reset
*/
outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2); /* We are delaying here to allow the 9054 to reset itself
*/
usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2); /* We are delaying here to allow the 9054 to reload its eeprom
*/
usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
if (pci_write_config_byte
(card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
dbg(DBG_ASS, "Error in writing interrupt line register\n");
}
/* Release the processor from reset.
 * On non-TXU (TXP) cards a read of control memory is all that is needed;
 * on TXU cards the PLX 9054 control bits must be walked explicitly.
 */
static inline void
fst_cpurelease(struct fst_card_info *card)
{
	if (card->family != FST_FAMILY_TXU) {
		(void)readb(card->ctlmem);
		return;
	}

	/* Force posted writes to complete */
	(void)readb(card->mem);

	/* Release LRESET DO = 1
	 * Then release Local Hold, DO = 1
	 */
	outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
	outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
}
/* Clear the card's interrupt flag.
 * TXP cards need the PLX 9052 interrupt control register re-poked
 * (same value as used to enable interrupts); TXU cards only need a
 * read of control memory.
 */
static inline void
fst_clear_intr(struct fst_card_info *card)
{
	if (card->family != FST_FAMILY_TXU) {
		/* Poke the appropriate PLX chip register (same as enabling
		 * interrupts)
		 */
		outw(0x0543, card->pci_conf + INTCSR_9052);
		return;
	}
	(void)readb(card->ctlmem);
}
/* Process the result of trying to pass a received frame up the stack.
 * NET_RX_SUCCESS needs no action; a drop is only worth a debug trace
 * (the caller bumps the rx_dropped statistic itself).
 */
static void
fst_process_rx_status(int rx_status, char *name)
{
	if (rx_status == NET_RX_DROP)
		dbg(DBG_ASS, "%s: Received packet dropped\n", name);
}
/* Initialise DMA for the PLX 9054.
 * Only the TXU family carries the 9054; other cards have nothing to set up.
 */
static inline void
fst_init_dma(struct fst_card_info *card)
{
	if (card->family != FST_FAMILY_TXU)
		return;

	pci_set_master(card->device);
	/* Identical mode setup for both DMA channels, zero threshold */
	outl(0x00020441, card->pci_conf + DMAMODE0);
	outl(0x00020441, card->pci_conf + DMAMODE1);
	outl(0x0, card->pci_conf + DMATHR);
}
/* Everything is now set, just tell the card to go
*/
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
netif_trans_update(dev);
}
/* Mark a frame for our own raw sockets interface: attach the device,
 * reset the MAC header, flag it as destined for this host and return
 * the FarSync custom protocol id in network byte order.
 */
static __be16
farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	skb_reset_mac_header(skb);
	skb->dev = dev;
	skb->pkt_type = PACKET_HOST;
	return htons(ETH_P_CUST);
}
/* Rx dma complete interrupt.
 * Called from the ISR when PLX DMA channel 0 finishes: copies the frame
 * out of the host bounce buffer into the skb and pushes it up the stack.
 */
static void
fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
		    int len, struct sk_buff *skb, int rxp)
{
	struct net_device *dev = port_to_dev(port);
	int pi;
	int rx_status;

	/* NOTE(review): DBG_TX looks odd for an Rx-path trace - confirm
	 * whether this should be DBG_RX in the full source.
	 */
	dbg(DBG_TX, "fst_rx_dma_complete\n");
	pi = port->index;	/* NOTE(review): pi (and the rxp parameter) are
				 * unused in this view - code that reset the
				 * descriptor / updated stats may have been
				 * lost from this extract; confirm.
				 */
	/* Copy the DMA bounce buffer contents into the skb */
	skb_put_data(skb, card->rx_dma_handle_host, len);

	/* Push upstream */
	dbg(DBG_RX, "Pushing the frame up the stack\n");
	if (port->mode == FST_RAW)
		skb->protocol = farsync_type_trans(skb, dev);
	else
		skb->protocol = hdlc_type_trans(skb, dev);
	rx_status = netif_rx(skb);
	fst_process_rx_status(rx_status, port_to_dev(port)->name);
	if (rx_status == NET_RX_DROP)
		dev->stats.rx_dropped++;
}
/* Program PLX DMA channel 0 to transfer one received frame from card
 * shared memory into the host bounce buffer, then start it. Completion
 * is reported by interrupt; dmarx_in_progress marks the channel busy.
 */
static inline void
fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
	unsigned short conf = card->pci_conf;

	dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
	if (card->dmarx_in_progress)
		dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");

	outl(dma, conf + DMAPADR0);		/* Copy to here (host) */
	outl(mem, conf + DMALADR0);		/* from here (card) */
	outl(len, conf + DMASIZ0);		/* for this length */
	outl(0x00000000c, conf + DMADPR0);	/* In this direction */

	/* Flag the channel as busy before kicking off the transfer */
	card->dmarx_in_progress = 1;
	outb(0x03, conf + DMACSR0);		/* Start the transfer */
}
/* Program PLX DMA channel 1 to transfer one frame from the host bounce
 * buffer into card shared memory, then start it. Completion is reported
 * by interrupt; dmatx_in_progress marks the channel busy.
 */
static inline void
fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
	unsigned short conf = card->pci_conf;

	dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
	if (card->dmatx_in_progress)
		dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");

	outl(dma, conf + DMAPADR1);		/* Copy from here (host) */
	outl(mem, conf + DMALADR1);		/* to here (card) */
	outl(len, conf + DMASIZ1);		/* for this length */
	outl(0x000000004, conf + DMADPR1);	/* In this direction */

	/* Flag the channel as busy before kicking off the transfer */
	card->dmatx_in_progress = 1;
	outb(0x03, conf + DMACSR1);		/* Start the transfer */
}
/* Issue a Mailbox command for a port. * Note we issue them on a fire and forget basis, not expecting to see an * error and not waiting for completion.
*/ staticvoid
fst_issue_cmd(struct fst_port_info *port, unsignedshort cmd)
{ struct fst_card_info *card; unsignedshort mbval; unsignedlong flags; int safety;
/* Setup port Rx buffers
*/ staticvoid
fst_rx_config(struct fst_port_info *port)
{ int i; int pi; unsignedint offset; unsignedlong flags; struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags); for (i = 0; i < NUM_RX_BUFFER; i++) {
offset = BUF_OFFSET(rxBuffer[pi][i][0]);
/* Setup port Tx buffers
*/ staticvoid
fst_tx_config(struct fst_port_info *port)
{ int i; int pi; unsignedint offset; unsignedlong flags; struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags); for (i = 0; i < NUM_TX_BUFFER; i++) {
offset = BUF_OFFSET(txBuffer[pi][i][0]);
if (los) { /* Lost the link
*/ if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier off\n");
netif_carrier_off(port_to_dev(port));
}
} else { /* Link available
*/ if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier on\n");
netif_carrier_on(port_to_dev(port));
}
}
if (los)
dbg(DBG_INTR, "Assert LOS Alarm\n"); else
dbg(DBG_INTR, "De-assert LOS Alarm\n"); if (rra)
dbg(DBG_INTR, "Assert RRA Alarm\n"); else
dbg(DBG_INTR, "De-assert RRA Alarm\n");
/* Increment the appropriate error counter
*/
dev->stats.rx_errors++; if (dmabits & RX_OFLO) {
dev->stats.rx_fifo_errors++;
dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
card->card_no, port->index, rxp);
} if (dmabits & RX_CRC) {
dev->stats.rx_crc_errors++;
dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
card->card_no, port->index);
} if (dmabits & RX_FRAM) {
dev->stats.rx_frame_errors++;
dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
card->card_no, port->index);
} if (dmabits == (RX_STP | RX_ENP)) {
dev->stats.rx_length_errors++;
dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
len, card->card_no, port->index);
}
}
/* Rx Error Recovery
*/ staticvoid
fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port, unsignedchar dmabits, int rxp, unsignedshort len)
{ int i; int pi;
pi = port->index; /* Discard buffer descriptors until we see the start of the * next frame. Note that for long frames this could be in * a subsequent interrupt.
*/
i = 0; while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp + 1) % NUM_RX_BUFFER; if (++i > NUM_RX_BUFFER) {
dbg(DBG_ASS, "intr_rx: Discarding more bufs" " than we have\n"); break;
}
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
}
dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
/* Check we have a buffer to process */
pi = port->index;
rxp = port->rxpos;
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits); if (dmabits & DMA_OWN) {
dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
pi, rxp); return;
} if (card->dmarx_in_progress) return;
/* Get buffer length */
len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt); /* Discard the CRC */
len -= 2; if (len == 0) { /* This seems to happen on the TE1 interface sometimes * so throw the frame away and log the event.
*/
pr_err("Frame received with 0 length. Card %d Port %d\n",
card->card_no, port->index); /* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Check buffer length and for other errors. We insist on one packet * in one buffer. This simplifies things greatly and since we've * allocated 8K it shouldn't be a real world limitation
*/
dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len); if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
fst_log_rx_error(card, port, dmabits, rxp, len);
fst_recover_rx_error(card, port, dmabits, rxp, len); return;
}
/* Push upstream */
dbg(DBG_RX, "Pushing frame up the stack\n"); if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev); else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name); if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
} else {
card->dma_skb_rx = skb;
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
fst_rx_dma(card, card->rx_dma_handle_card,
BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
} if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
}
rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
}
/* The bottom half to the ISR *
*/
/* The transmit bottom half of the ISR.
 * For every running port: while the next Tx descriptor is host-owned (the
 * card has cleared DMA_OWN) and no Tx DMA is already in flight, dequeue
 * one skb from the port's software queue and hand it to the card - by
 * direct PIO copy into shared memory for short frames or TXP cards,
 * otherwise via the PLX DMA channel. Flow control is released once the
 * queue drains below fst_txq_low.
 */
static void
do_bottom_half_tx(struct fst_card_info *card)
{
	struct fst_port_info *port;
	int pi;
	int txq_length;
	struct sk_buff *skb;
	unsigned long flags;
	struct net_device *dev;

	/* Find a free buffer for the transmit
	 * Step through each port on this card
	 */
	dbg(DBG_TX, "do_bottom_half_tx\n");
	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
		if (!port->run)
			continue;

		dev = port_to_dev(port);
		while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
			 DMA_OWN) &&
		       !(card->dmatx_in_progress)) {
			/* There doesn't seem to be a txdone event per-se
			 * We seem to have to deduce it, by checking the DMA_OWN
			 * bit on the next buffer we think we can use
			 */
			spin_lock_irqsave(&card->card_lock, flags);
			txq_length = port->txqe - port->txqs;
			if (txq_length < 0) {
				/* This is the case where one has wrapped and
				 * the maths gives us a negative number
				 */
				txq_length = txq_length + FST_TXQ_DEPTH;
			}
			spin_unlock_irqrestore(&card->card_lock, flags);
			if (txq_length > 0) {
				/* There is something to send */
				spin_lock_irqsave(&card->card_lock, flags);
				skb = port->txq[port->txqs];
				port->txqs++;
				if (port->txqs == FST_TXQ_DEPTH)
					port->txqs = 0;
				spin_unlock_irqrestore(&card->card_lock, flags);

				/* copy the data and set the required
				 * indicators on the card.
				 */
				FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
					cnv_bcnt(skb->len));
				if (skb->len < FST_MIN_DMA_LEN ||
				    card->family == FST_FAMILY_TXP) {
					/* Enqueue the packet with normal io */
					memcpy_toio(card->mem +
						    BUF_OFFSET(txBuffer[pi]
							       [port->
								txpos][0]),
						    skb->data, skb->len);
					FST_WRB(card,
						txDescrRing[pi][port->txpos].
						bits,
						DMA_OWN | TX_STP | TX_ENP);
					dev->stats.tx_packets++;
					dev->stats.tx_bytes += skb->len;
					netif_trans_update(dev);
				} else {
					/* Or do it through dma */
					memcpy(card->tx_dma_handle_host,
					       skb->data, skb->len);
					card->dma_port_tx = port;
					card->dma_len_tx = skb->len;
					card->dma_txpos = port->txpos;
					fst_tx_dma(card,
						   card->tx_dma_handle_card,
						   BUF_OFFSET(txBuffer[pi]
							      [port->txpos][0]),
						   skb->len);
				}
				if (++port->txpos >= NUM_TX_BUFFER)
					port->txpos = 0;

				/* If we have flow control on, can we now
				 * release it?
				 */
				if (port->start) {
					if (txq_length < fst_txq_low) {
						netif_wake_queue(port_to_dev
								 (port));
						port->start = 0;
					}
				}
				dev_kfree_skb(skb);
			} else {
				/* Nothing to send so break out of the
				 * while loop
				 */
				break;
			}
		}
	}
}
/* The receive bottom half of the ISR.
 * Walks every running port and processes completed receive buffers,
 * bounding the work done per invocation with fst_max_reads; when the
 * bound is hit the remainder is rescheduled via the work queue/tasklet.
 */
static void
do_bottom_half_rx(struct fst_card_info *card)
{
	struct fst_port_info *port;
	int pi;
	int frames_seen = 0;

	dbg(DBG_RX, "do_bottom_half_rx\n");
	for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
		if (!port->run)
			continue;

		for (;;) {
			/* Stop when the card still owns the next buffer or
			 * when a receive DMA is already in flight.
			 */
			if (FST_RDB(card, rxDescrRing[pi][port->rxpos].bits) &
			    DMA_OWN)
				break;
			if (card->dmarx_in_progress)
				break;
			if (frames_seen > fst_max_reads) {
				/* Don't spend forever in receive processing;
				 * schedule another pass and bail out.
				 */
				fst_q_work_item(&fst_work_intq, card->card_no);
				tasklet_schedule(&fst_int_task);
				break;
			}
			fst_intr_rx(card, port);
			frames_seen++;
		}
	}
}
/* The interrupt service routine
 * Dev_id is our fst_card_info pointer
 */
static irqreturn_t
fst_intr(int dummy, void *dev_id)
{
	struct fst_card_info *card = dev_id;
	struct fst_port_info *port;
	int rdidx;		/* Event buffer indices */
	int wridx;
	int event;		/* Actual event for processing */
	unsigned int dma_intcsr = 0;
	unsigned int do_card_interrupt;
	unsigned int int_retry_count;

	/* Check to see if the interrupt was for this card
	 * return if not
	 * Note that the call to clear the interrupt is important
	 */
	dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
	if (card->state != FST_RUNNING) {
		pr_err("Interrupt received for card %d in a non running state (%d)\n",
		       card->card_no, card->state);

		/* It is possible to really be running, i.e. we have re-loaded
		 * a running card
		 * Clear and reprime the interrupt source
		 */
		fst_clear_intr(card);
		return IRQ_HANDLED;
	}

	/* Clear and reprime the interrupt source */
	fst_clear_intr(card);

	/* Is the interrupt for this card (handshake == 1) */
	do_card_interrupt = 0;
	if (FST_RDB(card, interruptHandshake) == 1) {
		do_card_interrupt += FST_CARD_INT;

		/* Set the software acknowledge */
		FST_WRB(card, interruptHandshake, 0xEE);
	}
	if (card->family == FST_FAMILY_TXU) {
		/* Is it a DMA Interrupt */
		dma_intcsr = inl(card->pci_conf + INTCSR_9054);
		if (dma_intcsr & 0x00200000) {
			/* DMA Channel 0 (Rx transfer complete) */
			dbg(DBG_RX, "DMA Rx xfer complete\n");
			outb(0x8, card->pci_conf + DMACSR0);
			fst_rx_dma_complete(card, card->dma_port_rx,
					    card->dma_len_rx, card->dma_skb_rx,
					    card->dma_rxpos);
			card->dmarx_in_progress = 0;
			do_card_interrupt += FST_RX_DMA_INT;
		}
		if (dma_intcsr & 0x00400000) {
			/* DMA Channel 1 (Tx transfer complete) */
			dbg(DBG_TX, "DMA Tx xfer complete\n");
			outb(0x8, card->pci_conf + DMACSR1);
			fst_tx_dma_complete(card, card->dma_port_tx,
					    card->dma_len_tx, card->dma_txpos);
			card->dmatx_in_progress = 0;
			do_card_interrupt += FST_TX_DMA_INT;
		}
	}

	/* Have we been missing Interrupts */
	int_retry_count = FST_RDL(card, interruptRetryCount);
	if (int_retry_count) {
		dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
		    card->card_no, int_retry_count);
		FST_WRL(card, interruptRetryCount, 0);
	}

	if (!do_card_interrupt)
		return IRQ_HANDLED;

	/* Schedule the bottom half of the ISR */
	fst_q_work_item(&fst_work_intq, card->card_no);
	tasklet_schedule(&fst_int_task);

	/* NOTE(review): rdidx, wridx, event and port are consumed below but
	 * never initialised in this extract - the loop that fetches events
	 * from the interruptEvent circular buffer appears to be missing
	 * from this view; confirm against the full source.
	 */
	switch (event) {
	case TE1_ALMA:
		dbg(DBG_INTR, "TE1 Alarm intr\n");
		if (port->run)
			fst_intr_te1_alarm(card, port);
		break;

	case CTLA_CHG:
	case CTLB_CHG:
	case CTLC_CHG:
	case CTLD_CHG:
		if (port->run)
			fst_intr_ctlchg(card, port);
		break;

	case ABTA_SENT:
	case ABTB_SENT:
	case ABTC_SENT:
	case ABTD_SENT:
		dbg(DBG_TX, "Abort complete port %d\n", port->index);
		break;

	case TXA_UNDF:
	case TXB_UNDF:
	case TXC_UNDF:
	case TXD_UNDF:
		/* Difficult to see how we'd get this given that we
		 * always load up the entire packet for DMA.
		 */
		dbg(DBG_TX, "Tx underflow port %d\n", port->index);
		port_to_dev(port)->stats.tx_errors++;
		port_to_dev(port)->stats.tx_fifo_errors++;
		dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
		    card->card_no, port->index);
		break;

	case INIT_CPLT:
		dbg(DBG_INIT, "Card init OK intr\n");
		break;

		/* Bump and wrap the index
		 * NOTE(review): unreachable as written (after a break, not
		 * under any case label) - in the full source this presumably
		 * sits after the switch inside the event loop; confirm.
		 */
		if (++rdidx >= MAX_CIRBUFF)
			rdidx = 0;
	}
	FST_WRB(card, interruptEvent.rdindex, rdidx);
	return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle * and that some basic parameters are correct
*/ staticvoid
check_started_ok(struct fst_card_info *card)
{ int i;
/* Check structure version and end marker */ if (FST_RDW(card, smcVersion) != SMC_VERSION) {
pr_err("Bad shared memory version %d expected %d\n",
FST_RDW(card, smcVersion), SMC_VERSION);
card->state = FST_BADVERSION; return;
} if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
pr_err("Missing shared memory signature\n");
card->state = FST_BADVERSION; return;
} /* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
i = FST_RDB(card, taskStatus); if (i == 0x01) {
card->state = FST_RUNNING;
} elseif (i == 0xFF) {
pr_err("Firmware initialisation failed. Card halted\n");
card->state = FST_HALTED; return;
} elseif (i != 0x00) {
pr_err("Unknown firmware status 0x%x\n", i);
card->state = FST_HALTED; return;
}
/* Finally check the number of ports reported by firmware against the * number we assumed at card detection. Should never happen with * existing firmware etc so we just report it for the moment.
*/ if (FST_RDL(card, numberOfPorts) != card->nports) {
pr_warn("Port count mismatch on card %d. Firmware thinks %d we say %d\n",
card->card_no,
FST_RDL(card, numberOfPorts), card->nports);
}
}
/* Set things according to the user set valid flags * Several of the old options have been invalidated/replaced by the * generic hdlc package.
*/
err = 0; if (info->valid & FSTVAL_PROTO) { if (info->proto == FST_RAW)
port->mode = FST_RAW; else
port->mode = FST_GEN_HDLC;
}
/* Only mark information as valid if card is running. * Copy the data anyway in case it is useful for diagnostics
*/
info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD) #if FST_DEBUG
| FSTVAL_DEBUG #endif
;
/* First check what line type is set, we'll default to reporting X.21 * if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be * changed
*/ switch (port->hwif) { case E1:
ifs->type = IF_IFACE_E1; break; case T1:
ifs->type = IF_IFACE_T1; break; case V35:
ifs->type = IF_IFACE_V35; break; case V24:
ifs->type = IF_IFACE_V24; break; case X21D:
ifs->type = IF_IFACE_X21D; break; case X21: default:
ifs->type = IF_IFACE_X21; break;
} if (!ifs->size) return 0; /* only type requested */
if (ifs->size < sizeof(sync)) return -ENOMEM;
i = port->index;
memset(&sync, 0, sizeof(sync));
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed); /* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync))) return -EFAULT;
case FSTCPURELEASE:
fst_cpurelease(card);
card->state = FST_STARTING; return 0;
case FSTWRITE: /* Code write (download) */
/* First copy in the header with the length and offset of data * to write
*/ if (!data) return -EINVAL;
if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write))) return -EFAULT;
/* Sanity check the parameters. We don't support partial writes * when going over the top
*/ if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
wrthdr.size + wrthdr.offset > FST_MEMSIZE) return -ENXIO;
/* Writes to the memory of a card in the reset state constitute * a download
*/ if (card->state == FST_RESET)
card->state = FST_DOWNLOAD;
return 0;
case FSTGETCONF:
/* If card has just been started check the shared memory config * version and marker
*/ if (card->state == FST_STARTING) {
check_started_ok(card);
/* If everything checked out enable card interrupts */ if (card->state == FST_RUNNING) {
spin_lock_irqsave(&card->card_lock, flags);
fst_enable_intr(card);
FST_WRB(card, interruptHandshake, 0xEE);
spin_unlock_irqrestore(&card->card_lock, flags);
}
}
if (!data) return -EINVAL;
gather_conf_info(card, port, &info);
if (copy_to_user(data, &info, sizeof(info))) return -EFAULT;
return 0;
case FSTSETCONF: /* Most of the settings have been moved to the generic ioctls * this just covers debug and board ident now
*/
if (card->state != FST_RUNNING) {
pr_err("Attempt to configure card %d in non-running state (%d)\n",
card->card_no, card->state); return -EIO;
} if (copy_from_user(&info, data, sizeof(info))) return -EFAULT;
switch (ifs->type) { case IF_GET_IFACE: return fst_get_iface(card, port, ifs);
case IF_IFACE_SYNC_SERIAL: case IF_IFACE_V35: case IF_IFACE_V24: case IF_IFACE_X21: case IF_IFACE_X21D: case IF_IFACE_T1: case IF_IFACE_E1: return fst_set_iface(card, port, ifs);
case IF_PROTO_RAW:
port->mode = FST_RAW; return 0;
case IF_GET_PROTO: if (port->mode == FST_RAW) {
ifs->type = IF_PROTO_RAW; return 0;
} return hdlc_ioctl(dev, ifs);
default:
port->mode = FST_GEN_HDLC;
dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
ifs->type); return hdlc_ioctl(dev, ifs);
}
}
/* fst_openport - bring a port up once its card is running.
 *
 * NOTE(review): this block appears corrupted by text extraction:
 *   - "staticvoid" is "static" and "void" fused into one token;
 *   - after the "found port already running" debug message the body
 *     switches to network-transmit logic (it uses dev, skb, card, flags,
 *     txq_length and fst_txq_high, none of which are declared in this
 *     function) -- presumably spliced in from the driver's start-xmit
 *     routine; the local "int signals" is never used here;
 *   - the braces do not balance (three opened by the third line, only
 *     one closed at the end).
 * Restore this function and the transmit routine from the pristine
 * driver source before building.
 */
staticvoid
fst_openport(struct fst_port_info *port)
{ int signals;
/* Only init things if card is actually running. This allows open to * succeed for downloads etc.
*/ if (port->card->state == FST_RUNNING) { if (port->run) {
dbg(DBG_OPEN, "open: found port already running\n");
/* NOTE(review): everything from here down is transmit-path code that
 * does not belong in fst_openport -- see the header note above.
 */
/* Drop packet with error if we don't have carrier */ if (!netif_carrier_ok(dev)) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
dbg(DBG_ASS, "Tried to transmit but no carrier on card %d port %d\n",
card->card_no, port->index); return NETDEV_TX_OK;
}
/* Drop it if it's too big! MTU failure ? */ if (skb->len > LEN_TX_BUFFER) {
dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
LEN_TX_BUFFER);
dev_kfree_skb(skb);
dev->stats.tx_errors++; return NETDEV_TX_OK;
}
/* We are always going to queue the packet * so that the bottom half is the only place we tx from * Check there is room in the port txq
*/
spin_lock_irqsave(&card->card_lock, flags);
/* txqs/txqe index a circular queue; a negative difference means the
 * enqueue index has wrapped past the dequeue index.
 */
txq_length = port->txqe - port->txqs; if (txq_length < 0) { /* This is the case where the next free has wrapped but the * last used hasn't
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags); if (txq_length > fst_txq_high) { /* We have got enough buffers in the pipeline. Ask the network * layer to stop sending frames down
*/
netif_stop_queue(dev);
port->start = 1; /* I'm using this to signal stop sent up */
}
if (txq_length == FST_TXQ_DEPTH - 1) { /* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
card->card_no, port->index); return NETDEV_TX_OK;
}
/* queue the buffer
*/
spin_lock_irqsave(&card->card_lock, flags);
port->txq[port->txqe] = skb;
port->txqe++; if (port->txqe == FST_TXQ_DEPTH)
port->txqe = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
/* Schedule the bottom half which now does transmit processing */
fst_q_work_item(&fst_work_txq, card->card_no);
tasklet_schedule(&fst_tx_task);
return NETDEV_TX_OK;
}
/* Card setup having checked hardware resources.
 * Should be pretty bizarre if we get an error here (kernel memory
 * exhaustion is one possibility). If we do see a problem we report it
 * via a printk and leave the corresponding interface and all that follow
 * disabled.
 */

/* Human-readable card model names, indexed by the card type code.
 * Index 0 ("no hardware") should never be seen in practice.
 * (Fix: the original had "static" and "char" fused into "staticchar"
 * and the whole initializer mashed onto one line.)
 */
static char *type_strings[] = {
	"no hardware",		/* Should never be seen */
	"FarSync T2P",
	"FarSync T4P",
	"FarSync T1U",
	"FarSync T2U",
	"FarSync T4U",
	"FarSync TE1"
};
/* fst_init_card - register an HDLC network device for each port on the
 * card.  On a registration failure the ports registered so far are
 * unregistered again (the "while (i--)" rollback loop) and the negative
 * errno from register_hdlc_device() is returned.
 *
 * NOTE(review): "staticint" is "static" and "int" fused by extraction,
 * and the function is truncated here -- the closing brace of the
 * function body (and presumably a success-path tail) is missing; the
 * next definition's comment begins immediately after.  Restore from the
 * pristine driver source.
 */
staticint
fst_init_card(struct fst_card_info *card)
{ int i; int err;
/* We're working on a number of ports based on the card ID. If the * firmware detects something different later (should never happen) * we'll have to revise it in some way then.
*/ for (i = 0; i < card->nports; i++) {
err = register_hdlc_device(card->ports[i].dev); if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
i, -err); while (i--)
unregister_hdlc_device(card->ports[i].dev); return err;
}
}
/* NOTE(review): in this probe routine "staticint" (twice) and
 * "conststruct" are fused tokens from extraction ("static int",
 * "const struct").  The function is also truncated at the bottom: the
 * goto targets regions_fail / enable_fail and the rest of the PCI probe
 * path are missing (the lines that follow in this file are unrelated
 * website text).  Restore the remainder from the pristine driver source.
 */
/* Initialise card when detected. * Returns 0 to indicate success, or errno otherwise.
*/ staticint
fst_add_one(struct pci_dev *pdev, conststruct pci_device_id *ent)
{ staticint no_of_cards_added; struct fst_card_info *card; int err = 0; int i;
/* One-time driver banner on first probe */
printk_once(KERN_INFO
pr_fmt("FarSync WAN driver " FST_USER_VERSION " (c) 2001-2004 FarSite Communications Ltd.\n")); #if FST_DEBUG
dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask); #endif /* We are going to be clever and allow certain cards not to be * configured. An exclude list can be provided in /etc/modules.conf
*/ if (fst_excluded_cards != 0) { /* There are cards to exclude *
*/ for (i = 0; i < fst_excluded_cards; i++) { if (pdev->devfn >> 3 == fst_excluded_list[i]) {
pr_info("FarSync PCI device %d not assigned\n",
(pdev->devfn) >> 3); return -EBUSY;
}
}
}
/* Allocate driver private data */
card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL); if (!card) return -ENOMEM;
/* Try to enable the device */
err = pci_enable_device(pdev); if (err) {
pr_err("Failed to enable card. Err %d\n", -err); goto enable_fail;
}
/* Claim the device's memory/IO regions for this driver */
err = pci_request_regions(pdev, "FarSync"); if (err) {
pr_err("Failed to allocate regions. Err %d\n", -err); goto regions_fail;
}
/* NOTE(review): function truncated here -- see header note. */
/* NOTE(review): the German text that stood here is extraction junk from
 * a web page, unrelated to this driver, and it truncates fst_add_one()
 * above.  Translation for the record: "The information on this website
 * has been carefully compiled to the best of our knowledge.  However,
 * neither completeness, correctness, nor quality of the provided
 * information is guaranteed.  Remark: the coloured syntax display and
 * the measurement are still experimental."  Remove this block and
 * restore the missing tail of fst_add_one() from the pristine source.
 */