/****************************************************************************** iphase.c: Device driver for Interphase ATM PCI adapter cards Author: Peter Wang <pwang@iphase.com> Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br> Interphase Corporation <www.iphase.com> Version: 1.0 ******************************************************************************* This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on this skeleton fall under the GPL and must retain the authorship (implicit copyright) notice.
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Modified from an incomplete driver for Interphase 5575 1KVC 1M card which was originally written by Monalisa Agrawal at UNH. Now this driver supports a variety of varients of Interphase ATM PCI (i)Chip adapter card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) in terms of PHY type, the size of control memory and the size of packet memory. The following are the change log and history: Bugfix the Mona's UBR driver. Modify the basic memory allocation and dma logic. Port the driver to the latest kernel from 2.0.46. Complete the ABR logic of the driver, and added the ABR work- around for the hardware anormalies. Add the CBR support. Add the flow control logic to the driver to allow rate-limit VC. Add 4K VC support to the board with 512K control memory. Add the support of all the variants of the Interphase ATM PCI (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525 (25M UTP25) and x531 (DS3 and E3). Add SMP support.
Support and updates available at: ftp://ftp.iphase.com/pub/atm
/* NOTE(review): fragment of the CBR-setup path; the enclosing function's
   signature and local declarations are outside this chunk. */
/* IpAdjustTrafficParams */ if (vcc->qos.txtp.max_pcr <= 0) {
IF_ERR(printk("PCR for CBR not defined\n");) return -1;
}
/* CBR bandwidth is expressed in schedule-table entries; each entry is
   worth dev->Granularity cells/sec. */
rate = vcc->qos.txtp.max_pcr;
entries = rate / dev->Granularity;
IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
entries, rate, dev->Granularity);) if (entries < 1)
IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
/* Round up to the next table entry when the requested rate is more than
   a quarter of the way between the two representable rates (the 3*
   weighting biases toward granting the extra entry). */
rateLow = entries * dev->Granularity;
rateHigh = (entries + 1) * dev->Granularity; if (3*(rate - rateLow) > (rateHigh - rate))
entries++; if (entries > dev->CbrRemEntries) {
IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
entries, dev->CbrRemEntries);) return -EBUSY;
}
/* Record the reservation on the per-VC state and on the device-wide
   committed-rate accumulator. */
ia_vcc = INPH_IA_VCC(vcc);
ia_vcc->NumCbrEntry = entries;
dev->sum_mcr += entries * dev->Granularity; /* IaFFrednInsertCbrSched */ // Starting at an arbitrary location, place the entries into the table // as smoothly as possible
/* IaFFrednInsertCbrSched: starting at an arbitrary location, place the
   reserved entries into the CBR schedule table as smoothly (evenly
   spaced) as possible.  sp_mod/sp_mod2/fracSlot carry the fractional
   part of the ideal spacing between iterations. */
cbrVC = 0;
spacing = dev->CbrTotEntries / entries;
sp_mod = dev->CbrTotEntries % entries;		// get modulo
toBeAssigned = entries;
fracSlot = 0;
vcIndex = vcc->vci;
IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
while (toBeAssigned)
{
	// If this is the first time, start the table loading for this
	// connection as close to entryPoint as possible.
	if (toBeAssigned == entries)
	{
		idealSlot = dev->CbrEntryPt;
		dev->CbrEntryPt += 2;		// Adding 2 helps to prevent clumping
		if (dev->CbrEntryPt >= dev->CbrTotEntries)
			dev->CbrEntryPt -= dev->CbrTotEntries;	// Wrap if necessary
	} else {
		idealSlot += (u32)(spacing + fracSlot);	// Point to the next location
							// in the table that would be smoothest
		fracSlot = ((sp_mod + sp_mod2) / entries);	// get new integer part
		sp_mod2 = ((sp_mod + sp_mod2) % entries);	// calc new fractional part
	}
	if (idealSlot >= (int)dev->CbrTotEntries)
		idealSlot -= dev->CbrTotEntries;

	// Continuously check around this ideal value until a null
	// location is encountered.
	SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
	inc = 0;
	testSlot = idealSlot;
	TstSchedTbl = (u16*)(SchedTbl+testSlot);	// set index and read in value
	IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
		testSlot, TstSchedTbl,toBeAssigned);)
	memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
	while (cbrVC)	// If another VC at this location, we have to keep looking
	{
		inc++;
		/* Probe the slot 'inc' below the ideal position first... */
		testSlot = idealSlot - inc;
		if (testSlot < 0) {		// Wrap if necessary
			testSlot += dev->CbrTotEntries;
			IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
				SchedTbl,testSlot);)
		}
		TstSchedTbl = (u16 *)(SchedTbl + testSlot);	// set table index
		memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
		if (!cbrVC)
			break;
		/* ...then the slot the same distance above it. */
		testSlot = idealSlot + inc;
		if (testSlot >= (int)dev->CbrTotEntries) {	// Wrap if necessary
			testSlot -= dev->CbrTotEntries;
			IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
			IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
				testSlot, toBeAssigned);)
		}
		// set table index and read in value
		TstSchedTbl = (u16*)(SchedTbl + testSlot);
		IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
			TstSchedTbl,cbrVC,inc);)
		memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
	} /* while */
	// Move this VCI number into this free location of the CBR Sched table.
	memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
	dev->CbrRemEntries--;
	toBeAssigned--;
} /* while */
/*
 * ia_eeprom_get - bit-bang one 16-bit word out of the adapter's serial
 * NVRAM/EEPROM at word address 'addr'.
 *
 * The NVRAM_* / CFG_* macros perform the actual hardware clocking; the
 * exact statement order here is the device protocol and must not change.
 * Returns the 16-bit value read, assembled MSB first.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
u_short val;
u32 t; int i; /* * Read the first bit that was clocked with the falling edge of * the last command data clock
*/
/* Clock the READ command plus address out to the device. */
NVRAM_CMD(IAREAD + addr); /* * Now read the rest of the bits, the next bit read is D14, then D13, * and so on.
*/
/* Shift in all 16 data bits, bit 15 (MSB) first; NVRAM_CLKIN leaves
   each bit in t. */
val = 0; for (i=15; i>=0; i--) {
NVRAM_CLKIN(t);
val |= (t << i);
}
/* Deselect the chip and drop the data-in line. */
NVRAM_CLR_CE;
CFG_AND(~NVDI); return val;
}
/* NOTE(review): fragment of the receive-interrupt handler; the function
   signature and local declarations (status, state, i, iadev) are outside
   this chunk.  Fixed here: 'elseif' was a compile error ('else if'). */
iadev = INPH_IA_DEV(dev);
status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
if (status & RX_PKT_RCVD)
{
	/* A packet was received: a descriptor has been written to the
	   packet complete queue (PCQ).  Drain descriptors and set up DMA
	   for each until the PCQ is empty. */
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while (!(state & PCQ_EMPTY))
	{
		rx_pkt(dev);
		state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
	iadev->rxing = 1;
}
if (status & RX_FREEQ_EMPT)
{
	if (iadev->rxing) {
		/* Free-buffer queue just ran dry: snapshot the packet
		   counter and time so a later interrupt can detect a
		   stalled receiver. */
		iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
		iadev->rx_tmp_jif = jiffies;
		iadev->rxing = 0;
	} else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
		   ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
		/* No packet completed for ~50 jiffies since the queue
		   emptied: assume the descriptors are stuck and recycle
		   them all, then unmask the free-queue interrupts. */
		for (i = 1; i <= iadev->num_rx_desc; i++)
			free_desc(dev, i);
		printk("Test logic RUN!!!!\n");
		writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
		iadev->rxing = 1;
	}
	IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
}
if (status & RX_EXCP_RCVD)
{
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
}
if (status & RX_RAW_RCVD)
{
	/* need to handle the raw incoming cells. This depends on whether
	   we have programmed to receive the raw cells or not. Else ignore. */
	IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);)
}
}
staticvoid rx_dle_intr(struct atm_dev *dev)
{
IADEV *iadev; struct atm_vcc *vcc; struct sk_buff *skb; int desc;
u_short state; struct dle *dle, *cur_dle;
u_int dle_lp; int len;
iadev = INPH_IA_DEV(dev);
/* free all the dles done, that is just update our own dle read pointer
- do we really need to do this. Think not. */ /* DMA is done, just get all the recevie buffers from the rx dma queue and push them up to the higher layer protocol. Also free the desc
associated with the buffer. */
dle = iadev->rx_dle_q.read;
dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4)); while(dle != cur_dle)
{ /* free the DMAed skb */
skb = skb_dequeue(&iadev->rx_dma_q); if (!skb) goto INCR_DLE;
desc = ATM_DESC(skb);
free_desc(dev, desc);
dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
len, DMA_FROM_DEVICE); /* no VCC related housekeeping done as yet. lets see */
vcc = ATM_SKB(skb)->vcc; if (!vcc) {
printk("IA: null vcc\n");
dev_kfree_skb_any(skb); goto INCR_DLE;
}
ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb); goto INCR_DLE;
} // get real pkt length pwang_test
trailer = (struct cpcs_trailer*)((u_char *)skb->data +
skb->len - sizeof(*trailer));
length = swap_byte_order(trailer->length); if ((length > iadev->rx_buf_sz) || (length >
(skb->len - sizeof(struct cpcs_trailer))))
{
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb); goto INCR_DLE;
}
skb_trim(skb, length);
/* Display the packet */
IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
xdump(skb->data, skb->len, "RX: ");
printk("\n");)
IF_RX(printk("rx_dle_intr: skb push");)
vcc->push(vcc,skb);
atomic_inc(&vcc->stats->rx);
iadev->rx_pkt_cnt++;
}
INCR_DLE: if (++dle == iadev->rx_dle_q.end)
dle = iadev->rx_dle_q.start;
}
iadev->rx_dle_q.read = dle;
/* if the interrupts are masked because there were no free desc available,
unmask them now. */ if (!iadev->rxing) {
state = readl(iadev->reass_reg + STATE_REG) & 0xffff; if (!(state & FREEQ_EMPTY)) {
state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
iadev->reass_reg+REASS_MASK_REG);
iadev->rxing++;
}
}
}
/* NOTE(review): fragment of the receive-open path; the enclosing function's
   signature and local declarations (iadev, vc_table, reass_ptr) are outside
   this chunk. */
if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
/* The 25 Mbit PHY variants have no ABR support in this driver. */
iadev = INPH_IA_DEV(vcc->dev); if (vcc->qos.rxtp.traffic_class == ATM_ABR) { if (iadev->phy_type & FE_25MBIT_PHY) {
printk("IA: ABR not support\n"); return -EINVAL;
}
} /* Make only this VCI in the vc table valid and let all
others be invalid entries */
vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
vc_table += vcc->vci; /* mask the last 6 bits and OR it with 3 for 1K VCs */
/* NOTE(review): only the VCI is stored, shifted past the 6 control bits;
   the "OR with 3" mentioned above is not actually done here -- confirm
   that this is intentional. */
*vc_table = vcc->vci << 6; /* Also keep a list of open rx vcs so that we can attach them with
incoming PDUs later. */ if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
(vcc->qos.txtp.traffic_class == ATM_ABR))
{
srv_cls_param_t srv_p;
init_abr_vc(iadev, &srv_p);
ia_open_abr_vc(iadev, &srv_p, vcc, 0);
} else { /* for UBR later may need to add CBR logic */
/* Non-ABR: mark this VCI's reassembly-table slot as having no AAL5
   packet in progress. */
reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
reass_ptr += vcc->vci;
*reass_ptr = NO_AAL5_PKT;
}
/* NOTE(review): fragment of the receive-side init routine; the function
   signature and local declarations (iadev, dle_addr, i, j, vcsize_sel,
   reass_table, vc_table, err_out label) are outside this chunk.
   Fixed here: '(unsignedlong)' was a compile error ('unsigned long'). */
/* Allocate 4k bytes - more aligned than needed (4k boundary) */
dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
			      &iadev->rx_dle_dma, GFP_KERNEL);
if (!dle_addr) {
	printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
	goto err_out;
}
iadev->rx_dle_q.start = (struct dle *)dle_addr;
iadev->rx_dle_q.read = iadev->rx_dle_q.start;
iadev->rx_dle_q.write = iadev->rx_dle_q.start;
/* the end of the dle q points to the entry after the last
   DLE that can be used. */
iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

/* write the upper 20 bits of the start address to rx list address register */
/* We know this is 32bit bus addressed so the following is safe */
writel(iadev->rx_dle_dma & 0xfffff000,
       iadev->dma + IPHASE5575_RX_LIST_ADDR);
IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
	iadev->dma+IPHASE5575_TX_LIST_ADDR,
	readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
	iadev->dma+IPHASE5575_RX_LIST_ADDR,
	readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
	iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
	iadev->rfL.pcq_wr);)
/* just for check - no VP TBL */
/* VP Table */
/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
/* initialize VP Table for invalid VPIs
   - I guess we can write all 1s or 0x000f in the entire
     memory space or something similar.
*/

/* This seems to work and looks right to me too !!! */
i = REASS_TABLE * iadev->memSize;
writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
/* initialize Reassembly table to I don't know what ???? */
reass_table = (u16 *)(iadev->reass_ram+i);
j = REASS_TABLE_SZ * iadev->memSize;
for (i = 0; i < j; i++)
	*reass_table++ = NO_AAL5_PKT;
/* num_vc is a power of two; derive vcsize_sel = log2(8K / num_vc)
   for the VC lookup-base register's size-select bits. */
i = 8*1024;
vcsize_sel = 0;
while (i != iadev->num_vc) {
	i /= 2;
	vcsize_sel++;
}
i = RX_VC_TABLE * iadev->memSize;
writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
j = RX_VC_TABLE_SZ * iadev->memSize;
for (i = 0; i < j; i++)
{
	/* shift the reassembly pointer by 3 + lower 3 bits of
	   vc_lkup_base register (=3 for 1K VCs) and the last byte is
	   those low 3 bits.
	   Shall program this later.
	*/
	*vc_table = (i << 6) | 15;	/* for invalid VCI */
	vc_table++;
}
/* ABR VC table */
i = ABR_VC_TABLE * iadev->memSize;
writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

/* VP Filter Register set for VC Reassembly only */
writew(0xff00, iadev->reass_reg+VP_FILTER);
writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
writew(0x1, iadev->reass_reg+PROTOCOL_ID);

/* Packet Timeout Count related Registers :
   Set packet timeout to occur in about 3 seconds
   Set Packet Aging Interval count register to overflow in about 4 us
*/
writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

/* The memory map suggested in appendix A and the coding for it.
   Keeping it around just in case we change our mind later.
	Buffer descr	0x0000 (128 - 4K)
	UBR sched	0x1000 (1K - 4K)
	UBR Wait q	0x2000 (1K - 4K)
	Commn queues	0x3000 Packet Ready, Trasmit comp(0x3100)
				(128 - 256) each
	extended VC	0x4000 (1K - 8K)
	ABR sched	0x6000 and ABR wait queue (1K - 2K) each
	CBR sched	0x7000 (as needed)
	VC table	0x8000 (1K - 32K)
*/
/* NOTE(review): fragment of tx_dle_intr's DMA-completion loop; the loop
   head and local declarations (dle, skb, vcc, iavcc, flags, iadev) are
   outside this chunk. */
/* NOTE(review): (dle - iadev->tx_dle_q.start) is already measured in
   struct dle units, so taking it modulo 2*sizeof(struct dle) looks
   suspect; it matches the historical driver -- confirm before changing. */
/* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */ if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
DMA_TO_DEVICE);
}
/* Sanity-check the VCC bookkeeping; on failure drop the lock and skb. */
vcc = ATM_SKB(skb)->vcc; if (!vcc) {
printk("tx_dle_intr: vcc is null\n");
spin_unlock_irqrestore(&iadev->tx_lock, flags);
dev_kfree_skb_any(skb);
return;
}
iavcc = INPH_IA_VCC(vcc); if (!iavcc) {
printk("tx_dle_intr: iavcc is null\n");
spin_unlock_irqrestore(&iadev->tx_lock, flags);
dev_kfree_skb_any(skb); return;
} if (vcc->qos.txtp.pcr >= iadev->rate_limit) { if ((vcc->pop) && (skb->len != 0))
{
vcc->pop(vcc, skb);
} else {
dev_kfree_skb_any(skb);
}
} else { /* Hold the rate-limited skb for flow control */
IA_SKB_STATE(skb) |= IA_DLED;
skb_queue_tail(&iavcc->txing_skb, skb);
}
/* Advance the DLE ring pointer, wrapping at the end. */
IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);) if (++dle == iadev->tx_dle_q.end)
dle = iadev->tx_dle_q.start;
}
iadev->tx_dle_q.read = dle;
spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
/* NOTE(review): fragment of the transmit-open path; the enclosing
   function's signature and local declarations (evc, vc, iadev, ret) are
   outside this chunk.  Fixed here: two 'elseif' compile errors. */
/* store the most significant 4 bits of vci as the last 4 bits of first
   part of atm header.
   store the last 12 bits of vci as first 12 bits of the second part of
   the atm header.
*/
evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

/* check the following for different traffic classes */
if (vcc->qos.txtp.traffic_class == ATM_UBR)
{
	vc->type = UBR;
	vc->status = CRC_APPEND;
	/* Default to line rate, narrowed to the requested PCR if given. */
	vc->acr = cellrate_to_float(iadev->LineRate);
	if (vcc->qos.txtp.pcr > 0)
		vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
	IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
		vcc->qos.txtp.max_pcr,vc->acr);)
}
else if (vcc->qos.txtp.traffic_class == ATM_ABR)
{
	srv_cls_param_t srv_p;
	IF_ABR(printk("Tx ABR VCC\n");)
	/* Start from defaults, then override each ABR parameter the
	   caller supplied in the QoS spec. */
	init_abr_vc(iadev, &srv_p);
	if (vcc->qos.txtp.pcr > 0)
		srv_p.pcr = vcc->qos.txtp.pcr;
	if (vcc->qos.txtp.min_pcr > 0) {
		/* Admission control: refuse if committed rates would
		   exceed the line rate. */
		int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
		if (tmpsum > iadev->LineRate)
			return -EBUSY;
		srv_p.mcr = vcc->qos.txtp.min_pcr;
		iadev->sum_mcr += vcc->qos.txtp.min_pcr;
	} else
		srv_p.mcr = 0;
	if (vcc->qos.txtp.icr)
		srv_p.icr = vcc->qos.txtp.icr;
	if (vcc->qos.txtp.tbe)
		srv_p.tbe = vcc->qos.txtp.tbe;
	if (vcc->qos.txtp.frtt)
		srv_p.frtt = vcc->qos.txtp.frtt;
	if (vcc->qos.txtp.rif)
		srv_p.rif = vcc->qos.txtp.rif;
	if (vcc->qos.txtp.rdf)
		srv_p.rdf = vcc->qos.txtp.rdf;
	if (vcc->qos.txtp.nrm_pres)
		srv_p.nrm = vcc->qos.txtp.nrm;
	if (vcc->qos.txtp.trm_pres)
		srv_p.trm = vcc->qos.txtp.trm;
	if (vcc->qos.txtp.adtf_pres)
		srv_p.adtf = vcc->qos.txtp.adtf;
	if (vcc->qos.txtp.cdf_pres)
		srv_p.cdf = vcc->qos.txtp.cdf;
	/* Initial cell rate may never exceed the peak cell rate. */
	if (srv_p.icr > srv_p.pcr)
		srv_p.icr = srv_p.pcr;
	IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n",
		srv_p.pcr, srv_p.mcr);)
	ia_open_abr_vc(iadev, &srv_p, vcc, 1);
}
else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
	/* The 25 Mbit PHY variants have no CBR support in this driver. */
	if (iadev->phy_type & FE_25MBIT_PHY) {
		printk("IA: CBR not support\n");
		return -EINVAL;
	}
	if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
		IF_CBR(printk("PCR is not available\n");)
		return -1;
	}
	vc->type = CBR;
	vc->status = CRC_APPEND;
	if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
		return ret;
	}
} else {
	printk("iadev: Non UBR, ABR and CBR traffic not supported\n");
}
/* NOTE(review): the following German web-page disclaimer was accidentally
 * appended to this source file; it is preserved here, translated to
 * English, as a comment so the file remains compilable:
 *   "The information on this web page was compiled carefully and to the
 *    best of our knowledge.  However, neither completeness, correctness,
 *    nor quality of the information provided is guaranteed.
 *    Note: the colored syntax highlighting and the measurement are still
 *    experimental."
 */