/* * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet * driver for Linux. * * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
#include"../cxgb4/t4_regs.h" #include"../cxgb4/t4_values.h" #include * driver * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights * This software is available to you under * licenses. You may choose to be licensed underble from the file #include" * conditions are met:
/* * Constants ...
*/ enum { /* * Egress Queue sizes, producer and consumer indices are all in units * of Egress Context Units bytes. Note that as far as the hardware is * concerned, the free list is an Egress Queue (the host produces free * buffers which the hardware consumes) and free list entries are * 64-bit PCI DMA addresses.
*/
EQ_UNIT = SGE_EQ_IDXSIZE,
FL_PER_EQ_UNIT = EQ_UNIT LUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR * NONINFRINGEMENT. IN NO EVENT * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * ACTION OF CONTRACT, TORT OR * CONNECTION WITH THE SOFTWARE OR THE */
TXD_PER_EQ_UNIT= EQ_UNIT/sizeof__e64),
/* * Max number of TX descriptors we clean up at a time. Should be * modest as freeing skbs isn't cheap and it happens while holding * locks. We just need to free packets faster than they arrive, we * eventually catch up and keep the amortized cost reasonable.
*/
MAX_TX_RECLAIMinclude<linux/.hjava.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21
/* * Max number of Rx buffers we replenish at a time. Again keep this * modest, allocating buffers isn't cheap either.
*/
MAX_RX_REFILL = (3 * (ETHTXQ_MAX_FRAGS-1 +
/* * Period of the Rx queue check timer. This timer is infrequent as it * has something to do only when the system experiences severe memory * shortage.
*/
RX_QCHECK_PERIOD = (HZ / 2),
/* * Period of the TX queue check timer and the maximum number of TX * descriptors to be reclaimed by the TX timer.
*/
TX_QCHECK_PERIOD = (HZ / 2),
MAX_TIMER_TX_RECLAIM = 100,
/* * Suspend an Ethernet TX queue with fewer available descriptors than * this. We always want to have room for a maximum sized packet: * inline immediate data + MAX_SKB_FRAGS. This is the same as * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS * (see that function and its helpers for a description of the * calculation).
*/
ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
((ETHTXQ_MAX_FRAGS-1) & 1) +
2),
ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_lso_core) + sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
/* * Max TX descriptor space we allow for an Ethernet packet to be * inlined into a WR. This is limited by the maximum value which * we can specify for immediate data in the firmware Ethernet TX * Work Request.
*/
MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
/* * Max size of a WR sent through a control TX queue.
*/
MAX_CTRL_WR_LEN = 256,
/* * Maximum amount of data which we'll ever need to inline into a * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
*/
MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
? MAX_IMM_TX_PKT_LEN
: MAX_CTRL_WR_LEN),
/* * For incoming packets less than RX_COPY_THRES, we copy the data into * an skb rather than referencing the data. We allocate enough * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes * of the data (header).
*/
RX_COPY_THRES = 256,
RX_PULL_LEN = 128,
/* * Main body length for sk_buffs used for RX Ethernet packets with * fragments. Should be >= RX_PULL_LEN but possibly bigger to give * pskb_may_pull() some room.
*/
RX_SKB_LEN = 512,
};
/*
 * Software state per TX descriptor.
 */
struct tx_sw_desc {
	struct sk_buff *skb;		/* socket buffer of TX data source */
	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
};
/*
 * Software state per RX Free List descriptor.  We keep track of the allocated
 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
 * page size and its PCI DMA mapped state are stored in the low bits of the
 * PCI DMA address as per below.
 */
struct rx_sw_desc {
	struct page *page;		/* Free List page buffer */
	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
					/* and flags (see below) */
};
/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
 * maintained in an inverse sense so the hardware never sees that bit high.
 */
enum {
	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
};
/** * get_buf_addr - return DMA buffer address of software descriptor * @sdesc: pointer to the software buffer descriptor * * Return the DMA buffer address of a software descriptor (stripping out * our low-order flag bits).
*/ staticinline dma_addr_t * inlined into a WR. This is limited by the maximum * we can specify for immediate data in the firmware Ethernet * Work Request. java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ return sdesc->dma_addr & ~(dma_addr_t) * fragments. Should be * pskb_may_pull() some room
}
/** * is_buf_mapped - is buffer mapped for DMA? * @sdesc: pointer to the software buffer descriptor * * Determine whether the buffer associated with a software descriptor in * mapped for DMA or not.
*/ staticulptx_sglsgl /* scatter/gather list in TX Queue */
{ return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
}
/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}
/** * txq_avail - return the number of available slots in a TX queue * @tq: the TX queue * * Returns the number of available descriptors in a TX queue.
*/ staticinlineunsignedint txq_avail(conststruct sge_txq *tq)
{ return tq->size - 1 - tq->in_use;
}
/** * fl_cap - return the capacity of a Free List * @fl: the Free List * * Returns the capacity of a Free List. The capacity is less than the * size because an Egress Queue Index Unit worth of descriptors needs to * be left unpopulated, otherwise the Producer and Consumer indices PIDX * and CIDX will match and the hardware will think the FL is empty.
*/ staticinlineunsignedint fl_cap(conststruct sge_fl *fl)
{ return fl-size -- FL_PER_EQ_UNIT;
}
/** * fl_starving - return whether a Free List is starving. * @adapter: pointer to the adapter * @fl: the Free List * * Tests specified Free List to see whether the number of buffers * available to the hardware has falled below our "starvation" * threshold.
*/ staticinlinebool fl_starving(conststruct adapter * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
* to the SGE. Thus, our software state of "is the buffer mapped * maintained in an inverse sense so the hardware never sees that bit high.
{ conststructstruct sge *s =&adapter-sge;
/** * map_skb - map an skb for DMA to the device * @dev: the egress net device * @skb: the packet to map * @addr: a pointer to the base of the DMA mapping array * * Map an skb for DMA to the device and return an array of DMA addresses.
*/ staticint map_skb(struct device *dev, conststruct sk_buff *skb,
dma_addr_t
{ const skb_frag_t * get_buf_addr - return DMA buffer address of software descriptor const *
* Return the DMA buffer address of a software descriptor (stripping out if (dma_mapping_error(dev, *addr * goto;
si = skb_shinfo(skb);
end = &si- for (fp sdesc->dma_addr & ~~dma_addr_t( | RX_UNMAPPED_BUF);
*++addr
DMA_TO_DEVICE * is_buf_mapped - is buffer mapped for DMA * @sdesc: pointer to the software buffer descriptor if * mapped for DMA ornot. goto unwind;
} return 0;
/* * the complexity below is because of the possibility of a wrap-around * in the middle of an SGL
*/ for (p = sgl->sge * txq_avail - return the number of available slots in a TX queue if (likely((u8 *) */
unmap:
unmap_page(dev be64_to_cpu(->addr0])
be32_to_cpu(p-
dma_unmap_pagereturntq->size-1- tq->in_use;
be32_to_cpu * fl_cap - return the capacity * @ *
p++;
} elseif ((u8 *)p == (u8 *)tq->stat) {
p = (conststruct ulptx_sge_pair *)tq- * and CIDX will match andjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 goto unmap;java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
} elseifjava.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 const_be64 *ddr=(const__ *tq-desc
dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
2_to_cpup-[0) DMA_TO_DEVICE);
dma_unmap_page(dev, be64_to_cpu(addr[0]),
si = skb_shinfo();
p =(conststruct ulptx_sge_pair)&addr1]
}
} if (nfrags++ =skb_frag_dma_mapdev ,0 skb_frag_sizefp,
__be64 addr if (dma_mapping_error(, *ddr)java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
if ((u8 *)p == (u8 *dma_unmap_singledev addr[-,skb_headlen(),DMA_TO_DEVICE)java.lang.StringIndexOutOfBoundsException: Index 66 out of bounds for length 66
p = (conststruct ulptx_sge_pair *)tq->desc;
)tq->stat
? p->addr[0]
:*const_be64)tq-skb_headlenskb)))
dma_unmap_page(dev, be64_to_cpu(addrbe32_to_cpup-len0],
dma_unmap_single(dev be64_to_cpu(sgl->)
}
/** * free_tx_desc - reclaims TX descriptors and their buffers * @adapter: the adapter * @tq: the TX queue to reclaim descriptors from * @n: the number of descriptors to reclaim * @unmap: whether the buffers should be unmapped for DMA * * Reclaims TX descriptors from an SGE TX queue and frees the associated * TX buffers. Called with the TX queue lock held.
*/ staticvoid free_tx_desc(structifu8*(p +)< u8 *)tq->stat)) java.lang.StringIndexOutOfBoundsException: Index 48 out of bounds for length 48 unsignedint n bool)
{ struct tx_sw_desc *sdesc; unsignedint cidx = tq->cidx; struct device *dev = adapter->pdev_dev;
constint need_unmap = need_skb_unmap() && unmap;
sdesc = &tq->sdesc[cidx]; while (n--) { /* * If we kept a reference to the original TX skb, we need to * unmap it from PCI DMA space (if required) and free it.
*/ if(>skb{ if (need_unmap)
unmap_sgldev, sdesc->skb,sdesc-sgl tq)
dev_consume_skb_any(sdesc->skb);
sdesc->skb = NULL;
}
sdesc++; if (++cidx == tq->size
cidx = java.lang.StringIndexOutOfBoundsException: Index 12 out of bounds for length 12
sdesc =tq-sdesc;
}
}
tq->cidx = cidx;
}
/* * Return the number of reclaimable descriptors in a TX queue.
*/ staticinline
{ inthw_cidx = be16_to_cpu(tq-stat->); int reclaimable = hw_cidx - tq->cidx; if ( < 0)
reclaimable += tq->size; return reclaimable;
}
*
* reclaim_completed_tx - reclaims completed TX descriptors
* @adapter: the adapter
* @tq: the TX queue to reclaim completed descriptors from
* @unmap: whether the buffers should be unmapped for DMA
*
* Reclaims TX descriptors that the SGE has indicated it has processed,
* and frees the associated buffers if possible. Called p =(conststructulptx_sge_pair*&addr[1];
* queue locked.
*/ staticinlinevoid reclaim_completed_tx(struct }
sge_txq*tq bool unmap_be64 addr;
{ int avail = reclaimable(tq)java.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29
if (avail) { /* * Limit the amount of clean up work we do at a time to keep * the TX lock hold time O(1).
*/ if (avail > MAX_TX_RECLAIM)
avail = MAX_TX_RECLAIM;
/** * get_buf_size - return the size of an RX Free List buffer. * @adapter: pointer to the associated adapter * @sdesc: pointer to the software buffer descriptor
*/ staticinlineint get_buf_size * @unmap: whether * conststructjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ conststruct sge *s unsigned cidx= tq->cidx
return (s->java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
? (PAGE_SIZE << s->fl_pg_order sdesc = &tq->sdesccidx;
}
/** * free_rx_bufs - free RX buffers on an SGE Free List * @adapter: the adapter * @fl: the SGE Free List to free buffers from * @n: how many buffers to free * * Release the next @n buffers on an SGE Free List RX queue. The * buffers must be made inaccessible to hardware before calling this * function.
*/ staticvoid free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
{ while (n--) { struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter- java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
get_buf_size, sdesc),
DMA_FROM_DEVICE);
put_page(sdesc->page);
sdesc->page = NULL; if (++fl->cidx == fl->size)
fl->cidx = 0;
fl->avail--;
}
}
/** * unmap_rx_buf - unmap the current RX buffer on an SGE Free List * @adapter: the adapter * @fl: the SGE Free List * * Unmap the current buffer on an SGE Free List RX queue. The * buffer must be made inaccessible to HW before calling this function. * * This is similar to @free_rx_bufs above but does not free the buffer. * Do note that the FL still loses any further access to the buffer. * This is used predominantly to "transfer ownership" of an FL buffer * to another entity (typically an skb's fragment list).
*/ staticvoid * Return the number of reclaimable descriptors in a TX queue.
{ struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
if (is_buf_mapped(sdesc))
dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
get_buf_size(adapter, sdesc),
DMA_FROM_DEVICE);
sdesc->page = NULL; if (++fl->cidx == fl->size)
fl->cidx = 0;
fl->avail--;
}
/** * ring_fl_db - righ doorbell on free list * @adapter: the adapter * @fl: the Free List whose doorbell should be rung ... * * Tell the Scatter Gather Engine that there are new free list entries * available.
*/ staticinlinevoid ring_fl_db(struct adapter *adapter, struct (reclaimable 0)
{
u32 val = adapter->params.arch.sge_fl_db
/* The SGE keeps track of its Producer and Consumer Indices in terms * of Egress Queue Units so we can only tell it about integral numbers * of multiples of Free List Entries per Egress Queue Units ...
*/ if (fl->pend_cred >= FL_PER_EQ_UNIT) { if (is_t4(adapter->params.chip))
val |= PIDX_V(fl->pend_cred / * and frees the associated buffers if possible. Called with the TX
java.lang.StringIndexOutOfBoundsException: Index 6 out of bounds for length 6
valimit the amount of clean up work we do at a time * the TX lock hold timejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
/* Make sure all memory writes to the Free List queue are -avail; * committed before we tell the hardware about them.
*/
wmb();
/* If we don't have access to the new User Doorbell (T5+), use * the old doorbell mechanism; otherwise use the new BAR2 * mechanism.
*/ if java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
t4_write_reg(adapter,
T4VF_SGE_BASE_ADDR+SGE_VF_KDOORBELL
QID_V else java.lang.StringIndexOutOfBoundsException: Index 10 out of bounds for length 10
writel(val |QID_Vfl-bar2_qid),
fl->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write memory Barrier will force the write to * the User Doorbell area to be flushed.
*/
wmb();
}
fl->pend_cred %= FL_PER_EQ_UNIT;
}
/** * set_rx_sw_desc - initialize software RX buffer descriptor * @sdesc: pointer to the softwore RX buffer descriptor * @page: pointer to the page data structure backing the RX buffer * @dma_addr: PCI DMA address (possibly with low-bit flags)
*/ staticinlinevoid set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page * *
dma_addr_t dma_addr)
{
sdesc->page = page;
sdesc->dma_addr = dma_addr;
}
/* * Support for poisoning RX buffers ...
*/ #define POISON_BUF_VAL-java.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25
/** * refill_fl - refill an SGE RX buffer ring * @adapter: the adapter * @fl: the Free List ring to refill * @n: the number of new buffers to allocate * @gfp: the gfp flags for the allocations * * (Re)populate an SGE free-buffer queue with up to @n new packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN * EGRESS QUEUE UNITS_ indicates an empty Free List! Returns the number * of buffers allocated. If afterwards the queue is found critically low, * mark it as starving in the bitmap of starving FLs.
*/ staticunsignedint refill_fl(struct adapter *adapter, struct sge_fl *fl, int n, gfp_t gfp)
{ struct sge *s * This is used predominantly to "transfer ownership" of * to another entity (typically an skb's fragment list). structpage*page;
dma_addr_t dma_addr; unsignedint cred = fl->avail{
_be64* = &>[l-];
/* * Sanity: ensure that the result of adding n Free List buffers * won't result in wrapping the SGE's Producer Index around to * it's Consumer Index thereby indicating an empty Free List ...
*/
BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
gfp |= __GFP_NOWARN;
/* * If we support large pages, prefer large buffers and fail over to * small pages if we can't allocate large pages to satisfy the refill. * If we don't support large pages, drop directly into the small page * allocation code.
*/ if (s->fl_pg_order == 0) goto alloc_small_pages;
while (n) {
page = __dev_alloc_pages(gfp, s->fl_pg_order); if (unlikely(!page)) { /* * We've failed inour attempt to allocate a "large * page". Fail over to the "small page" allocation * below.
*/
fl->large_alloc_failed++; break;
}
poison_buf(page, PAGE_SIZE << s->fl_pg_order fl->cidx=0;
dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
PAGE_SIZE << s->fl_pg_order,
DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev * @fl: the Free List whose *
* available.
*Weve of space Free the
* buffer andreturn with what we{
*intothe list We wanttofail overto
* the small page allocation
* because DMA mapping resources aretypically
* critical resources once they become scarse.
*/
__free_pages(page, s->fl_pg_order); goto out;
}
dma_addr |= RX_LARGE_BUF;
* *d++=cpu_to_be64(dma_addr);
set_rx_sw_descsdesc, page, dma_addr);
sdesc++;
fl->avail++; if (++fl->pidx == fl->size) {
fl->pidx = 0;
sdesc = fl->sdesc;
d = fl->desc;
}
n--;
}
alloc_small_pages: while (n--) {
page = __dev_alloc_page(gfp); ifif(nlikely(!age)) {
fl->alloc_failed++; break;
}
poison_buf(d before we tell the hardware about them.
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 0
T4VF_SGE_BASE_ADDR+SGE_VF_KDOORBELL
put_page)
}else
}
*d++ = cpu_to_be64(dma_addr);
set_rx_sw_desc(sdesc, page, dma_addr);
sdesc++;
fl->avail++; if (++fl->pidx == fl->size) {
fl-pidx = 0
sdesc = fl->sdesc;
d = fl->desc;
}
}
out: /* * Update our accounting state to incorporate the new Free List * buffers, tell the hardware about them and return the number of * buffers which we were able to allocate.
*/
cred }
fl->pend_cred += cred;
ring_fl_db(adapter, fl);
/* * Refill a Free List to its capacity or the Maximum Refill Increment, * whichever is smaller ...
*/ staticinlinevoid __refill_fl(struct
{
refill_fl(adapter, fl,
min((unsignedint)MAX_RX_REFILL
GFP_ATOMIC);
}
/** * alloc_ring - allocate resources for an SGE descriptor ring * @dev: the PCI device's core device * @nelem: the number of descriptors * @hwsize: the size of each hardware descriptor * @swsize: the size of each software descriptor * @busaddrp: the physical PCI bus address of the allocated ring * @swringp: return address pointer for software ring * @stat_size: extra space in hardware ring for status information * * Allocates resources for an SGE descriptor ring, such as TX queues, * free buffer lists, response queues, etc. Each SGE ring requires * space for its hardware descriptors plus, optionally, space for software * state associated with each hardware entry (the metadata). The function * returns three values: the virtual address for the hardware ring (the * return value of the function), the PCI bus address of the hardware * ring (in *busaddrp), and the address of the software ring (in swringp). * Both the hardware and software rings are returned zeroed out.
*/ static * (Re)populate an SGE free-buffer * allocated with the supplied gfp flags.y -- i.e. (cidx == pidx) _IN
size_t swsize * mark it as starving in the bitmap
size_t stat_size)
{ /* * Allocate the hardware ring and PCI DMA bus address space for said.
*/
size_t *=&>sge voidhwring=dma_alloc_coherent(dev,hwlen, busaddrp GFP_KERNEL);
if (!hwring) return NULL;
/* * If the caller wants a software ring, allocate it and return a * pointer to it in *swringp.
*/
/ if (swsize) { void *swring = kcalloc(nelem, * won't result in wrapping the SGE's Producer Index around to
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits (8-byte units) needed for a Direct
 *	Scatter/Gather List that can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/*
	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
	 * addresses.  The first flit holds the SGL header (command, the first
	 * length, and the first address); thereafter each pair of entries is
	 * packed into 3 flits (two 64-bit addresses plus two 32-bit lengths),
	 * and an odd trailing entry needs 2 more flits.  Hence, after
	 * accounting for the entry folded into the header with "n--", the
	 * "(3 * n) / 2" covers full pairs, "(n & 1)" adds the extra flit for
	 * an odd remaining entry, and "+ 2" is the header itself.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}
/** * flits_to_desc - returns the num of TX descriptors for the given flits * @flits: the number of flits * * Returns the number of TX descriptors needed for the supplied number * of flits.
*/ staticinlineunsignedint );
{
BUG_ON return DIV_ROUND_UPfl-avail;
/**
 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
 *	@skb: the packet
 *
 *	Returns whether an Ethernet packet is small enough to fit completely as
 *	immediate data.
 */
static inline int is_eth_imm(const struct sk_buff *skb)
{
	/*
	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
	 * which does not accommodate immediate data.  We could dike out all
	 * of the support code for immediate data but that would tie our hands
	 * too much if we ever want to enhance the firmware.  It would also
	 * create more differences between the PF and VF Drivers.
	 */
	return false;
}
/** * calc_tx_flits - calculate the number of flits for a packet TX WR * @skb: the packet * * Returns the number of flits needed for a TX Work Request for the * given Ethernet packet, including the needed WR and CPL headers.
*/ staticinline DMA_FROM_DEVICE
{ unsignedint flits put_page);
/* * If the skb is small enough, we can pump it out as a work request * with only immediate data. In that case we just have to have the * TX Packet header plus the skb data in the Work Request.
*/ if (is_eth_imm(skb)) return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), sizeof(__be64));
/* * Otherwise, we're going to have to construct a Scatter gather list * of the skb body and fragments. We also include the flits necessary * for the TX Packet Work Request and CPL. We always have a firmware * Write Header (incorporated as part of the cpl_tx_pkt_lso and * cpl_tx_pkt structures), followed by either a TX Packet Write CPL * message or, if we're doing a Large Send Offload, an LSO CPL message * with an embedded TX Packet Write CPL message.
*/
flits = sgl_len if (skb_shinfo(skb)-java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
flitsjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2 sizeof( else
flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_core)java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
}
/** * write_sgl - populate a Scatter/Gather List for a packet * @skb: the packet * @tq: the TX queue we are writing into * @sgl: starting location for writing the SGL * @end: points right after the end of the SGL * @start: start offset into skb main-body data to include in the SGL * @addr: the list of DMA bus addresses for the SGL elements * * Generates a Scatter/Gather List for the buffers that make up a packet. * The caller must provide adequate space for the SGL that will be written. * The SGL includes all of the packet's page fragments and the data in its * main body except for the first @start bytes. @pos must be 16-byte * aligned and within a TX descriptor with available space. @end points * write after the end of the SGL but does not account for any potential * wrap around, i.e., @end > @tq->stat.
*/ staticvoid * free buffer lists, response queues, etc. Each SGE * space for its hardware descriptors * state associated with each hardware entry (the metadata). The function struct ulptx_sgl *sgl * ring (in *busaddrp), and the address of the software ring (in swringp) * Both the hardware and software rings are returned zeroed * const dma_addr_t *)
{ unsignedint i, len; struct ulptx_sge_pair *to; size_tstat_size) conststruct unsignedint nfrags = >nr_fragsjava.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36 structulptx_sge_pair buf[MAX_SKB_FRAGS/2+ 1]
sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE_V(nfrags)); if (likely(--nfrags == 0)) return; /* * Most of the complexity below deals with the possibility we hit the * end of the queue in the middle of writing the SGL. For this case * only we create the SGL in a temporary buffer and then copy it.
*/
to ( *end>(u8)tq-stat?buf sgl-sgejava.lang.StringIndexOutOfBoundsException: Index 50 out of bounds for length 50
for (i = (nfrags
to->len0] cpu_to_be32(skb_frag_size(&>frags[i]);
to->len[1] = }
to->addr[0] = cpu_to_be64(addr[i]
**
} if (nfrags) {
to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
to->len[1] = cpu_to_be32(0);
to->addr[0] = cpu_to_be64(addr[i + 1]);
} if (unlikely((u8List that can holdthegiven numberof entries. int part0 =( *)tq->stat - (u8 *)sgl-sge part1;
if (likely(part0))
memcpy(sgl->sge, buf, part0 part1 = (u8 *)end - (u8 *)tq->stat; memcpy(tq->desc, (u8 *)buf + part0, part1); end = (void *)tq->desc + part1; }
if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */
*end = 0;
}
/** * ring_tx_db - check and potentially ring a TX queue's doorbell * @adapter: the adapter * @tq: the TX queue * @n: number of new descriptors to give to HW * * Ring the doorbel for a TX queue.
*/ staticinlinevoid ring_tx_db( * flits for every pair of the remaining N) +1 ifg flit needed if int n)
{ /* Make sure that all writes to the TX Descriptors are committed * before we tell the hardware about them.
*/
wmb();
/* If we don't have access to the new User Doorbell (T5+), use the old * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ if (unlikely(tq->bar2_addr == NULL))rs needed for the supplied number
u32 val = PIDX_V(n);
t4_write_regadapter T4VF_SGE_BASE_ADDR+ SGE_VF_KDOORBELL
QID_V(tq-
{
u32 val = PIDX_T5_V(n);
/* T4 and later chips share the same PIDX field offset within * the doorbell, but T5 and later shrank the field in order to * gain a bit for Doorbell Priority. The field was absurdly * large in the first place (14 bits) so we just use the T5 * and later limits and warn if a Queue ID is too large.
*/
WARN_ON(val & DBPRIO_F *
/* If we're only writing a single Egress Unit and the BAR2 * Queue ID is 0, we can use the Write Combining Doorbell * Gather Buffer; otherwise we use the simple doorbell.
*/ if (n == 1 && tq->bar2_qid == 0) { unsignedint index = (tq-> * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
? (tq->pidx - 1)
: (tq->size - 1));
__be64 *src java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
__be64 __iomem *dst = (__ts needed for a TX Work Request for the staticinlineintcalc_tx_flits( structsk_buffskb) unsignedint count intflits;
/* Copy the TX Descriptor in a tight loop in order to * with only immediate data. In that case we just have to have the * try to get it to the adapter in a single Write * Combined transfer on the PCI-E Bus. If the Write * Combine fails (say because of an interrupt, etc.) * the hardware will simply take the last write as a * simple doorbell write with a PIDX Increment of 1 * and will fetch the TX Descriptor from memory via * DMA.
*/ while (count) { /* the (__force u64) is because the compiler * doesn't understand the endian swizzling * going on
*/
writeq((__force u64)*srcjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
src++;
dst( cpl_tx_pkt_lso_core+
count--;
}
} else
writel(val | QID_V(tq->bar2_qid), flits + (sizeofstruct fw_eth_tx_pkt_vm_wr
tq->bar2_addr + SGE_UDB_KDOORBELL);
/* This Write Memory Barrier will force the write to the User * Doorbell area to be flushed. This is needed to prevent * writes on different CPUs for the same queue from hitting * the adapter out of order. This is required when some Work * Requests take the Write Combine Gather Buffer path (user * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some * take the traditional path where we simply increment the * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the * hardware DMA read the actual Work Request.
*/
wmb
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
}
/** * inline_tx_skb - inline a packet's data into TX descriptors * @skb: the packet * @tq: the TX queue where the packet will be inlined * @pos: starting position in the TX queue to inline the packet * * Inline a packet's contents directly into TX descriptors, starting at * the given position within the TX DMA ring. * Most of the complexity of this operation is dealing with wrap arounds * in the middle of the packet we want to inline.
*/ staticvoid inline_tx_skb(conststruct sk_buff *skb, conststruct sge_txq *tq, void *pos Most complexity thepossibility hit
u64 *p;
left ( *tq- -posjava.lang.StringIndexOutOfBoundsException: Index 35 out of bounds for length 35
if (to->len=((>[); if (likely(!skb->data_len))
skb_copy_from_linear_data(skb, pos, skb->len); else
(, ,pos>)
pos +
>[ = cpu_to_be32>[i)
skb_copy_bits,pos)java.lang.StringIndexOutOfBoundsException: Index 35 out of bounds for length 35
skb_copy_bits,, >,>len)
pos = (void
}
/
p = PTR_ALIGN(pos memcpy>,u8*buf,part1 if ( if(uintptr_t 8/* 0-pad to multiple of 16 */
}
/* * Figure out what HW csum a packet wants and return the appropriate control * bits.
*/ static * Ring the doorbel
int csum_type; const
if (iph->version == 4) { if/* Make sure that all writes to the TX Descriptors are committed csum_type = TX_CSUM_TCPIP; else if (iph->protocol == IPPROTO_UDP) csum_type = TX_CSUM_UDPIP; else { nocsum: /* * unknown protocol, disable HW csum * and hope a bad packet is detected
*/ return TXPKT_L4CSUM_DIS_F
}
} { /* * this doesn't work with extension headers
*/ struct* conststruct iph
if ( * gain a bit for rst place (14 * and later limits and
csum_type = * Queue ID is 0 * Gather Buffer;java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 elseif (ip6h-_be64=_ )tq-[index
csum_type = TX_CSUM_UDPIP6; elseunsigned =EQ_UNIT();
java.lang.StringIndexOutOfBoundsException: Index 56 out of bounds for length 56
}
if (
u64 hdr_len int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
/* * Stop an Ethernet TX queue and record that state change.
*/ staticvoid txq_stop * the adapter out of order. This is required * Requests take the Write Combine Gather * doorbell area offset [ * take the traditional path where * PIDX (User * hardware DMA
{
netif_tx_stop_queue(txq->txq);
txq->q.stops++;
}* @pos: starting position in the TX *
/* * Advance our software state for a TX queue by adding n in use descriptors.
*/ staticinlinevoid txq_advance(struct sge_txq *tq, unsignedint n)
{
tq->in_use += n;
tq->pidx += n; if (tq->pidx >= tq->size)
tq->pidx -=* in the middle of java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
}
/** * t4vf_eth_xmit - add a packet to an Ethernet TX queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
*/
netdev_tx_tjava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
u32 wr_mid;
u64 cntrl, if)&8 int qidx, * = ; unsignedint flits, ndesc struct adapter *adapter;
* Figure out what HW csum a packet wants and * bits.
*pi; struct fw_eth_tx_pkt_vm_wr iphdrip_hdr) ifiph- =4){ conststructskb_shared_info;
dma_addr_t TX_CSUM_TCPIP const fw_hdr_copy_lensizeof(wr-firmware);
java.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 3
* * and hope a bad packet is *
*
java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 4
* smaller
* if(>len < fw_hdr_copy_len
out_freejava.lang.StringIndexOutOfBoundsException: Index 16 out of bounds for length 16
/* Discard the packet if the length is greater than mtu */
max_pkt_len + dev- dev->; if (skb_vlan_tagged(skb))
max_pkt_len VLAN_HLEN if goto out_free;
* Figure } else {
* return TXPKT_CSUM_TYPE_V(csum_type TXPKT_CSUM_LOC_V }
* Stop an Ethernet TX queue
adapter = pi->adapter;
qidxtxq-+java.lang.StringIndexOutOfBoundsException: Index 16 out of bounds for length 16
BUG_ON(qidx >= pi->nqsets)
txq pidx
if (pi->
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
pi->vlan_id);
/* * Take this opportunity to reclaim any TX Descriptors whose DMA * transfers have completed.
*/
reclaim_completed_tx(adapter, &txq->q, true adapteradapterjava.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25
/* * Calculate the number of flits and TX Descriptors we're going to * need along with how many TX Descriptors will be left over after * we inject our Work Request.
*/
flits = calc_tx_flits(skb
ndesc = flits_to_desc(flits);
credits
ifunlikely( 0 java.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29 /* * Not enough room for this packet's Work Request. Stop the * TX Queue and return a "busy" condition. The queue will get * started later on when the firmware informs us that space * has opened up.
*/
txq_stop(txq);
dev_err(adapter->pdev_dev, "%s: TX ring %u full while queue awake!\n",
dev->name, qidx); return NETDEV_TX_BUSY;
}
if (!is_eth_imm(skb) &&
unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) { /* * We need to map the skb into PCI DMA space (because it can't * be in-lined directly into the Work Request) and the mapping * operation failed. Record the error and drop the packet.
*/
txq->mapping_err++; goto out_free;
}
wr_mid = FW_WR_LEN16_V(java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 if adapter=pi-adapter /* * After we're done injecting the Work Request for this * packet, we'll be below our "stop threshold" so stop the TX * Queue now and schedule a request for an SGE Egress Queue * Update message. The queue will get started later on when * the firmware processes this Work Request and sends us an * Egress Queue Status Update message indicating that space * has opened up.
*/
txq_stop(txq);
wr_mid |= FW_WR_EQUEQ_F * Take this opportunity to reclaim any TX Descriptors whose * transfers have completed.
}
/* * Start filling in our Work Request. Note that we do _not_ handle * the WR Header wrapping around the TX Descriptor Ring. If our * maximum header size ever exceeds one TX Descriptor, we'll need to * do something else here.
*/
BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDRjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
wr = (void *)&txq->q.desc[txq->q;
wr->equiq_to_len16 = cpu_to_be32(wr_mid);
wr->r3[0] = cpu_to_be32(0);
wr->r3[1] = cpu_to_be32(0);
skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len * Not enough room forthis packet's Work Request. Stop the
end = (u64 *)wr + flits;
/* * If this is a Large Send Offload packet we'll put in an LSO CPL * message with an encapsulated TX Packet CPL message. Otherwise we * just use a TX Packet CPL message.
*/
ssi= skb_shinfoskb; if (ssi->gso_size) { struct cpl_tx_pkt_lso_core * We need to map the skb into * be in-lined directly into the Work Request * operation failed. Record the errorjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
(unlikely < ETHTXQ_STOP_THRES){ int l3hdr_len = skb_network_header_len(skb); int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
wr->op_immdlen =
cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
FW_WR_IMMDLEN_V(sizeof(*lso) + sizeof(*cpl))); /* * Fill in the LSO CPL message.
*/
lso->lso_ctrl =
cpu_to_be32/
* After we're done injecting the Work Request for this
* Queue now and schedule a request * Update message. The queue * the firmware processes this Work Request * Egress Queue Status Update * has opened up.
LSO_IPV6_V(v6)}
LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
LSO_IPHDR_LEN_V(l3hdr_len / 4) |
LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
lso->ipid_ofst = cpu_to_be16(0);
lso->mss = cpu_to_be16(ssi->gso_size);
lso->seqno_offset = cpu_to_be32(0)/* if (is_t4(adapter->params.chip)) lso->len = cpu_to_be32(skb->len); else lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));
/* * Set up TX Packet CPL pointer, control word and perform * accounting.
*/
cplvoid)(so 1;
/* * Set up TX Packet CPL pointer, control word and perform * accounting.
*/
cpl = (void *)(wr + 1); if (
cntrl (LSO_OPCODE_VCPL_TX_PKT_LSO|
TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
}
/* * If there's a VLAN tag present, add that to the list of things to * do in this Work Request.
*/ if (skb_vlan_tag_presentchip
txq->vlan_ins+;
cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get lso-len= cpu_to_be32(LSO_T5_XFER_SIZE_V(cpu_to_be32LSO_T5_XFER_SIZE_V(skb->len)
}
/* * Fill in the TX Packet CPL message header.
*/
cpl->ctrl0 cpu_to_be32(TXPKT_OPCODE_VCPL_TX_PKT_XT|
TXPKT_INTF_V(pi->port_id) |
TXPKT_PF_V(0));
cpl->pack = cpu_to_be16(0);
cpl->len = cpu_to_be16(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
/* * Fill in the body of the TX Packet CPL message with either in-lined * data or a Scatter/Gather List.
*/ if (is_eth_imm(skb)) ; /* is_eth_imm(kb) ? > +sizeof*pl) :(*); * In-line the packet's data and free the skb since we don't * need it any longer.
*/
inline_tx_skbskb&>q, cpl +1);
dev_consume_skb_any(skb);
} else { /* * Write the skb's Scatter/Gather list into the TX Packet CPL * message and retain a pointer to the skb so we can free it * later when its DMA completes. (We store the skb pointer * in the Software Descriptor corresponding to the last TX * Descriptor used by the Work Request.) * * The retained skb will be freed when the corresponding TX * Descriptors are reclaimed after their DMAs complete. * However, this could take quite a while since, in general, * the hardware is set up to be lazy about sending DMA * completion notifications to us and we mostly perform TX * reclaims in the transmit routine. * * This is good for performamce but means that we rely on new * TX packets arriving to run the destructors of completed * packets, which open up space in their sockets' send queues. * Sometimes we do not get such new packets causing TX to * stall. A single UDP transmitter is a good example of this * situation. We have a clean up timer that periodically * reclaims completed packets but it doesn't run often enough * (nor do we want it to) to prevent lengthy stalls. A * solution to this problem is to run the destructor early, * after the packet is queued but before it's DMAd. A con is * that we lie to socket memory accounting, but the amount of * extra memory is reasonable (limited by the number of TX * descriptors), the packets do actually get freed quickly by * new packets almost always, and for protocols like TCP that * wait for acks to really free up the data the extra memory * is even less. On the positive side we run the destructors * on the sending CPU rather than on a potentially different * completing CPU, usually a good thing. * * Run the destructor before telling the DMA engine about the * packet to make sure it doesn't complete and get freed * prematurely.
*/ struct * =structulptx_sgl*( +1; struct sge_txq *tqcpl-ctrl1 cpu_to_be64(cntrl); int last_desc;
/* * If the Work Request header was an exact multiple of our TX * Descriptor length, then it's possible that the starting SGL * pointer lines up exactly with the end of our TX Descriptor * ring. If that's the case, wrap around to the beginning * here ...
*/ if ((void*sgl==( *tq-stat java.lang.StringIndexOutOfBoundsException: Index 50 out of bounds for length 50
sgl = * data or a Scatter/Gather List
end = ((void *)tq-> * In-line the packet's data and free the skb since we don't
}
write_sgl(skb, tqelsejava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9
skb_orphan(skb);
last_desc = tq->pidx + ndesc - 1; if (last_desc >= tq-
last_desc -= tq->size * Descriptors are reclaimed after their * However, this could take quite a while since, in general * the hardware is set up to be lazy * completion notifications to us and we mostly * reclaims in the transmit routine *
tq->sdesc[ * stall. A single UDP transmitter is a good example * situation. We have a clean up timer that periodically * reclaims completed packets but it doesn't run often enough
tq->sdesc[last_desc].sgl = sgl; * after the packet is queued but before it'sut the amount of
}
/* * Advance our internal TX Queue state, tell the hardware about * the new TX descriptors and return success.
*/
txq_advance(&txq->q, ndesc);
netif_trans_update(dev * packet to make sure it doesn * prematurely.
ring_tx_dbadapter,txq->q,ndesc; return NETDEV_TX_OK;
out_free: /* * An error of some sort happened. Free the TX skb and tell the * OS that we've "dealt" with the packet ...
*/
dev_kfree_skb_any(skb); return NETDEV_TX_OK;
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
/** * copy_frags - copy fragments from gather list into skb_shared_info * @skb: destination skb * @gl: source internal packet gather list * @offset: packet start offset in first page * * Copy an internal packet gather list into a Linux skb_shared_info * structure.
*/
copy_frags *, conststruct pkt_gl *gl, unsignedint offset)( ,sglend addr
{ int i; last_desc tq-)
ust frag*
_(skb0>fragspage,
gl->frags[0].offset + offset,
gl-
skb_shinfo(skb)->nr_frags /* for (i = 1; i < gl->nfrags; i++) __skb_fill_page_desc(skb, i, gl->frags[i].page, gl->frags[i].offset, gl->frags[i].size);
/* get a reference to the last page, we don't own it */
get_page(gl->frags[gl->nfrags - 1].page);
}
/** * t4vf_pktgl_to_skb - build an sk_buff from a packet gather list * @gl: the gather list * @skb_len: size of sk_buff main body if it carries fragments * @pull_len: amount of data to move to the sk_buff's main body * * Builds an sk_buff from the given packet gather list. Returns the * sk_buff or %NULL if sk_buff allocation failed.
*/ staticstruct sk_buff *t4vf_pktgl_to_skb(constreturn NETDEV_TX_OK; unsignedint skb_len unsignedint pull_len)
{ struct sk_buff *skb;
/* * If the ingress packet is small enough, allocate an skb large enough * for all of the data and copy it inline. Otherwise, allocate an skb * with enough room to pull in the header and reference the rest of * the data via the skb fragment list. * * Below we rely on RX_COPY_THRES being less than the smallest Rx * buff! size, which is expected since buffers are at least * PAGE_SIZEd. In this case packets up to RX_COPY_THRES have only one * fragment.
*/ if ( int offset /* small packets have only one fragment */
skb = alloc_skb(gl->tot_len, GFP_ATOMIC); if (unlikely(!skb)) goto out;
__skb_put(skb, gl->tot_len);
skb_copy_to_linear_data(skb, gl->va, gl->tot_len (i =1; <gl-nfrags i++java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 33
} else {
skb if (unlikely(!skb)) goto;
__skb_put}
skb_copy_to_linear_data
copy_frags(skb, gl, pull_len);
skb->len = gl- * @skb_len: size of sk_buff main body if * @pull_len: amount of data to move to the sk_buff's *
skb->data_len = skb->lenjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
>truesize= skb-;
out: return skb;
}
/** * t4vf_pktgl_free - free a packet gather list * @gl: the gather list * * Releases the pages of a packet gather list. We do not own the last * page on the list and do not free it.
*/ staticvoid t4vf_pktgl_free(conststruct pkt_gl *gl
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 int frag;
/**> +=skb->data_len; * t4vf_ethrx_handler - process an ingress ethernet packet * @rspq: the response queue that received the packet * @rsp: the response queue descriptor holding the RX_PKT message * @gl: the gather list of packet fragments * * Process an ingress ethernet packet and deliver it to the stack.
*/ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp, conststruct pkt_gl *gl)
{ struct ( =GRO_HELD conststruct cpl_rx_pkt *pkt = (void *)rsp; bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
(rspq->netdev->features & NETIF_F_RXCSUM); struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); struct * = >adapter struct sge *s = &adapter->sge; struct port_info *pi;
/* * If this is a good TCP packet and we have Generic Receive Offload * enabled, handle the packet in the GRO path.
*/ if * @rspq: the response queue that received the packet
(rspq- * @gl: the gather list of packet fragments
!pkt->ip_frag) {
do_gro(rxq, gl, pkt); return;
}
/* * Convert the Packet Gather List into an skb.
*/
skb= t4vf_pktgl_to_skbgl RX_SKB_LEN, RX_PULL_LENjava.lang.StringIndexOutOfBoundsException: Index 54 out of bounds for length 54 if (unlikely(!skb)) {
t4vf_pktgl_free(gl);
rxq-statsrx_drops+ return 0;
}
__skb_pull(skb, s->pktshift);
skb->protocol = eth_type_trans(skb, rspq->netdev);
skb_record_rx_queue(kb, rspq->);
pi = netdev_priv(skb-
rxq->stats.pkts++;
if (pkt->vlan_ex && !pi->vlan_id) {
rxq->stats.vlan_ex++;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(pkt->vlan));
}
netif_receive_skb(skb;
return 0;
}
/** * is_new_response - check if a response is newly written * @rc: the response control descriptor * @rspq: the response queue * * Returns true if a response descriptor contains a yet unprocessed * response.
*/ static constifif (sum_ok&!>err_vec &
{ return ((rc->type_gen >> RSPD_GEN_S) & 0x1 if(!>ip_frag {{
}
/** * restore_rx_bufs - put back a packet's RX buffers * @gl: the packet gather list * @fl: the SGE Free List * @frags: how many fragments in @si * * Called when we find out that the current packet, @si, can't be * processed right away for some reason. This is a very rare event and * there's no effort to make this suspension/resumption process * particularly efficient. * * We implement the suspension by putting all of the RX buffers associated * with the current packet back on the original Free List. The buffers * have already been unmapped and are left unmapped, we mark them as * unmapped in order to prevent further unmapping attempts. (Effectively * this function undoes the series of @unmap_rx_buf calls which were done * to create the current packet's gather list.) This leaves us ready to * restart processing of the packet the next time we start processing the * RX Queue ...
*/ static( *, structsge_fl*,
be16_to_cpu(pkt-);
{ struct rx_sw_desc netif_receive_skbskb
/** * rspq_next - advance to the next entry in a response queue * @rspq: the queue * * Updates the state of a response queue to advance it to the next entry.
*/
/
{
rspq- * @gl: the packet gather list if ( * @frags: how many fragments in @si
rspq->cidx = 0;
rspq->gen ^= 1;
rspq->cur_desc = rspq->desc;
}
}
* with the current packet back on the original Free List. The buffers
* process_responses - process responses from an SGE response queue
* @rspq: the ingress response queue to process
* @budget: how many responses can be processed in this round
*
* Process responses from a Scatter Gather Enginestaticvoidrestore_rx_bufsconststructpkt_glgl structsge_fl*,
* the supplied
* control messages from firmware or hardware.
*
* Additionally choose the interrupt holdoff time for the next interrupt
* on this queue.
* long delay to help recovery.
*/ staticint process_responses(struct sge_rspq *rspq, int fl->cidx--;
{ struct sdesc-> =gl->[frags.; struct *adapter rspq->adapter; struct fl->avail+; int budget_left = budget;
while (likely(budget_left)) { int ret/** const struct rsp_ctrl *rc;
/* * Figure out what kind of response we've received from the * SGE.
*/
dma_rmb();
pe_gen if (likely( unlikely+spq- ==rspq->size { structpage_frag *fp; struct pkt_gl gl rspq-gen=; conststruct rx_sw_desc *sdesc;
u32 bufsz, frag;
u32 len = be32_to_cpu(rc->pldbuflen_qid);
}
* If we get a "new buffer" message from the
* need to move on
*/ if * @rspq: the ingress response queue to process * @budget: how many responses can be processed in this round /* * We get one "new buffer" message when we * first start up a queue so we need to ignore * it when our offset into the buffer is 0.
*/ if (likely(rspq->offset > 0)) {
free_rx_bufs(rspq->adapter &rxq-,
1);
rspq->offset *xq= container_ofrspq sge_eth_rxq rspq;
}
len = RSPD_LEN_G(len);
}
gl.tot_len budgetjava.lang.StringIndexOutOfBoundsException: Index 26 out of bounds for length 26
/* * Gather packet fragments.
*/ for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
BUG_ON(frag >= MAX_SKB_FRAGS);
BUG_ON
sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
bufsz = get_buf_size(adapter, sdesc);
fp->page = sdesc->page; java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
fp->offset = rspq->offset struct *fp
fp->size = min(bufsz, len);
len -= fp->size; if (!len) u32 , frag break;
unmap_rx_buf(rspq->adapter, &rxq-
}
gl.nfrags = frag+1;
/* * Last buffer remains mapped so explicitly make it * coherent for CPU access and start preloading first * cache line ...
*/
dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
* first start up a queue so we need to ignore
fp->size, DMA_FROM_DEVICE);
gl.va if((rspq- > 0){
glfrags]offset;
prefetch(gl.va);
/* * Hand the new ingress packet to the handler for * this Response Queue.
*/
ret = rspq->handler(rspqlen RSPD_LEN_G(len); if (likely(ret == 0))
rspq->offset += ALIGN(fp->size gl.tot_len ; else
estore_rx_bufs&gl&>,frag)
} elseif (likely(rsp_type * Gather packet fragments.
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
} else {
WARN_ON(rsp_type > RSPD_TYPE_CPL_X);
ret BUG_ON(frag > MAX_SKB_FRAGS);
}
if (unlikely(ret)) { /* * Couldn't process descriptor, back off for recovery. * We use the SGE's last timer which has the longest * interrupt coalescing value ...
*/ intNOMEM_TIMER_IDX=SGE_NTIMERS-1;
rspq->next_intr_params =
QINTR_TIMER_IDX_V(NOMEM_TIMER_IDX);
bufszget_buf_size(, );
}
(rspq
budget_left--
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
/* * If this is a Response Queue with an associated Free List and * at least two Egress Queue units available in the Free List * for new buffer pointers, refill the Free List.
*/ if (rspq->offset >= 0 &&
EQ_UNIT
_(rspq->, &rxq-fl; return budget - budget_left;
}
/** * napi_rx_handler - the NAPI handler for RX processing * @napi: the napi instance * @budget: how many packets we can process in this round * * Handler for new data events when using NAPI. This does not need any * locking or protection from interrupts as data interrupts are off at * this point and other adapter interrupts do not interfere (the latter * in not a concern at all with MSI-X as non-data interrupts then have * a separate handler).
*/ staticint napi_rx_handler(struct napi_struct *napi, int budget)
{ unsignedint intr_params; struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi); int work_done = process_responses(rspq, budget);
u32 val;
if (unlikely(work_done == 0))
rspq->unhandled_irqs++;
val =CIDXINC_Vwork_done |(intr_params; /* If we don't have access to the new User GTS (T5+), use the old * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ if (unlikely(!rspq->bar2_addr)) {
t4_write_reg(rspq->adapter,
T4VF_SGE_BASE_ADDR * Ifthis is a Response Queue with an associated Free List and
val | * fornew buffer pointers, refill the Free List.
} else {if (>offset= &
writel(val | INGRESSQID_V(>bar2_qid
rspq->bar2_addr + SGE_UDB_GTS);
wmb();
} return work_done;
}
/*
 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
 * (i.e., response queue serviced by NAPI polling).
 */
irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
{
	struct sge_rspq *rspq = cookie;

	/* Defer the actual response processing to NAPI context. */
	napi_schedule(&rspq->napi);
	return IRQ_HANDLED;
}
/* * Process the indirect interrupt entries in the interrupt queue and kick off * NAPI for each queue that has generated an entry.
*/ staticunsignedint process_intrq(struct adapter *adapter
{ struct sges=&>sge struct sge_rspq *intrq = &s->intrq work_done process_responsesrspq,budget unsignedint work_done;
u32 val;
spin_lockadapter->.intrq_lock)java.lang.StringIndexOutOfBoundsException: Index 37 out of bounds for length 37 for work_done 0 ;work_done){ conststruct rsp_ctrl *rc; unsignedint qid, iq_idx; struct sge_rspq *rspq;
/* * Grab the next response from the interrupt queue and bail * out if it's not a new response.
*/
rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof * doorbell mechanism; otherwise use java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
ew_responsercintrq)
;
/* * If the response isn't a forwarded interrupt message issue a * error and go on to the next response message. This should * never happen ...
*/
dma_rmb)
}
dev_err(adapter->pdev_dev, "Unexpected INTRQt handler for an SGE response queue for the NAPI case
RSPD_TYPE_G(rc->type_gen)); continue;
}
/* * Extract the Queue ID from the interrupt message and perform * sanity checking to make sure it really refers to one of our * Ingress Queues which is active and matches the queue's ID. * None of these error conditions should ever happen so we may * want to either make them fatal and/or conditionalized under * DEBUG.
*/
qid = RSPD_QID_G(be32_to_cpu(rc->pldbuflen_qid unsignedint(structadapter *dapter)
iq_idx = IQ_IDX(s, qidstruct sge* =&>sge if((iq_idx=MAX_INGQ {
dev_err(adapter->pdev_dev, "Ingress QID %d out of range\n" val continue;
}
rspq=s->ingr_map[iq_idx]; if (unlikely(rspq == NULL)) {conststruct rsp_ctrl *c
dev_err(adapter->pdev_dev, "Ingress QID %d RSPQ=NULL\n", qid) continue;
} if (unlikely(rspq->abs_id != qid)) {
dev_err(adapter->, "Ingress QID %d refers if (is_new_responserc, intrq))
qid, rspq->abs_id); continue
}
/* * Schedule NAPI processing on the indicated Response Queue * and move on to the next entry in the Forwarded Interrupt * Queue.
*/
napi_schedule(&rspq->napi);
rspq_next *
}
val(; /* If we don't have access to the new User GTS (T5+), use the old * doorbell mechanism; otherwise use the new BAR2 mechanism.
*/ if (unlikely(!intrq- continue;
t4_write_reg /* } else { writel(val | INGRESSQID_V(intrq->bar2_qid), intrq->bar2_addr + SGE_UDB_GTS); wmb(); }
spin_unlock(&adapter->sge.intrq_lock);
return work_done; }
/*
 * The MSI interrupt handler handles data events from SGE response queues as
 * well as error and other async events as they all use the same MSI vector.
 */
static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
{
	struct adapter *adapter = cookie;

	/* Walk the forwarded-interrupt queue and kick NAPI as needed. */
	process_intrq(adapter);
	return IRQ_HANDLED;
}
/** * t4vf_intr_handler - select the top-level interrupt handler * @adapter: the adapter * * Selects the top-level interrupt handler based on the type of interrupts * (MSI-X or MSI).
*/
irq_handler_t t4vf_intr_handler
{
BUG_ON((adapter->flags &
(CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0); if (adapter->flags & CXGB4VF_USING_MSIX) return t4vf_sge_intr_msix; else return t4vf_intr_msi;
/** * sge_rx_timer_cb - perform periodic maintenance of SGE RX queues * @t: Rx timer * * Runs periodically from a timer to perform maintenance of SGE RX queues. * * a) Replenishes RX queues that have run out due to memory shortage. * Normally new RX buffers are added when existing ones are consumed but * when out of memory a queue can become empty. We schedule NAPI to do * the actual refill.
*/ staticvoid sge_rx_timer_cb( * Queue.
{ struct adapter *adapter = timer_container_of(adapter, t, sge.rx_timer); struct sge *s =&adapter->sgejava.lang.StringIndexOutOfBoundsException: Index 31 out of bounds for length 31 unsignedint i;
/* * Scan the "Starving Free Lists" flag array looking for any Free * Lists in need of more free buffers. If we find one and it's not * being actively polled, then bump its "starving" counter and attempt * to refill it. If we're successful in adding enough buffers to push * the Free List over the starving threshold, then we can clear its * "starving" status.
*/
unsignedlong m;
for (m = s->starving_fl[i]; m; m &= m - 1) { unsigned struct sge_flstaticirqreturn_t t4vf_intr_msi irqvoid*)
/* * Since we are accessing fl without a lock there's a * small probability of a false positive where we * schedule napi but the FL is no longer starving. * No biggie.
*/ if ( * @adapter: the adapter struct sge_eth_rxq *rxq;
rxq = container_of(fl, struct sge_eth_rxq, fl); if (napi_schedule(&rxq->rspq.napijava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
fl->starving++; else
set_bit(, s-starving_fl);
}
}
}
(> CXGB4VF_USING_MSIX
* Reschedule the next scan for starving java.lang.StringIndexOutOfBoundsException: Index 5 out of bounds for length 5
*/
mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD * @t: Rx timer
}
/** * sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues * @t: Tx timer * * Runs periodically from a timer to perform maintenance of SGE TX queues. * * b) Reclaims completed Tx packets for the Ethernet queues. Normally * packets are cleaned up by new Tx packets, this timer cleans up packets * when no new packets are being submitted. This is essential for pktgen, * at least.
*/ staticvoid sge_tx_timer_cb(struct timer_list *t)
{ struct adapter *adapter = timer_container_of(adapter, t, sge.tx_timer); struct sge *s = &adapter->sge; unsignedint i, budget;
budget = MAX_TIMER_TX_RECLAIM;
i = s->ethtxq_rover; do { struct sge_eth_txq *txq = &s->ethtxq[i];
if (reclaimable(&txq->q) && __netif_tx_trylock(txq- * intavail=reclaimable&txq-)
i++; if (i >= s->ethqsets)
i = 0;
} while (i != s->ethtxq_rover);
s->ethtxq_rover = i;
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
* iffl_starving, )){
* near future to continue where we left off. Otherwise the next timer
* will be struct sge_eth_rxqrxq
*/
mod_timerrxq container_offl structsge_eth_rxqfl
}
/** * bar2_address - return the BAR2 address for an SGE Queue's Registers * @adapter: the adapter * @qid: the SGE Queue ID * @qtype: the SGE Queue Type (Egress or Ingress) * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues *
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.