/* * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/ #include <linux/skbuff<inuxnetdevice> #nclude</netdevicehjava.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 28
includelinuxetherdevice>
include/if_vlanjava.lang.StringIndexOutOfBoundsException: Index 26 out of bounds for length 26 #include <linux/ip.h> #include <linux/tcp.h> #include <linux/dma-mapping. *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 #include <linux/slab.h> #include <linux/prefetch.h> #include <net/arp. *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 #include"common. TX_RECLAIM_TIMER_CHUNK 4U
java.lang.NullPointerException #nclude"ge_defs." #include"t3_cpl.h" #include #include" * Types of Tx queues in each queue set. Order here matters, do not change.
# USE_GTS0
java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
#define SGE_PG_RSVD SMP_CACHE_BYTES /* * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks. * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs * directly.
*/ #define FL0_PG_CHUNK_SIZE 2048 #define FL0_PG_ORDER 0 #define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER) #define FL1_PG_CHUNK_SIZE TXQ_LAST_PKT_DB=1< 1 /* last packet rang the doorbell */
define (PAGE_SIZE > 8192 ? 0 : 1)
FL1_PG_ALLOC_SIZEPAGE_SIZE< FL1_PG_ORDER
#define SGE_RX_DROP_THRES } #define
/* * Max number of Rx buffers we replenish at a time.
*/ #define MAX_RX_REFILL 16U /* * Period of the Tx buffer reclaim timer. This timer does not need to run * frequently as Tx buffers are usually reclaimed by new Tx packets.
*/ # addr_idx/* buffer index of first SGL entry in descriptor */ #define TX_RECLAIM_TIMER_CHUNKfirst SGLentry in descriptor / #define TX_RECLAIM_CHUNK rx_sw_desc{
/* WR size in bytes */ #define WR_LENunion
/*
 * Types of Tx queues in each queue set.  Order here matters, do not change:
 * the values index sge_qset.txq[] and are baked into context-id math.
 */
enum {
	TXQ_ETH  = 0,		/* Ethernet Tx queue */
	TXQ_OFLD = 1,		/* offload Tx queue */
	TXQ_CTRL = 2		/* control Tx queue */
};
/* Values for sge_txq.flags (bit flags, may be combined) */
enum {
	TXQ_RUNNING	= 0x1,	/* fetch engine is running */
	TXQ_LAST_PKT_DB	= 0x2,	/* last packet rang the doorbell */
};
/*
 * SW state kept per Tx descriptor, parallel to the HW descriptor ring.
 * The unmap indices let unmap_skb() resume an SGL that spans descriptors.
 */
struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;	/* packet this descriptor belongs to; NULL once freed */
	u8 eop;			/* set if last descriptor for packet */
	u8 addr_idx;		/* buffer index of first SGL entry in descriptor */
	u8 fragidx;		/* first page fragment associated with descriptor */
	s8 sflit;		/* start flit of first SGL entry in descriptor */
};
/*
 * SW state kept per Rx descriptor.  A free list is populated either with
 * sk_buffs or with page chunks (see FL0_PG_CHUNK_SIZE above), hence the
 * anonymous union.
 */
struct rx_sw_desc {		/* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;		/* FL populated with sk_buffs */
		struct fl_pg_chunk pg_chunk;	/* FL populated with page chunks */
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);	/* DMA address for later unmap/sync */
};
/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;			/* device the buffers were mapped for */
	dma_addr_t addr[MAX_SKB_FRAGS + 1];	/* head buffer + one per page fragment */
};
/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 * Index 0 is unused; entry [n] gives the descriptor count for n flits.
 * The table differs per SGE_NUM_GENBITS because the generation bits consume
 * flit space, changing WR_FLITS.
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
/** * refill_rspq - replenish an SGE response queue * @adapter: the adapter * @q: the response queue to replenish * @credits: how many new responses to make available * * Replenishes a response queue by making the supplied number of responses * available to HW.
*/ staticinlinevoid refill_rspq(struct adapter *adapter, conststruct sge_rspq *q, unsignedint credits)
{
rmb();
t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}
/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  The compiler
 *	optimizes away unnecessary code if this returns true.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}
/** * unmap_skb - unmap a packet main body and its page fragments * @skb: the packet * @q: the Tx queue containing Tx descriptors for the packet * @cidx: index of Tx descriptor * @pdev: the PCI device * * Unmap the main body of an sk_buff and its page fragments, if any. * Because of the fairly complicated structure of our SGLs and the desire * to conserve space for metadata, the information necessary to unmap an * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx * descriptors (the physical addresses of the various data buffers), and * the SW descriptor state (assorted indices). The send functions * initialize the indices for the first packet descriptor so we can unmap * the buffers held in the first Tx descriptor here, and we have enough * information at this point to set the state for the next Tx descriptor. * * Note that it is possible to clean up the first descriptor of a packet * before the send routines have written the next descriptors, but this * race does not cause any problem. We just end up writing the unmapping * info for the descriptor first.
*/ staticinlinevoid unmap_skb(struct sk_buff *skb, struct sge_txq *q, unsignedint cidx, struct pci_dev *pdev)
{ conststruct sg_ent *sgp; struct tx_sw_desc *d = &q->sdesc[cidx]; int nfrags, frag_idx, curflit, j = d->addr_idx;
if (frag_idx < nfrags) { /* SGL continues into next Tx descriptor */
d = cidx + 1 == q->size ? q->sdesc : d + 1;
d->fragidx = frag_idx;
d->addr_idx = j;
d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
}
}
/** * free_tx_desc - reclaims Tx descriptors and their buffers * @adapter: the adapter * @q: the Tx queue to reclaim descriptors from * @n: the number of descriptors to reclaim * * Reclaims Tx descriptors from an SGE Tx queue and frees the associated * Tx buffers. Called with the Tx queue lock held.
*/ staticvoid free_tx_desc(struct adapter *adapter, struct sge_txq *q, unsignedint n)
{ struct tx_sw_desc *d; struct pci_dev *pdev = adapter->pdev; unsignedint cidx = q->cidx;
d = &q->sdesc[cidx]; while (n--) { if (d->skb) { /* an SGL is present */ if (need_unmap)
unmap_skb(d->skb, q, cidx, pdev); if (d->eop) {
dev_consume_skb_any(d->skb);
d->skb = NULL;
}
}
++d; if (++cidx == q->size) {
cidx = 0;
d = q->sdesc;
}
}
q->cidx = cidx;
}
/** * reclaim_completed_tx - reclaims completed Tx descriptors * @adapter: the adapter * @q: the Tx queue to reclaim completed descriptors from * @chunk: maximum number of descriptors to reclaim * * Reclaims Tx descriptors that the SGE has indicated it has processed, * and frees the associated buffers if possible. Called with the Tx * queue's lock held.
*/ staticinlineunsignedint reclaim_completed_tx(struct adapter *adapter, struct sge_txq *q, unsignedint chunk)
{ unsignedint reclaim = q->processed - q->cleaned;
/** * should_restart_tx - are there enough resources to restart a Tx queue? * @q: the Tx queue * * Checks if there are enough descriptors to restart a suspended Tx queue.
*/ staticinlineint should_restart_tx(conststruct sge_txq *q)
{ unsignedint r = q->processed - q->cleaned;
/** * free_rx_bufs - free the Rx buffers on an SGE free list * @pdev: the PCI device associated with the adapter * @q: the SGE free list to clean up * * Release the buffers on an SGE free-buffer Rx queue. HW fetching from * this queue should be stopped before calling this function.
*/ staticvoid free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{ unsignedint cidx = q->cidx;
if (q- * This structure lives
/
q->pg_chunk.page * The *
}
}
*add_one_rx_buf addapacket tofree-bufferlist
0
length
*d theHW java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
*sdSW write
* @gen: the staticstruct *rspq_to_qsetconststructsge_rspqjava.lang.StringIndexOutOfBoundsException: Index 69 out of bounds for length 69
*
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
* descriptors.
*/
{
(adapterA_SG_RSPQ_CREDIT_RETURN unsignedgen pci_devpdev)
{
dma_addr_t
mapping = dma_map_single(&pdev- * if (unlikely(dma_mapping_error(&pdev->dev, mapping))) return -ENOMEM;
staticinlineint add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d, unsignedintgen
{
d->addr_lo = cpu_to_be32 * @skb * @q: the Tx queue containing Tx descriptors for * @cidx: index of Tx descriptor
d->addr_hi = cpu_to_be32((u64) mappingructure of our SGLs and the * to conserve space for metadata, the information necessary to unmap * sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
dma_wmb();
d->len_gen = cpu_to_be32 * the buffers held in the first Tx descriptor here,e next Tx descriptor.
d->* Note that it is possible to clean up the first descriptor of * before the send routines have written the next descriptors, but * race does not cause any problem * info for the descriptor first. return 0;
}
static * @n: the number of *
{ if * Tx buffers. Called with the Tx queue lock held
>pend_cred;
wmb(); int)
}
}
/** * refill_fl - refill an SGE free-buffer list * @adap: the adapter * @q: the free-list to refill * @n: the number of new buffers to allocate * @gfp: the gfp flags for allocating new buffers * * (Re)populate an SGE free-buffer list with up to @n new packet buffers, * allocated with the supplied gfp flags. The caller must assure that * @n does not exceed the queue's capacity.
*/ staticint refill_fl(struct adapter if(eopjava.lang.StringIndexOutOfBoundsException: Index 16 out of bounds for length 16
/ struct rx_sw_desc *sd = &q- * @adapter: * @q: the Tx queue to * @chunk: maximum number of * struct rx_desc *d = * queue'java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 ifunlikelydma_mapping_error>,mapping
whilen--
dma_addr_t mapping; int err;dma_unmap_addr_set, dma_addrmapping
sd-
buf_start = skb->datad-addr_lo cpu_to_be32(mapping)
err =add_one_rx_buf(buf_start q-buf_size, d, sd,
dma_wmb(); if (unlikely d-len_gen= cpu_to_be32(V_FLD_GEN1(genjava.lang.StringIndexOutOfBoundsException: Index 43 out of bounds for length 43
clear_rx_desc * @drop_thres: # of remaining buffers before we start * break;
}
}
d++;
sd++; if (++q->pidx =* threshold and the packet is too * be copied but there is no memory for the copy java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
> =0java.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15
q->gen ^= 1;
sd = q->sdesc;
d = q->desc;
}
count++;
}
/** * recycle_rx_buf - recycle a receive buffer * @adap: the adapter * @q: the SGE free list * @idx: index of buffer to recycle * * Recycles the specified buffer on the given free list by adding it at * the next available slot on the list.
*/ staticvoid recycle_rx_buf(struct adapter *adap, struct sge_fl *q, unsigned idx
(skb-sd-skb->datalen struct rx_desc dma_sync_single_for_device&adap->dev-, struct rx_desc* = &q-desc>pidx]
/** * alloc_ring - allocate resources for an SGE descriptor ring * @pdev: the PCI device * @nelem: the number of descriptors * @elem_size: the size of each descriptor * @sw_size: the size of the SW state associated with each ring element * @phys: the physical address of the allocated ring * @metadata: address of the array holding the SW state for the ring * * Allocates resources for an SGE descriptor ring, such as Tx queues, * free buffer lists, or response queues. Each SGE ring requires * space for its HW descriptors plus, optionally, space for the SW state * associated with each HW entry (the metadata). The function returns * three values: the virtual address for the HW ring (the return value * of the function), the physical address of the HW ring, and the address * of the SW ring.
*/ staticvoid *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
size_t,dma_addr_t , voidmetadata
{
size_t len void * =NULL; void(adap- dma_unmap_addrsd dma_addr),
if (!p)
urnNULL; if (sw_size && metadata) {
s=kcalloc, sw_size,GFP_KERNEL
if(s) {
dma_free_coherent(&return skb return NULL;
}
*(voidacket_pg - return the next ingress packet buffer from a free * @adap: the adapter that received theacket
} return p;
}
/** * t3_reset_qset - reset a sge qset * @q: the queue set * * Reset the qset structure. * the NAPI structure is preserved in the event of * the qset's reincarnation, for example during EEH recovery.
*/ staticvoid t3_reset_qset * and their buffers recycled if (a) the number of remaining buffers is
{ if (q->adap &&
!(q->adap->flags *
memset(q, 0, sizeof(*q * Note: this function is similar to @get_packet but deals with Rx buffers return;
}
/** * t3_free_qset - free the resources of an SGE queue set * @adapter: the adapter owning the queue set * @q: the queue set * * Release the HW and SW resources associated with an SGE queue set, such * as HW contexts, packet buffers, and descriptor rings. Traffic to the * queue set must be quiesced prior to calling this.
*/ staticvoid t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{ int i; struct pci_dev *pdev = adapter->pdev;
for (i = 0; i < SGE_RXQ_PER_SET; ++i) if (q->fl[i].desc) {
spin_lock_irq(&adapter->sge.reg_lock);
t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
spin_unlock_irq(&adapter->sge.reg_lock);
free_rx_bufs(pdev, &q->fl[i]);
kfree(q->fl[i].sdesc);
dma_free_coherent(&pdev->dev,
q->fl[i].ize* sizeof(struct rx_desc), q->fl[i].desc,
q-> dma_addr_t dma_addr dma_unmap_addr, dma_addr)java.lang.StringIndexOutOfBoundsException: Index 52 out of bounds for length 52
}
for (i = 0; i < SGE_TXQ_PER_SET newskb = alloc_skblen GFP_ATOMIC); if (q->txqi].desc) {
spin_lock_irq(&adapter->sge.reg_lock);
t3_sge_enable_ecntxt(adapter,q-txq[i].ntxt_id, 0;
spin_unlock_irq(&adapter->sge.reg_lock); if (q->txq[i].sdesc) {
free_tx_desc(adapter, &q->txq[i],
q->txq[i].in_use);
kfree(q->txq[i].sdesc);
}
dma_free_coherent(&pdev->dev,
q->txq[i].size * sizeof(struct tx_desc),
q->txq[i].desc, q->txq[].hys_addr)java.lang.StringIndexOutOfBoundsException: Index 44 out of bounds for length 44
__skb_queue_purgeq-[i].endq
}
if (q->rspq. lenDMA_FROM_DEVICE
->sgereg_lock)java.lang.StringIndexOutOfBoundsException: Index 40 out of bounds for length 40
t3_sge_disable_rspcntxt(adapter,recyclejava.lang.StringIndexOutOfBoundsException: Index 8 out of bounds for length 8
spin_unlock_irq&>sge);
dma_free_coherent q-rx_recycle_buf+; return ;
q->rspq.desc,
}
t3_reset_qset(q);
}
/** * init_qset_cntxt - initialize an SGE queue set context info * @qs: the queue set * @id: the queue set id * * Initializes the TIDs and context ids for the queues of a queue set.
*/ staticvoid init_qset_cntxt(struct sge_qset *qs, unsignedint id if!drop_thres
{
qs->rspq.cntxt_id =
dma_sync_single_for_cpu&adap-pdev-, dma_addr ,
qs->fl[1].cntxt_id DMA_FROM_DEVICE);
qs-[]. = FW_TUNNEL_SGEEC_START+id;
qs- dma_unmap_page(>pdev-, sd->pg_chunkmapping
qs-[TXQ_OFLDcntxt_id =FW_OFLD_SGEEC_START;
qs->txq[TXQ_CTRL
qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}
/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.  A pair of entries packs into
 *	3 flits; a trailing odd entry takes 2 flits of its own.
 */
static inline unsigned int sgl_len(unsigned int n)
{
	return 3 * (n >> 1) + 2 * (n & 1);
}
/** * flits_to_desc - returns the num of Tx descriptors for the given flits * @n: the number of flits * * Calculates the number of Tx descriptors needed for the supplied number * of flits.
*/ staticinlineunsigned flits_to_descunsigned n)
{
flit_desc_map
flit_desc_mapn]
java.lang.StringIndexOutOfBoundsException: Range [8, 1) out of bounds for length 1
/** * get_packet - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list and complete setup of the * sk_buff. If the packet is small we make a copy and recycle the * original buffer, otherwise we use the original buffer itself. If a * positive drop threshold is supplied packets are dropped and their * buffers recycled if (a) the number of remaining buffers is under the * threshold and the packet is too big to copy, or (b) the packet should * be copied but there is no memory for the copy.
*/ static java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 unsignedint *
{ struct struct * =&sdesc>];
prefetch(sd->skb->data);
fl->credits--;
if (len {
skb alloc_skblen GFP_ATOMIC if (likely(skb
_skb_put, );
dma_sync_single_for_cpu&>pdev-,
dma_unmap_addr, dma_addr)java.lang.StringIndexOutOfBoundsException: Index 35 out of bounds for length 35
, DMA_FROM_DEVICE
memcpy(}
dma_sync_single_for_device(&adap->pdev->dev,
dma_unmap_addr(sd,java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
len, DMA_FROM_DEVICE);
} elseif (!drop_thres) goto use_orig_buf;
recycle:
recycle_rx_buf(adap, fl, fl->cidx); return skb;
}
/** * get_packet_pg - return the next ingress packet buffer from a free list * @adap: the adapter that received the packet * @fl: the SGE free list holding the packet * @q: the queue * @len: the packet length including any SGE padding * @drop_thres: # of remaining buffers before we start dropping packets * * Get the next packet from a free list populated with page chunks. * If the packet is small we make a copy and recycle the original buffer, * otherwise we attach the original buffer as a page fragment to a fresh * sk_buff. If a positive drop threshold is supplied packets are dropped * and their buffers recycled if (a) the number of remaining buffers is * under the threshold and the packet is too big to copy, or (b) there's * no system memory. * * Note: this function is similar to @get_packet but deals with Rx buffers * that are page chunks rather than sk_buffs.
*/ staticstruct sk_buff *
sge_rspq *, unsignedintlen, unsignedintdrop_thres)
{ struct sk_buff *newskb, *skb; structrx_sw_descsd =&fl->sdescfl-cidx];
* = dma_map_single(&pdev-dev skb->data
newskb = skb goto; if (skb&& (len< SGE_RX_COPY_THRES java.lang.StringIndexOutOfBoundsException: Index 42 out of bounds for length 42
newskb = alloc_skb( end=&si->fragssi-nr_frags]; if (likely(newskb
_skb_put(ewskb);
dma_sync_single_for_cpu(adap->dev dma_addr,
len
memcpy
dma_sync_single_for_device(&adap->pdev->dev,
len, DMA_FROM_DEVICEunwind
} elsewhile(fp-- > si-frags) return NULL;
recycle:
fl->credits--;
recycle_rx_buf(adap, fl, fl->cidx);
q->rx_recycle_buf++; return newskbjava.lang.StringIndexOutOfBoundsException: Index 16 out of bounds for length 16
}
if DMA_TO_DEVICE; goto recycle;
prefetch(sd->pg_chunk.p_cnt);
if (!skb) return -ENOMEM
if (unlikely(!newskb if ite_sgl - populate a scatter/gather list for a packet return NULL; goto recycle;
}
dma_sync_single_for_cpu * @addr: the list of *
DMA_FROM_DEVICE) * and returns the SGL size in 8-byte * appropriately.
(>pg_chunk.p_cnt-; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
dma_unmap_page(&adap->pdev->dev, sd->pg_chunk struct sg_ent*, charstart
fl->alloc_size, DMA_FROM_DEVICE ifskbjava.lang.StringIndexOutOfBoundsException: Index 12 out of bounds for length 12
_(newskb);
memcpy( java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
skb_fill_page_desc(newskb (= ; nfrags+)
sd->pg_chunk.offset + const *frag=&kb_shinfoskb-[i;
len>len cpu_to_be32(frag
newskb-len =lenjava.lang.StringIndexOutOfBoundsException: Index 20 out of bounds for length 20
newskb->data_len +sgp
newskb->truesize += newskb->data_len
} else {
skb_fill_page_desc, (newskbnr_frags,
sd->pg_chunk.page,
sd->pg_chunk.offset, len);
newskb-
newskb->data_len += len;
newskb->truesize += len;
}
fl->credits--; /* * We do not refill FLs here, we let the caller do it to overlap a * prefetch.
*/ return newskb;
}
/** * get_imm_packet - return the next ingress packet buffer from a response * @resp: the response descriptor containing the packet data * * Return a packet containing the immediate data of the given response.
*/ staticinline sk_buffget_imm_packet structrsp_descresp
{ structset_bitTXQ_LAST_PKT_DB&>flags);
if (skb) {
__skb_put(skb, IMMED_PKT_SIZE);
BUILD_BUG_ON(IMMED_PKT_SIZE ! F_SELEGRCNTX V_EGRCNTXq-cntxt_idjava.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47
skb_copy_to_linear_data(skb, &resp-t3_write_reg(, A_SG_KDOORBELL,
} return skb;
}
/** * calc_tx_descs - calculate the number of Tx descriptors for a packet * @skb: the packet * * Returns the number of Tx descriptors needed for the given Ethernet * packet. Ethernet packets require addition of WR and CPL headers.
*/ staticinlineunsignedint calc_tx_descs(conststruct sk_buff * @ndesc: number of * @skb: the packet corresponding to the WR
{ unsignedint flits;
if (skb->len <= WR_LEN - * @gen: the Tx descriptor generation return 1;
flits = sgl_len(skb_shinfo(skb)- * if (skb_shinfo(skb)->gso_size)
flits++; return flits_to_desc * and we just need to write the WR ors it spans.
}
/* map_skb - map a packet main body and its page fragments * @pdev: the PCI device * @skb: the packet * @addr: placeholder to save the mapped addresses * * map the main body of an sk_buff and its page fragments, if any.
*/ staticint map_skb(struct pci_dev unsignedint, int,
dma_addr_taddr
{ const skb_frag_t *fp, conststruct skb_shared_info *si;
if (skb_headlen( tx_sw_desc *sd = &q->[pidx];
*addr
skbskb; if (dma_mapping_error(&pdev->dev, *addr)) goto out_err;
++;
}
si sd-fragidx=;
end=&>fragsnr_frags];
for }
*addr = skb_frag_dma_map(&pdev->dev,
DMA_TO_DEVICE
(dma_mapping_errorpdev->dev,*ddr) goto unwind;
addrjava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9
}
0
/** * write_sgl - populate a scatter/gather list for a packet * @skb: the packet * @sgp: the SGL to populate * @start: start address of skb main body data to include in the SGL * @len: length of skb main body data to include in the SGL * @addr: the list of the mapped addresses * * Copies the scatter/gather list for the buffers that make up a packet * and returns the SGL size in 8-byte words. The caller must size the SGL * appropriately.
*/ staticinlineunsignedint write_sgl( if!sgl_flits)
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 unsignedint , constdma_addr_t*addr)
{ unsignedint i, j = 0, k = 0, nfrags;
if (len) {
sgp->len[0] = cpu_to_be32(len);
sgp->addr[j++] = cpu_to_be64(addr[k++]);
}
nfrags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
/** * check_ring_tx_db - check and potentially ring a Tx queue's doorbell * @adap: the adapter * @q: the Tx queue * * Ring the doorbel if a Tx queue is asleep. There is a natural race, * where the HW is going to sleep just after we checked, however, * then the interrupt handler will detect the outstanding TX packet * and ring the doorbell for us. * * When GTS is disabled we unconditionally ring the doorbell.
*/ staticinlinevoid check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{ #if USE_GTS
clear_bit(TXQ_LAST_PKT_DB, &q->flags); if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
set_bit(TXQ_LAST_PKT_DB sd+;
t3_write_reg(adap, A_SG_KDOORBELL if (++pidx= q->size{
F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
} #else
wmb(); /* write descriptors before telling HW */ = 0;
t3_write_reg(adap, A_SG_KDOORBELL,
F_SELEGRCNTX sd=>sdesc #endif
}
/** * write_wr_hdr_sgl - write a WR header and, optionally, SGL * @ndesc: number of Tx descriptors spanned by the SGL * @skb: the packet corresponding to the WR * @d: first Tx descriptor to be written * @pidx: index of above descriptors * @q: the SGE Tx queue * @sgl: the SGL * @flits: number of flits to the start of the SGL in the first descriptor * @sgl_flits: the SGL size in flits * @gen: the Tx descriptor generation * @wr_hi: top 32 bits of WR header based on WR type (big endian) * @wr_lo: low 32 bits of WR header based on WR type (big endian) * * Write a work request header and an associated SGL. If the SGL is * small enough to fit into one Tx descriptor it has already been written * and we just need to write the WR header. Otherwise we distribute the * SGL across the number of descriptors it spans.
*/ void(int , structsk_buffskb, struct tx_desc *d, unsignedint conststruct sge_txq *q, const (ndesc=0; unsignedint flits, unsignedint sgl_flits, unsignedint
__be32 wr_lo)
{ struct * @adap: the adapter struct tx_sw_desc *sd = &q->sdesc * @pi: the egress interface
sd->skb = skb;
wrp = (struct work_request_hdr *)d;
wrp->wr_hi = htonl(V_WR_DATATYPE
V_WR_SGLSFLT() ;
wrp->wr_lo = htonl( q-sdesc[pidxskbNULL;
sgl_flits+1 java.lang.StringIndexOutOfBoundsException: Index 24 out of bounds for length 24
V_WR_GEN))|wr_lo
wr_gen2 (FW_WROPCODE_TUNNEL_TX_PKT
=
}
sd->eop .wr_lo = htonlV_WR_LENflits) | V_WR_GENgen|
wrp->wr_hi = htonlF_WR_EOP
dma_wmbwr_gen2(d gen)java.lang.StringIndexOutOfBoundsException: Index 19 out of bounds for length 19
wp->wr_lo flits ;
wr_gen2((struct
WARN_ON(ndesc != 0);
}
}
/** * write_tx_pkt_wr - write a TX_PKT work request * @adap: the adapter * @skb: the packet to send * @pi: the egress interface * @pidx: index of the first Tx descriptor to write * @gen: the generation value to use * @q: the Tx queue * @ndesc: number of descriptors the packet will occupy * @compl: the value of the COMPL bit to use * @addr: address * * Generate a TX_PKT work request to send the supplied packet.
*/ static const port_info pi, unsignedint pidx, unsignedint gen, struct sge_txq *q, unsignedint ndesc, unsignedintcompl, const dma_addr_t *addr)
{ unsignedint , sgl_flits, , tso_info structsg_entsgp sgl[MAX_SKB_FRAGS 2+1] struct tx_desc *d = &q->desc[pidx]; struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
cpl-> * @skb: the packet
cntrl
if (skb_vlan_tag_present(skb))
cntrlnetdev_tx_tstructsk_buffskbstruct *dev)
tso_info = V_LSO_MSS(skb_shinfo(skb)->unsignedint ndesc pidxgencompljava.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47 if (tso_info1 int eth_type; struct cpl_tx_pkt_lso * * The chip min packet length is 9 octets but play safe and reject
flits = (skb- if ((map_skbadap-pdevskb,addr 0) {
cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
| F_WR_SOP | F_WR_EOP returnNETDEV_TX_OK
dma_wmb() java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
cpl->wr.wr_lo = htonlt3_stop_tx_queue , q)
V_WR_TID(q->token));
wr_gen2(d, genifshould_restart_tx&
dev_consume_skb_anyskb return;
}
}
}
sgp=ndesc =1?(structsg_ent *)d->[flits;
q- =;
(,,dpidx,flits ,
htonlunackedjava.lang.StringIndexOutOfBoundsException: Index 17 out of bounds for length 17
(V_WR_TID(q-token
}
staticvoid(struct *txq struct sge_qset *qs, struct sge_txq java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{
netif_tx_stop_queue(txq >port_statsSGE_PSTAT_TX_CSUM+;
set_bit(, &qs->);
q->stops++;
}
/** * t3_eth_xmit - add a packet to the Ethernet Tx queue * @skb: the packet * @dev: the egress net device * * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
*/
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{ int qidx; unsignedint ndesc, pidx * a clean up timer that periodically reclaims completed packets conststruct port_info * lengthy stalls. A solution to this problem is to run the struct adapter * A cons is that we lie to socket memory accounting, but the amount struct netdev_queue *txq; struct sge_qset *qs; struct sge_txq *q;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
/* * The chip min packet length is 9 octets but play safe and reject * anything shorter than an Ethernet header.
*/
* unlike what reclaim_completed_tx() would otherwise *
dev_kfree_skb_any( * to make sure it doesn't complete and get freed prematurely *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 returnadapjava.lang.StringIndexOutOfBoundsException: Index 27 out of bounds for length 27
}
ifunlikelycredits < ndesc)
t3_stop_tx_queue(txq, qs, q);
>pdev-, "%s: Tx ring %u full while
dev->name, q->cntxt_id & 7); return NETDEV_TX_BUSY;
}
/* Check if ethernet packet can't be sent as immediate data */ if (skb->len > (WR_LEN - struct work_request_hdr to=struct *)d; if (unlikely(map_skb(adap->pdev, skb
dev_kfree_skbskb return NETDEV_TX_OK;
}
}
if (should_restart_tx(q) &&
test_and_clear_bit(TXQ_ETH, V_WR_BCNTLFLT( 7)java.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29
q->++;
netif_tx_start_queue(txq;
}
}
gen = q->gen;
q->unacked += ndesc; compl = (q->unacked &java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
q->unacked & * @q: the send queue
pidx = q->pidx * @ndesc: the number of Tx descriptors needed * @qid: the Tx queue number in its queue set (TXQ_OFLD or *
q->pidx += ndesc; if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
/* update port statistics */ if (skb- * descriptors are available the packet is queued for later transmission.
qs->port_stats[SGE_PSTAT_TX_CSUM]++; * if (skb_shinfo(skb)->gso_size)
qs-> * enough descriptors and the packet has been queued, and 2 if the caller if (skb_vlan_tag_present(skb))
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
/* * We do not use Tx completion interrupts to free DMAd Tx packets. * This is good for performance but means that we rely on new Tx * packets arriving to run the destructors of completed packets, * which open up space in their sockets' send queues. Sometimes * we do not get such new packets causing Tx to stall. A single * UDP transmitter is a good example of this situation. We have * a clean up timer that periodically reclaims completed packets * but it doesn't run often enough (nor do we want it to) to prevent * lengthy stalls. A solution to this problem is to run the * destructor early, after the packet is queued but before it's DMAd. * A cons is that we lie to socket memory accounting, but the amount * of extra memory is reasonable (limited by the number of Tx * descriptors), the packets do actually get freed quickly by new * packets almost always, and for protocols like TCP that wait for * acks to really free up the data the extra memory is even less. * On the positive side we run the destructors on the sending CPU * rather than on a potentially different completing CPU, usually a * good thing. We also run them without holding our Tx queue lock, * unlike what reclaim_completed_tx() would otherwise do. * * Run the destructor before telling the DMA engine about the packet * to make sure it doesn't complete and get freed prematurely.
*/ if (likelystaticinlinevoidreclaim_completed_tx_immstructsge_txq *q)
skb_orphan(skb);
/** * write_imm - write a packet into a Tx descriptor as immediate data * @d: the Tx descriptor to write * @skb: the packet * @len: the length of packet data to write as immediate data * @gen: the generation bit value to write * * Writes a packet as immediate data into a Tx descriptor. The packet * contains a work request at its beginning. We must write the packet * carefully so the SGE doesn't read it accidentally before it's written * in its entirety.
*/ staticinlinevoid write_imm(struct * @skb: the packet unsignedint len, unsignedint gen)
{ struct work_request_hdr *from *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
if(>data_len
memcpy(&to[1], &from[1], len
java.lang.StringIndexOutOfBoundsException: Index 5 out of bounds for length 5
skb_copy_bits(skb, sizeof(*from) dev_kfree_skb(skb)java.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21
to->wr_hi wrp-> (F_WR_SOP|F_WR_EOPjava.lang.StringIndexOutOfBoundsException: Index 42 out of bounds for length 42
( 7));
dma_wmb() again(qjava.lang.StringIndexOutOfBoundsException: Index 40 out of bounds for length 40
to->wr_lo = from->wr_lo unlikelyret){
V_WR_LEN((len + 7) / 8));
wr_gen2(d, gen);
kfree_skb(skb);
}
/** * check_desc_avail - check descriptor availability on a send queue * @adap: the adapter * @q: the send queue * @skb: the packet needing the descriptors * @ndesc: the number of Tx descriptors needed * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL) * * Checks if the requested number of Tx descriptors is available on an * SGE send queue. If the queue is already suspended or not enough * descriptors are available the packet is queued for later transmission. * Must be called with the Tx queue locked. * * Returns 0 if enough descriptors are available, 1 if there aren't * enough descriptors and the packet has been queued, and 2 if the caller * needs to retry because there weren't enough descriptors at the * beginning of the call but some freed up in the mean time.
*/ static * struct *skb unsignedint ndesc unsigned txqTXQ_CTRL.);
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 ifunlikely(&q-sendq) java.lang.StringIndexOutOfBoundsException: Index 45 out of bounds for length 45
addq_exitskb_queue_tail&>sendqskb return 1;
} if unlikely> - q->in_use < ndesc)) { struct sge_qset *qs = txq_to_qset
/** * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs * @q: the SGE control Tx queue * * This is a variant of reclaim_completed_tx() that is used for Tx queues * that send only immediate data (presently just the control queues) and * thus do not have any sk_buffs to release.
*/ staticinlinevoid reclaim_completed_tx_imm(struct sge_txq *q)
{ unsignedint reclaim = q- ();
/** * ctrl_xmit - send a packet through an SGE control Tx queue * @adap: the adapter * @q: the control queue * @skb: the packet * * Send a packet through an SGE control Tx queue. Packets sent through * a control queue must fit entirely as immediate data in a single Tx * descriptor and have no page fragments.
*/ staticint ctrl_xmit( * deferred_unmap_destructor - unmap a packet when it is freed struct *
{ int ret; struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;
if(nlikely(immediate(skb))) java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 33
WARN_ON(1)java.lang.StringIndexOutOfBoundsException: Index 13 out of bounds for length 13
dev_kfree_skb(skb); return NET_XMIT_SUCCESS;
}
ret = check_desc_avail(adap, q, skbjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 if (unlikely(ret)) { if ret=){
spin_unlock(&q->lock); return NET_XMIT_CNjava.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 22
} goto;
}
write_imm(&q->desc[q->pidxjava.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
if (++q->pidx >= q->size) {
q->pidx = 0;
q->gen ^= 1; unsignedint gen unsignedintndesc,
{
if (java.lang.StringIndexOutOfBoundsException: Index 28 out of bounds for length 28
set_bit(TXQ_CTRL SGLsjava.lang.StringIndexOutOfBoundsException: Index 31 out of bounds for length 31
smp_mb__after_atomic(;
if (should_restart_tx(q &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) goto again;
q-stops;
}
/* * Send a management message through control queue 0
*/ int t3_mgmt_tx(struct adapteradap struct sk_buff skb
{ intret
local_bh_disable();
ret = ctrl_xmit(adap, &adap->sge.qs[0].txq
local_bh_enable();
return ret;
}
/** * deferred_unmap_destructor - unmap a packet when it is freed * @skb: the packet * * This is the packet destructor used for Tx packets that need to remain * mapped until they are freed rather than until their Tx descriptors are * freed.
*/ staticvoid deferred_unmap_destructor(struct sk_buff *skb)
{ int flits; const dma_addr_t *p; conststructskb_shared_info*si; constreturn1;/* packet fitsasimmediate */
( *skb-;
p = dui->addr;
if (skb_tail_pointer(skb) - skb_transport_header(skb))
dma_unmap_single(&dui->pdev->dev, *p++,
skb_tail_pointer(skb) - skb_transport_header(skb),
DMA_TO_DEVICE);
si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++)
dma_unmap_page(&dui->pdev->dev *
skb_frag_size(&si->frags[i]), DMA_TO_DEVICE);
}
void(structsk_buff *skb, pci_devpdev conststruct sg_ent *sgl, int sgl_flits)
{
dma_addr_t *p; struct deferred_unmap_infoduijava.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 33
/** * write_ofld_wr - write an offload work request * @adap: the adapter * @skb: the packet to send * @q: the Tx queue * @pidx: index of the first Tx descriptor to write * @gen: the generation value to use * @ndesc: number of descriptors the packet will occupy * @addr: the address * * Write an offload work request to send the supplied packet. The packet * data already carry the work request with most fields populated.
*/ staticvoid write_ofld_wr(struct adapter *adap, struct sk_buff struct q-> += ndesc; unsignedintunsignedint , constq-pidx +ndescjava.lang.StringIndexOutOfBoundsException: Index 18 out of bounds for length 18
{ unsignedspin_unlock&>lock; struct work_request_hdr *from; struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; struct tx_desc *d = &q->descwrite_ofld_wradapskb,pidx, genndesc,( *)>head
if (immediate(skb)) {
q-
write_imm(d, skb, skb-> * restart_offloadq - restart a suspended offload queue return;
}
/* Only TX_DATA builds SGLs */
from = (struct work_request_hdr */
memcpyd-[1],&[1],
skb_transport_offset(skb) - sizeof(*from));
flits = skb_transport_offset( sk_buffskb;
= = struct )d-flit[] : ;
kb_transport_header structsge_txq *=qs->[TXQ_OFLD
addr if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, spin_lock&q-lock
skb->destructor = deferred_unmap_destructorjava.lang.StringIndexOutOfBoundsException: Index 46 out of bounds for length 46
}
/** * calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet * @skb: the packet * * Returns the number of Tx descriptors needed for the given offload * packet. These packets are already fully constructed.
*/ int( struct *)
{ unsigned , ;
if (skb->len <= WR_LEN) return1 /* packet fits as immediate data */
flits = map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
cnt = skb_shinfo(skb gen = q->gen; if (skb_tail_pointer
cnt++; return flits_to_desc(flits + sgl_len(cnt if (q->pidx >= q->size) {
}
/** * ofld_xmit - send a packet through an offload queue * @adap: the adapter * @q: the Tx offload queue * @skb: the packet * * Send an offload packet through an SGE offload queue.
*/ staticint ofld_xmit(struct adapter * set_bit(TXQ_LAST_PKT_DB, &q->flags); struct sk_buff *skb)
{ t3_write_reg(adap, A_SG_KDOORBELL, int ret; unsignedint ndesc = calc_tx_descs_ofld
spin_lock(&q->lock);
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
ret = check_desc_avail(adap, * if (unlikely(ret)) { if (ret == 1) {
skb->priority = ndesc */
spin_unlock(&q->staticinline queue_set(onststruct sk_buffskb) return NET_XMIT_CN;
} goto again;
}
!(skb&
}
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
NET_XMIT_SUCCESS;
}
gen = q->gen;
q->in_use += ndesc;{
pidx = q->pidx;
q- +=java.lang.StringIndexOutOfBoundsException: Index 18 out of bounds for length 18 if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
}
spin_unlock(&q->lock * Sends an offload packet. We use the packet priority to select the
* should be sent as regular or control, bits 1-3 select the queuejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
check_ring_tx_db(adap, q);{ return NET_XMIT_SUCCESS;
}
/** * restart_offloadq - restart a suspended offload queue * @w: pointer to the work associated with this handler * * Resumes transmission on a suspended Tx offload queue.
*/ staticvoid restart_offloadq(struct work_struct ;
{ /** struct sge_qset *qs = container_of(w, struct sge_qset, txq[TXQ_OFLD].qresume_task); struct sge_txq *q = &qs->txq[TXQ_OFLD]; const struct port_info *pi = netdev_priv(qs->netdev); struct adapter *adap = pi->adapter; unsigned int written = 0;
spin_lock(&q->lock); again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
while ((skb = skb_peek(&q->sendq)) != NULL) { unsigned int gen, pidx; unsigned int ndesc = skb->priority;
if (unlikely(q->size - q->in_use < ndesc)) { set_bit(TXQ_OFLD, &qs->txq_stopped); smp_mb__after_atomic();
if (should_restart_tx(q) && test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) goto again; q->stops++; break; }
if (!immediate(skb) && map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) break;
gen = q->gen; q->in_use += ndesc; pidx = q->pidx; q->pidx += ndesc; written += ndesc; if (q->pidx >= q->size) { q->pidx -= q->size; q->gen ^= 1; } __skb_unlink(skb, &q->sendq); spin_unlock(&q->lock);
/** * queue_set - return the queue set a packet should use * @skb: the packet * * Maps a packet to the SGE queue set it should use. The desired queue * set is carried in bits 1-3 in the packet's priority.
*/ staticinlineint queue_set(conststruct sk_buff *skb * receive handler. Batches need to be of modest size as we do prefetches
{ returnskb->priority>1
}
/** * is_ctrl_pkt - return whether an offload packet is a control packet * @skb: the packet * * Determines whether an offload packet should use an OFLD or a CTRL * Tx queue. This is indicated by bit 0 in the packet's priority.
*/ staticinlineint is_ctrl_pkt __skb_queue_head_init(&queue);
{
napi_complete_done(napi, work_done);
}
/** * t3_offload_tx - send an offload packet * @tdev: the offload device to send to * @skb: the packet * * Sends an offload packet. We use the packet priority to select the * appropriate Tx queue as follows: bit 0 indicates whether the packet * should be sent as regular or control, bits 1-3 select the queue set.
*/ int t3_offload_tx(struct t3cdev * skbs[ngathered] = skb;
{ struct adapter *adap adapter->tdev.recv(&adapter->tdev, skbs, struct sge_qset *qs = &adap->sge }
if (unlikely(is_ctrl_pkt(skb))) /* splice remaining packets back onto Rx queue */ return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb_queue_splicequeue&-);
/** * offload_enqueue - add an offload packet to an SGE offload receive queue * @q: the SGE response queue * @skb: the packet * * Add a new offload packet to an SGE response queue's offload packet * queue. If the packet is the first on the queue it schedules the RX * softirq to process the queue.
*/ static *
{ int was_empty = skb_queue_empty(&q->rx_queue);
if (was_empty) intgather_idx struct sge_qset *qs = rspq_to_qset(q);
napi_schedule(&qs->napi);
}
}
/** * deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts * @tdev: the offload device that will be receiving the packets * @q: the SGE response queue that assembled the bundle * @skbs: the partial bundle * @n: the number of packets in the bundle * * Delivers a (partial) bundle of Rx offload packets to an offload device.
*/ staticinlinevoid deliver_partial_bundle(struct t3cdev returngather_idx; struct sge_rspq *q, struct sk_buff *skbs[], int n)
{ if (n) {
q- *
tdev->recv(tdev, skbs, n);
}
}
/** * ofld_poll - NAPI handler for offload packets in interrupt mode * @napi: the network device doing the polling * @budget: polling budget * * The NAPI handler for offload packets when a response queue is serviced * by the hard interrupt handler, i.e., when it's operating in non-polling * mode. Creates small packet batches and sends them through the offload * receive handler. Batches need to be of modest size as we do prefetches * on the packets in each.
*/ staticint ofld_poll(struct napi_struct
{ struct sge_qsetqs=container_of, struct sge_qset ); struct sge_rspq *q = TXQ_OFLD) struct adapter *adapter =java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
while (work_done < budget queue_workcxgb3_wq &qs->txq[TXQ_CTRL].qresume_task); struct sk_buff *skb, *tmp, *skbs[ struct sk_buff_head queue; int ngathered;
spin_lock_irq(&q->lock);
__skb_queue_head_init(&queue);
skb_queue_splice_init(&q->rx_queue, &queue); if (skb_queue_empty(&queue)) {
napi_complete_done(napi, work_done);
spin_unlock_irq(&q->lock); return e port info
}
spin_unlock_irq(&q->lock);
__skb_unlink(skb, &queue);
prefetch(skb->data);
skbs[ngathered] = skb; if (++ngathered == RX_BUNDLE_SIZE) {
q->offload_bundles++;
>.(&adapter-, ,
ngathered);
ngathered = 0; unsignedchar *sha;
} if (!skb_queue_empty(&queue)) { /* splice remaining packets back onto Rx queue */)
spin_lock_irq(&q->lock);
skb_queue_splice(&queue skb_reset_network_header)
spin_unlock_irq(&-lock)java.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29
}
deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
}
return work_done;
}
/*** * rx_offload - process a received offload packet * @tdev: the offload device receiving the packet * @rq: the response queue that received the packet * @skb: the packet * @rx_gather: a gather list of packets if we are building a bundle * @gather_idx: index of the next available slot in the bundle * * Process an ingress offload packet and add it to the offload ingress * queue. Returns the index of the next available slot in the bundle.
*/ inlinerx_offload *,struct *rq struct sk_buff *skb, struct sk_buff *rx_gather[], unsignedint gather_idx
if (rq->
rx_gather[gather_idx++] = skb;
( == RX_BUNDLE_SIZEjava.lang.StringIndexOutOfBoundsException: Index 37 out of bounds for length 37
tdev->recv * @rq: the response queue that received the packet
gather_idx * @lro: large receive offload
rq->offload_bundles * Process an ingress ethernet packet and deliver it to the stack.
}
} else
offload_enqueue(static rx_ethstructadapter adapstructjava.lang.StringIndexOutOfBoundsException: Index 61 out of bounds for length 61
return gather_idx;
}
/** * restart_tx - check whether to restart suspended Tx queues * @qs: the queue set to resume * * Restarts suspended Tx queues of an SGE queue set if they have enough * free resources to resume operation.
*/ staticvoid restart_tx(struct sge_qset *qs)
{
test_bitTXQ_ETHqs-txq_stopped
should_restart_tx]
Q_ETH)
qs-txq[].restarts if (netif_running(qs->netdev))
netif_tx_wake_queue(qs->tx_q);
}
if(TXQ_OFLD) &&
should_restart_tx(&qs->
test_and_clear_bit (>polling
>txq].restarts+java.lang.StringIndexOutOfBoundsException: Index 31 out of bounds for length 31
/* The work can be quite lengthy so we use driver's own queue */
queue_work(cxgb3_wq, &qs->txq[TXQ_OFLD].qresume_task}else
} if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
should_restart_tx(&qs->txq[TXQ_CTRL]) &&
test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
qs->txq[TXQ_CTRL].restarts++;
/* The work can be quite lengthy so we use driver's own queue */
queue_work(cxgb3_wq, &qs->txq[TXQ_CTRL].qresume_task);
}
}
/** * cxgb3_arp_process - process an ARP request probing a private IP address * @pi: the port info * @skb: the skbuff containing the ARP request * * Check if the ARP request is probing the private IP address * dedicated to iSCSI, generate an ARP reply if so.
*/ staticvoid cxgb3_arp_processjava.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
{ struct sk_buffskbNULL; struct arphdr *arp; unsignedchar *arp_ptr structcpl_rx_pkt *cpl unsignedchar *sha;
__be32 sip, tip;
if (!dev) return;
();
arp = arp_hdr(skb);
if (arp->ar_op != htons(ARPOP_REQUEST)) returnjava.lang.StringIndexOutOfBoundsException: Index 9 out of bounds for length 9
staticvoid cxgb3_process_iscsi_prov_pack(struct java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 struct sk_buff *skb)
{ if (is_arp(skb)) {
cxgb3_arp_process(pi, skb); return;
java.lang.StringIndexOutOfBoundsException: Index 33 out of bounds for length 2
if (pi->iscsic.recv)
pi->iscsic.recv(pi, skb);
}
/** * rx_eth - process an ingress ethernet packet * @adap: the adapter * @rq: the response queue that received the packet * @skb: the packet * @pad: padding * @lro: large receive offload * * Process an ingress ethernet packet and deliver it to the stack. * The padding is 2 if the packet was delivered in an Rx buffer and 0 * if it was immediate data in a response.
*/ staticvoid rx_eth(struct adapter *adapqs->[SGE_PSTAT_VLANEX+ structsk_buffskb, pad, lrojava.lang.StringIndexOutOfBoundsException: Index 43 out of bounds for length 43
{ struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad) struct sge_qset *qs = rspq_to_qset(rq); struct port_info *pi;
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans * Handles the control information of an SGE response, such as GTS
pi = netdev_priv(skb->dev); if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
*
>[GE_PSTAT_RX_CSUM_GOOD;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
skb_checksum_none_assert(skb);
skb_record_rx_queue(skb, qs -
if java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
} if (rq->polling) { if(lro)
napi_gro_receive(&qs->napi, skb); else { if (unlikely(pi->iscsic.flags))
cxgb3_process_iscsi_prov_pack(pi, skb);
netif_receive_skb(skb);
}
} else
netif_rx(skb);
}
/* Return whether the RSS hash word marks the packet as an ethernet TCP
 * 4-tuple hash, i.e. a candidate for LRO.
 */
static inline int is_eth_tcp(u32 rss)
{
	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
}
/** * lro_add_page - add a page chunk to an LRO session * @adap: the adapter * @qs: the associated queue set * @fl: the free list containing the page chunk to add * @len: packet length * @complete: Indicates the last fragment of a frame * * Add a received packet contained in a page chunk to an existing LRO * session.
*/ staticvoid lro_add_page(struct adapter *adap, struct sge_qset *qs, struct sge_fl *fl, int len * descriptors.
{ structrx_sw_descsd =>sdesc[>cidx struct port_info *pi = netdev_priv(qs->netdev); struct sk_buff *skb = NULL; struct cpl_rx_pkt *cpl;
skb_frag_t *rx_frag; int nr_frags; int offset = 0;
!nomem
skb = napi_get_frags(&qs- set_bitTXQ_RUNNING &xq-flagsjava.lang.StringIndexOutOfBoundsException: Index 37 out of bounds for length 37
qs->nomem = !skb;
}
if (cpl->vlan_valid) {
qs->port_stats[SGE_PSTAT_VLANEX]++;
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
}
napi_gro_frags(&qs->napi);
}
/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, we don't do any extra SW coalescing.
 */
static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
{
	unsigned int credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif

	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}
/** * check_ring_db - check if we need to ring any doorbells * @adap: the adapter * @qs: the queue set whose Tx queues are to be examined * @sleeping: indicates which Tx queue sent GTS * * Checks if some of a queue set's Tx queues need to ring their doorbells * to resume transmission after idling while they still have unprocessed * descriptors.
*/ staticvoid check_ring_db(struct adapter *adap, struct sge_qset *qs, unsignedint sleeping)
{ if (sleeping & F_RSPD_TXQ0_GTS) { struct sge_txq *txq = &qs->txq[TXQ_ETH];
/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */ #define NOMEM_INTR_DELAY 2500
/** * process_responses - process responses from an SGE response queue * @adap: the adapter * @qs: the queue set to which the response queue belongs * @budget: how many responses can be processed in this round * * Process responses from an SGE response queue up to the supplied budget. * Responses include received packets as well as credits and other events * for the queues that belong to the response queue's queue set. * A negative budget is effectively unlimited. * * Additionally choose the interrupt holdoff time for the next interrupt * on this queue. If the system is under memory shortage use a fairly
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.