/* Poll a register for a specific value */
static int nicvf_poll_reg(struct nicvf *nic, int qidx,
			  u64 reg, int bit_pos, int bits, int val)
{
	u64 bit_mask;
	u64 reg_val;
	int timeout = 10;
/* Allocate memory for a queue's descriptors */
static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
				  int q_len, int desc_size, int align_bytes)
{
	dmem->q_len = q_len;
	dmem->size = (desc_size * q_len) + align_bytes;

	/* Save address, need it while freeing */
	dmem->unalign_base = dma_alloc_coherent(&nic->pdev->dev, dmem->size,
						&dmem->dma, GFP_KERNEL);
	if (!dmem->unalign_base)
		return -ENOMEM;
/* Allocate a new page or recycle one if possible
 *
 * We cannot optimize dma mapping here, since
 * 1. It's only one RBDR ring for 8 Rx queues.
 * 2. CQE_RX gives address of the buffer where pkt has been DMA'ed
 *    and not idx into RBDR ring, so can't refer to saved info.
 * 3. There are multiple receive buffers per page
 */
static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
					       struct rbdr *rbdr, gfp_t gfp)
{
	int ref_count;
	struct page *page = NULL;
	struct pgcache *pgcache, *next;

	/* Check if page is already allocated */
	pgcache = &rbdr->pgcache[rbdr->pgidx];
	page = pgcache->page;
	/* Check if page can be recycled */
	if (page) {
		ref_count = page_ref_count(page);
		/* This page can be recycled if internal ref_count and page's
		 * ref_count are equal, indicating that the page has been used
		 * once for packet transmission. For non-XDP mode, internal
		 * ref_count is always '1'.
		 */
		if (rbdr->is_xdp) {
			if (ref_count == pgcache->ref_count)
				pgcache->ref_count--;
			else
				page = NULL;
		} else if (ref_count != 1) {
			page = NULL;
		}
	}
	if (!page) {
		page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 0);
		if (!page)
			return NULL;

		this_cpu_inc(nic->pnicvf->drv_stats->page_alloc);

		/* Check for space */
		if (rbdr->pgalloc >= rbdr->pgcnt) {
			/* Page can still be used */
			nic->rb_page = page;
			return NULL;
		}

		/* Save the page in page cache */
		pgcache->page = page;
		pgcache->dma_addr = 0;
		pgcache->ref_count = 0;
		rbdr->pgalloc++;
	}
	/* Take additional page references for recycling */
	if (rbdr->is_xdp) {
		/* Since there is single RBDR (i.e single core doing
		 * page recycling) per 8 Rx queues, in XDP mode adjusting
		 * page references atomically is the biggest bottleneck, so
		 * take bunch of references at a time.
		 *
		 * So here, below reference counts differ by '1'.
		 */
		if (!pgcache->ref_count) {
			pgcache->ref_count = XDP_PAGE_REFCNT_REFILL;
			page_ref_add(page, XDP_PAGE_REFCNT_REFILL);
		}
	} else {
		/* In non-XDP case, single 64K page is divided across multiple
		 * receive buffers, so cost of recycling is less anyway.
		 * So we can do with just one extra reference.
		 */
		page_ref_add(page, 1);
	}

	rbdr->pgidx++;
	rbdr->pgidx &= (rbdr->pgcnt - 1);
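	/* Illustrative note (not from the original source): this wrap relies
	 * on rbdr->pgcnt being a power of two (it is rounded up with
	 * roundup_pow_of_two() in nicvf_init_rbdr()), so ANDing with
	 * (pgcnt - 1) behaves like a modulo; e.g. with pgcnt = 256 an index
	 * of 255 advances to 256 & 0xff = 0.
	 */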
	/* Prefetch refcount of next page in page cache */
	next = &rbdr->pgcache[rbdr->pgidx];
	page = next->page;
	if (page)
		prefetch(&page->_refcount);
	/* Check if request can be accommodated in previous allocated page.
	 * But in XDP mode only one buffer per page is permitted.
	 */
	if (!rbdr->is_xdp && nic->rb_page &&
	    ((nic->rb_page_offset + buf_len) <= PAGE_SIZE)) {
		nic->rb_pageref++;
		goto ret;
	}

	nicvf_get_page(nic);
	nic->rb_page = NULL;
	/* Get new page, either recycled or new one */
	pgcache = nicvf_alloc_page(nic, rbdr, gfp);
	if (!pgcache && !nic->rb_page) {
		this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
		return -ENOMEM;
	}

	nic->rb_page_offset = 0;

	/* Reserve space for header modifications by BPF program */
	if (rbdr->is_xdp)
		buf_len += XDP_PACKET_HEADROOM;

	/* Check if it's recycled */
	if (pgcache)
		nic->rb_page = pgcache->page;
ret:
	if (rbdr->is_xdp && pgcache && pgcache->dma_addr) {
		*rbuf = pgcache->dma_addr;
	} else {
		/* HW will ensure data coherency, CPU sync not required */
		*rbuf = (u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
						nic->rb_page_offset, buf_len,
						DMA_FROM_DEVICE,
						DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
			if (!nic->rb_page_offset)
				__free_pages(nic->rb_page, 0);
			nic->rb_page = NULL;
			return -ENOMEM;
		}
		if (pgcache)
			pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
		nic->rb_page_offset += buf_len;
	}
	/* Now build an skb to give to stack */
	skb = build_skb(data, RCV_FRAG_LEN);
	if (!skb) {
		put_page(virt_to_page(data));
		return NULL;
	}

	prefetch(skb->data);
	return skb;
}
/* Allocate RBDR ring and populate receive buffers */
static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
			   int ring_len, int buf_size)
{
	int idx;
	u64 rbuf;
	struct rbdr_entry_t *desc;
	int err;

	rbdr->desc = rbdr->dmem.base;
	/* Buffer size has to be in multiples of 128 bytes */
	rbdr->dma_size = buf_size;
	rbdr->enable = true;
	rbdr->thresh = RBDR_THRESH;
	rbdr->head = 0;
	rbdr->tail = 0;
	/* Initialize page recycling stuff.
	 *
	 * Can't use single buffer per page especially with 64K pages.
	 * On embedded platforms i.e 81xx/83xx available memory itself
	 * is low and minimum ring size of RBDR is 8K, that takes away
	 * lots of memory.
	 *
	 * But for XDP it has to be a single buffer per page.
	 */
	if (!nic->pnicvf->xdp_prog) {
		rbdr->pgcnt = ring_len / (PAGE_SIZE / buf_size);
		rbdr->is_xdp = false;
	} else {
		rbdr->pgcnt = ring_len;
		rbdr->is_xdp = true;
	}
	rbdr->pgcnt = roundup_pow_of_two(rbdr->pgcnt);
	rbdr->pgcache = kcalloc(rbdr->pgcnt, sizeof(*rbdr->pgcache),
				GFP_KERNEL);
	if (!rbdr->pgcache)
		return -ENOMEM;
	rbdr->pgidx = 0;
	rbdr->pgalloc = 0;
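	/* Worked example (illustrative, assuming 64K pages and 2K buffers):
	 * a ring of 8192 entries needs 8192 / (65536 / 2048) = 256 page-cache
	 * slots in non-XDP mode, while XDP mode needs one slot per entry,
	 * i.e. 8192; the count is then rounded up to a power of two so that
	 * pgidx can wrap with a simple mask.
	 */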
	nic->rb_page = NULL;
	for (idx = 0; idx < ring_len; idx++) {
		err = nicvf_alloc_rcv_buffer(nic, rbdr, GFP_KERNEL,
					     RCV_FRAG_LEN, &rbuf);
		if (err) {
			/* To free already allocated and mapped ones */
			rbdr->tail = idx - 1;
			return err;
		}
	/* Release additional page references held for recycling */
	head = 0;
	while (head < rbdr->pgcnt) {
		pgcache = &rbdr->pgcache[head];
		if (pgcache->page && page_ref_count(pgcache->page) != 0) {
			if (rbdr->is_xdp) {
				page_ref_sub(pgcache->page,
					     pgcache->ref_count - 1);
			}
			put_page(pgcache->page);
		}
		head++;
	}
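	/* Illustrative note (not from the original source): in XDP mode each
	 * cached page may still hold the batch of references taken in
	 * nicvf_alloc_page(), so all but one are dropped in bulk with
	 * page_ref_sub() and the final one via put_page(), which frees the
	 * page once no other user holds a reference.
	 */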
	/* Free RBDR ring */
	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
}
/* Refill receive buffer descriptors with new buffers. */
static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
{
	struct queue_set *qs = nic->qs;
	int rbdr_idx = qs->rbdr_cnt;
	int tail, qcount;
	int refill_rb_cnt;
	struct rbdr *rbdr;
	struct rbdr_entry_t *desc;
	u64 rbuf;
	int new_rb = 0;

refill:
	if (!rbdr_idx)
		return;
	rbdr_idx--;
	rbdr = &qs->rbdr[rbdr_idx];
	/* Check if it's enabled */
	if (!rbdr->enable)
		goto next_rbdr;

	/* Get no of desc's to be refilled */
	qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
	qcount &= 0x7FFFF;

	/* Doorbell can be ringed with a max of ring size minus 1 */
	if (qcount >= (qs->rbdr_len - 1))
		goto next_rbdr;
	else
		refill_rb_cnt = qs->rbdr_len - qcount - 1;
static void nicvf_reclaim_rbdr(struct nicvf *nic,
			       struct rbdr *rbdr, int qidx)
{
	u64 tmp, fifo_state;
	int timeout = 10;

	/* Save head and tail pointers for freeing up buffers */
	rbdr->head = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_HEAD,
					  qidx) >> 3;
	rbdr->tail = nicvf_queue_reg_read(nic,
					  NIC_QSET_RBDR_0_1_TAIL,
					  qidx) >> 3;

	/* If RBDR FIFO is in 'FAIL' state then do a reset first
	 * before reclaiming.
	 */
	fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
	if (((fifo_state >> 62) & 0x03) == 0x3)
		nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
				      qidx, NICVF_RBDR_RESET);
	/* Set queue base address */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
			      qidx, (u64)(sq->dmem.phys_base));

	/* Enable send queue & set queue size */
	memset(&sq_cfg, 0, sizeof(struct sq_cfg));
	sq_cfg.ena = 1;
	sq_cfg.reset = 0;
	sq_cfg.ldwb = 0;
	sq_cfg.qsize = ilog2(qs->sq_len >> 10);
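	/* Illustrative note (not from the original source): qsize encodes the
	 * ring length as log2(entries / 1024), so e.g. an 8192-entry send
	 * queue yields ilog2(8192 >> 10) = ilog2(8) = 3.
	 */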
	sq_cfg.tstmp_bgx_intf = 0;
	/* CQ's level at which HW will stop processing SQEs to avoid
	 * transmitting a pkt with no space in CQ to post CQE_TX.
	 */
	sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);

	/* Set threshold value for interrupt generation */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);

	/* Set queue:cpu affinity for better load distribution */
	if (cpu_online(qidx)) {
		cpumask_set_cpu(qidx, &sq->affinity_mask);
		netif_set_xps_queue(nic->netdev,
				    &sq->affinity_mask, qidx);
	}
}
	/* Take primary VF's queue lengths.
	 * This is needed to take queue lengths set from ethtool
	 * into consideration.
	 */
	if (nic->sqs_mode && pqs) {
		qs->cq_len = pqs->cq_len;
		qs->sq_len = pqs->sq_len;
	}

	if (enable) {
		if (nicvf_alloc_resources(nic))
			return -ENOMEM;

		/* Reset RXQ's stats.
		 * SQ's stats will get reset automatically once SQ is reset.
		 */
		nicvf_reset_rcv_queue_stats(nic);
		return 0;
	}
/* Get a free desc from SQ
 * returns descriptor pointer & descriptor number
 */
static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	int qentry;

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
					  int qentry, int desc_cnt)
{
	sq->tail = qentry;
	atomic_add(desc_cnt, &sq->free_cnt);
}
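/* Illustrative usage sketch (hypothetical caller, not part of this file;
 * fill_subdescs() stands in for whatever builds the subdescriptors):
 * descriptors are reserved up front and the tail is rolled back if filling
 * them fails, so no partially built entry is left behind.
 *
 *	qentry = nicvf_get_sq_desc(sq, cnt);
 *	if (fill_subdescs(sq, qentry, cnt) < 0)
 *		nicvf_rollback_sq_desc(sq, qentry, cnt);
 */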
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
	if (!sq->is_xdp)
		atomic_add(desc_cnt, &sq->free_cnt);
	else
		sq->xdp_free_cnt += desc_cnt;
	sq->head += desc_cnt;
	sq->head &= (sq->dmem.q_len - 1);
}
/* Calculate no of SQ subdescriptors needed to transmit all
 * segments of this TSO packet.
 * Taken from 'Tilera network driver' with a minor modification.
 */
static int nicvf_tso_count_subdescs(struct sk_buff *skb)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	unsigned int sh_len = skb_tcp_all_headers(skb);
	unsigned int data_len = skb->len - sh_len;
	unsigned int p_len = sh->gso_size;
	long f_id = -1;			/* id of the current fragment */
	long f_size = skb_headlen(skb) - sh_len;  /* current fragment size */
	long f_used = 0;		/* bytes used from the current fragment */
	long n;				/* size of the current piece of payload */
	int num_edescs = 0;
	int segment;
	for (segment = 0; segment < sh->gso_segs; segment++) {
		unsigned int p_used = 0;

		/* One edesc for header and for each piece of the payload. */
		for (num_edescs++; p_used < p_len; num_edescs++) {
			/* Advance as needed. */
			while (f_used >= f_size) {
				f_id++;
				f_size = skb_frag_size(&sh->frags[f_id]);
				f_used = 0;
			}

			/* Use bytes from the current fragment. */
			n = p_len - p_used;
			if (n > f_size - f_used)
				n = f_size - f_used;
			f_used += n;
			p_used += n;
		}

		/* The last segment may be less than gso_size. */
		data_len -= p_len;
		if (data_len < p_len)
			p_len = data_len;
	}
	/* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
	return num_edescs + sh->gso_segs;
}
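/* Worked example (illustrative, not from the original source): a linear TSO
 * skb with 4000 bytes of TCP payload and gso_size 1448 produces gso_segs = 3
 * segments (1448 + 1448 + 1104). The loop above counts one edesc per segment
 * plus one per payload piece; with the whole payload in the linear area each
 * segment is a single piece, so num_edescs = 3 + 3 = 6 and the trailing
 * '+ gso_segs' brings the total to 9 subdescriptors.
 */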
#define POST_CQE_DESC_COUNT 2
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
	int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;

	if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
		/* post_cqe = 0, to avoid HW posting a CQE for every TSO
		 * segment transmitted on 88xx.
		 */
		hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
	} else {
		sq->skbuff[qentry] = (u64)skb;
		/* Enable notification via CQE after processing SQE */
		hdr->post_cqe = 1;
		/* No of subdescriptors following this */
		hdr->subdesc_cnt = subdesc_cnt;
	}
	hdr->tot_len = len;
	/* Offload checksum calculation to HW */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (ip.v4->version == 4)
			hdr->csum_l3 = 1;	/* Enable IP csum calculation */
		hdr->l3_offset = skb_network_offset(skb);
		hdr->l4_offset = skb_transport_offset(skb);

		proto = (ip.v4->version == 4) ? ip.v4->protocol :
			ip.v6->nexthdr;

		switch (proto) {
		case IPPROTO_TCP:
			hdr->csum_l4 = SEND_L4_CSUM_TCP;
			break;
		case IPPROTO_UDP:
			hdr->csum_l4 = SEND_L4_CSUM_UDP;
			break;
		case IPPROTO_SCTP:
			hdr->csum_l4 = SEND_L4_CSUM_SCTP;
			break;
		}
	}
	if (nic->hw_tso && skb_shinfo(skb)->gso_size) {
		hdr->tso = 1;
		hdr->tso_start = skb_tcp_all_headers(skb);
		hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
		/* For non-tunneled pkts, point this to L2 ethertype */
		hdr->inner_l3_offset = skb_network_offset(skb) - 2;
		this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
	}

	/* Check if hw timestamp is requested */
	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		return;

	/* Tx timestamping not supported along with TSO, so ignore request */
	if (skb_shinfo(skb)->gso_size)
		return;

	/* HW supports only a single outstanding packet to timestamp */
	if (!atomic_add_unless(&nic->pnicvf->tx_ptp_skbs, 1, 1))
		return;
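	/* Illustrative note (not from the original source): atomic_add_unless()
	 * increments tx_ptp_skbs only while it is not already 1, so at most one
	 * packet at a time is marked for hardware timestamping; the counter is
	 * presumably dropped back once the matching PTP completion is handled
	 * elsewhere in the driver.
	 */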
	/* Mark the SKB for later reference */
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Finally enable timestamp generation
	 * Since 'post_cqe' is also set, two CQEs will be posted
	 * for this packet i.e CQE_TYPE_SEND and CQE_TYPE_SEND_PTP.
	 */
	hdr->tstmp = 1;
}
/* SQ GATHER subdescriptor
 * Must follow HDR descriptor
 */
static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
					       int size, u64 data)
{
	struct sq_gather_subdesc *gather;
/* Add HDR + IMMEDIATE subdescriptors right after descriptors of a TSO
 * packet so that a CQE is posted as a notification for transmission of
 * TSO packet.
 */
static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
					    int tso_sqe, struct sk_buff *skb)
{
	struct sq_imm_subdesc *imm;
	struct sq_hdr_subdesc *hdr;

	sq->skbuff[qentry] = (u64)skb;

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
	memset(hdr, 0, SND_QUEUE_DESC_SIZE);
	hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Enable notification via CQE after processing SQE */
	hdr->post_cqe = 1;
	/* There is no packet to transmit here */
	hdr->dont_send = 1;
	hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
	hdr->tot_len = 1;
	/* Actual TSO header SQE index, needed for cleanup */
	hdr->rsvd2 = tso_sqe;

	/* make sure all memory stores are done before ringing doorbell */
	smp_wmb();

	/* Inform HW to xmit all TSO segments */
	nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
			      sq_num, desc_cnt);
}
/* Segment a TSO packet into 'gso_size' segments and append
 * them to SQ for transfer
 */
static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
			       int sq_num, int qentry, struct sk_buff *skb)
{
	struct tso_t tso;
	int seg_subdescs = 0, desc_cnt = 0;
	int seg_len, total_len, data_left;
	int hdr_qentry = qentry;
	int hdr_len;
/* Append an skb to a SQ for packet transfer. */
int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
			struct sk_buff *skb, u8 sq_num)
{
	int i, size;
	int subdesc_cnt, hdr_sqe = 0;
	int qentry;
	u64 dma_addr;

	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
	if (subdesc_cnt > atomic_read(&sq->free_cnt))
		goto append_fail;

	qentry = nicvf_get_sq_desc(sq, subdesc_cnt);

	/* Check if it's a TSO packet */
	if (skb_shinfo(skb)->gso_size && !nic->hw_tso)
		return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb);

append_fail:
	/* Use original PCI dev for debug log */
	nic = nic->pnicvf;
	netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
	return 0;
}
static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
				   u64 buf_addr, bool xdp)
{
	struct page *page = NULL;
	int len = RCV_FRAG_LEN;

	if (xdp) {
		page = virt_to_page(phys_to_virt(buf_addr));
		/* Check if it's a recycled page, if not
		 * unmap the DMA mapping.
		 *
		 * Recycled page holds an extra reference.
		 */
		if (page_ref_count(page) != 1)
			return;
		len += XDP_PACKET_HEADROOM;
		/* Receive buffers in XDP mode are mapped from page start */
		dma_addr &= PAGE_MASK;
	}
	dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
			     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}
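/* Illustrative note (not from the original source): XDP buffers are mapped
 * from the start of their page (see nicvf_alloc_rcv_buffer()), so masking the
 * DMA address with PAGE_MASK, e.g. 0x12345840 & ~0xfff = 0x12345000 for 4K
 * pages, recovers the address originally handed to the DMA API, and the unmap
 * length is grown by XDP_PACKET_HEADROOM to match the mapped size.
 */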
/* Returns SKB for a received packet */
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
				  struct cqe_rx_t *cqe_rx, bool xdp)
{
	int frag;
	int payload_len = 0;
	struct sk_buff *skb = NULL;
	struct page *page;
	int offset;
	u16 *rb_lens = NULL;
	u64 *rb_ptrs = NULL;
	u64 phys_addr;

	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
	/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
	 * CQE_RX at word6, hence buffer pointers move by word
	 *
	 * Use existing 'hw_tso' flag which will be set for all chips
	 * except 88xx pass1 instead of an additional cache line
	 * access (or miss) by using pci dev's revision.
	 */
	if (!nic->hw_tso)
		rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
	else
		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
		payload_len = rb_lens[frag_num(frag)];
		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
		if (!phys_addr) {
			if (skb)
				dev_kfree_skb_any(skb);
			return NULL;
		}
static u64 nicvf_int_type_to_mask(int int_type, int q_idx)
{
	u64 reg_val;

	switch (int_type) {
	case NICVF_INTR_CQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
		break;
	case NICVF_INTR_SQ:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
		break;
	case NICVF_INTR_RBDR:
		reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
		break;
	case NICVF_INTR_PKT_DROP:
		reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
		break;
	case NICVF_INTR_TCP_TIMER:
		reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
		break;
	case NICVF_INTR_MBOX:
		reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
		break;
	case NICVF_INTR_QS_ERR:
		reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
		break;
	default:
		reg_val = 0;
	}

	return reg_val;
}
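/* Illustrative note (not from the original source): the per-queue interrupt
 * sources occupy contiguous bit ranges, so e.g. NICVF_INTR_CQ with q_idx = 2
 * yields the mask (1ULL << 2) << NICVF_INTR_CQ_SHIFT, while single-instance
 * sources such as the mailbox or QS error occupy one fixed bit each.
 */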
/* Enable interrupt */
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to enable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1S,
			nicvf_reg_read(nic, NIC_VF_ENA_W1S) | mask);
}
/* Disable interrupt */
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to disable interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_ENA_W1C, mask);
}
/* Clear interrupt */
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to clear interrupt: unknown type\n");
		return;
	}
	nicvf_reg_write(nic, NIC_VF_INT, mask);
}
/* Check if interrupt is enabled */
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
{
	u64 mask = nicvf_int_type_to_mask(int_type, q_idx);

	/* If interrupt type is unknown, we treat it as disabled. */
	if (!mask) {
		netdev_dbg(nic->netdev,
			   "Failed to check interrupt enable: unknown type\n");
		return 0;
	}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
	netif_err(nic, rx_err, nic->netdev,
		  "RX error CQE err_level 0x%x err_opcode 0x%x\n",
		  cqe_rx->err_level, cqe_rx->err_opcode);

	switch (cqe_rx->err_opcode) {
	case CQ_RX_ERROP_RE_PARTIAL:
		this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
		break;
	case CQ_RX_ERROP_RE_JABBER:
		this_cpu_inc(nic->drv_stats->rx_jabber_errs);
		break;
	case CQ_RX_ERROP_RE_FCS:
		this_cpu_inc(nic->drv_stats->rx_fcs_errs);
		break;
	case CQ_RX_ERROP_RE_RX_CTL:
		this_cpu_inc(nic->drv_stats->rx_bgx_errs);
		break;
	case CQ_RX_ERROP_PREL2_ERR:
		this_cpu_inc(nic->drv_stats->rx_prel2_errs);
		break;
	case CQ_RX_ERROP_L2_MAL:
		this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
		break;
	case CQ_RX_ERROP_L2_OVERSIZE:
		this_cpu_inc(nic->drv_stats->rx_oversize);
		break;
	case CQ_RX_ERROP_L2_UNDERSIZE:
		this_cpu_inc(nic->drv_stats->rx_undersize);
		break;
	case CQ_RX_ERROP_L2_LENMISM:
		this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
		break;
	case CQ_RX_ERROP_L2_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l2_pclp);
		break;
	case CQ_RX_ERROP_IP_NOT:
		this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
		break;
	case CQ_RX_ERROP_IP_CSUM_ERR:
		this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
		break;
	case CQ_RX_ERROP_IP_MAL:
		this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
		break;
	case CQ_RX_ERROP_IP_MALD:
		this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
		break;
	case CQ_RX_ERROP_IP_HOP:
		this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
		break;
	case CQ_RX_ERROP_L3_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l3_pclp);
		break;
	case CQ_RX_ERROP_L4_MAL:
		this_cpu_inc(nic->drv_stats->rx_l4_malformed);
		break;
	case CQ_RX_ERROP_L4_CHK:
		this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
		break;
	case CQ_RX_ERROP_UDP_LEN:
		this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
		break;
	case CQ_RX_ERROP_L4_PORT:
		this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
		break;
	case CQ_RX_ERROP_TCP_FLAG:
		this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
		break;
	case CQ_RX_ERROP_TCP_OFFSET:
		this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
		break;
	case CQ_RX_ERROP_L4_PCLP:
		this_cpu_inc(nic->drv_stats->rx_l4_pclp);
		break;
	case CQ_RX_ERROP_RBDR_TRUNC:
		this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
		break;
	}

	return 1;
}
/* Check for errors in the send cmp.queue entry */
int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
	switch (cqe_tx->send_status) {
	case CQ_TX_ERROP_DESC_FAULT:
		this_cpu_inc(nic->drv_stats->tx_desc_fault);
		break;
	case CQ_TX_ERROP_HDR_CONS_ERR:
		this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
		break;
	case CQ_TX_ERROP_SUBDC_ERR:
		this_cpu_inc(nic->drv_stats->tx_subdesc_err);
		break;
	case CQ_TX_ERROP_MAX_SIZE_VIOL:
		this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
		break;
	case CQ_TX_ERROP_IMM_SIZE_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
		break;
	case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_data_seq_err);
		break;
	case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
		this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
		break;
	case CQ_TX_ERROP_LOCK_VIOL:
		this_cpu_inc(nic->drv_stats->tx_lock_viol);
		break;
	case CQ_TX_ERROP_DATA_FAULT:
		this_cpu_inc(nic->drv_stats->tx_data_fault);
		break;
	case CQ_TX_ERROP_TSTMP_CONFLICT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
		break;
	case CQ_TX_ERROP_TSTMP_TIMEOUT:
		this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
		break;
	case CQ_TX_ERROP_MEM_FAULT:
		this_cpu_inc(nic->drv_stats->tx_mem_fault);
		break;
	case CQ_TX_ERROP_CK_OVERLAP:
		this_cpu_inc(nic->drv_stats->tx_csum_overlap);
		break;
	case CQ_TX_ERROP_CK_OFLOW:
		this_cpu_inc(nic->drv_stats->tx_csum_overflow);
		break;
	}