/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
        int len;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
        struct ixgbe_hw *hw;
        u32 fcbuff;
        adapter = ixgbe_from_netdev(netdev);

        /* no DDP if we are already down or resetting */
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return 0;
        /* alloc the udl from the per cpu ddp pool */
        ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed to allocate ddp context\n");
                goto out_noddp_unmap;
        }
        ddp->pool = ddp_pool->pool;
        ddp->sgl = sgl;
        ddp->sgc = sgc;

        j = 0;
        for_each_sg(sgl, sg, dmacount, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
                while (len) {
                        /* max number of buffers allowed in one DDP context */
                        if (j >= IXGBE_BUFFCNT_MAX) {
                                ddp_pool->noddp++;
                                goto out_noddp_free;
                        }

                        /* get the offset and length of the current buffer */
                        thisoff = addr & ((dma_addr_t)bufflen - 1);
                        thislen = min((bufflen - thisoff), len);
                        /*
                         * all but the 1st buffer (j == 0)
                         * must be aligned on bufflen
                         */
                        if ((j != 0) && (thisoff))
                                goto out_noddp_free;
                        /*
                         * all but the last buffer
                         * ((i == (dmacount - 1)) && (thislen == len))
                         * must end at bufflen
                         */
                        if (((i != (dmacount - 1)) || (thislen != len))
                            && ((thislen + thisoff) != bufflen))
                                goto out_noddp_free;

                        ddp->udl[j] = (u64)(addr - thisoff);
                        /* only the first buffer may have a non-zero offset */
                        if (j == 0)
                                firstoff = thisoff;
                        len -= thislen;
                        addr += thislen;
                        j++;
                }
        }
        /* only the last buffer may have a non-full bufflen */
        lastsize = thisoff + thislen;

        /*
         * lastsize cannot be bufflen; if it is, append the extra ddp
         * buffer and report lastsize = 1.
         */
        if (lastsize == bufflen) {
                if (j >= IXGBE_BUFFCNT_MAX) {
                        ddp_pool->noddp_ext_buff++;
                        goto out_noddp_free;
                }

                ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
                j++;
                lastsize = 1;
        }
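        /*
         * Worked example of the splitting above (illustrative numbers,
         * not from the driver): with bufflen = 4096 and a single SG
         * element mapped at dma address 0x12340100 with length 10000,
         * the loop produces
         *
         *   j = 0: thisoff = 0x100, thislen = 4096 - 0x100 = 3840,
         *          udl[0] = 0x12340000, firstoff = 0x100
         *   j = 1: thisoff = 0, thislen = 4096, udl[1] = 0x12341000
         *   j = 2: thisoff = 0, thislen = 10000 - 3840 - 4096 = 2064,
         *          udl[2] = 0x12342000
         *
         * lastsize = 0 + 2064, which is non-full as required; a middle
         * buffer starting at a non-zero offset or ending short of
         * bufflen would instead abort via out_noddp_free.
         */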
        /*
         * turn on last frame indication for target mode, as the target is
         * supposed to send FCP_RSP when it is done
         */
        if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
                set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
                fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
                fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
                IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
        }
        if (hw->mac.type == ixgbe_mac_X550) {
                /* X550 does not require DDP lock */
/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                       struct scatterlist *sgl, unsigned int sgc)
{
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}
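/*
 * Editor's sketch (not part of the driver): roughly how an ULD would
 * reach the callback above through the net_device_ops contract. The
 * helper name and error handling are hypothetical; only the
 * ndo_fcoe_ddp_setup signature is taken from struct net_device_ops.
 */
static inline int example_uld_setup_ddp(struct net_device *netdev, u16 xid,
                                        struct scatterlist *sgl,
                                        unsigned int sgc)
{
        const struct net_device_ops *ops = netdev->netdev_ops;

        /* returns 1 when DDP is set up, 0 when the I/O proceeds without it */
        if (ops->ndo_fcoe_ddp_setup)
                return ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc);
        return 0;
}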
/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of libfc,
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                          struct scatterlist *sgl, unsigned int sgc)
{
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
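/*
 * Editor's note: this differs from ixgbe_fcoe_ddp_get() only in passing
 * target_mode = 1 to ixgbe_fcoe_ddp_setup(), which (as seen above)
 * latches __IXGBE_FCOE_TARGET and enables IXGBE_FCRXCTRL_LASTSEQH so the
 * last frame of a sequence is surfaced to the stack. A hypothetical
 * ULD-side dispatch mirrors the initiator case:
 */
static inline int example_uld_target_ddp(struct net_device *netdev, u16 xid,
                                         struct scatterlist *sgl,
                                         unsigned int sgc)
{
        const struct net_device_ops *ops = netdev->netdev_ops;

        if (ops->ndo_fcoe_ddp_target)
                return ops->ndo_fcoe_ddp_target(netdev, xid, sgl, sgc);
        return 0;
}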
/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * the skb is not passed to the ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                   union ixgbe_adv_rx_desc *rx_desc,
                   struct sk_buff *skb)
{
        int rc = -EINVAL;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct fc_frame_header *fh;
        struct fcoe_crc_eof *crc;
        __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
        __le32 ddp_err;
        int ddp_max;
        u32 fctl;
        u16 xid;
        if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        /* locate the FC header and pull the xid out of it */
        if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                fh = (struct fc_frame_header *)(skb->data +
                        sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
        else
                fh = (struct fc_frame_header *)(skb->data +
                        sizeof(struct fcoe_hdr));

        fctl = ntoh24(fh->fh_f_ctl);
        if (fctl & FC_FC_EX_CTX)
                xid = ntohs(fh->fh_ox_id);
        else
                xid = ntohs(fh->fh_rx_id);

        ddp_max = IXGBE_FCOE_DDP_MAX;
        /* X550 has a different DDP max limit */
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                ddp_max = IXGBE_FCOE_DDP_MAX_X550;
        if (xid >= ddp_max)
                return -EINVAL;
        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
                return -EINVAL;

        ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
                                              IXGBE_RXDADV_ERR_FCERR);
        if (ddp_err)
                return -EINVAL;
        switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
        /* return 0 to bypass going to ULD for DDPed data */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                rc = 0;
                break;
        /* unmap the sg list when FCPRSP is received */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                             ddp->sgc, DMA_FROM_DEVICE);
                ddp->err = (__force u32)ddp_err;
                ddp->sgl = NULL;
                ddp->sgc = 0;
                fallthrough;
        /* if DDP length is present pass it through to ULD */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                if (ddp->len)
                        rc = ddp->len;
                break;
        /* no match will return as an error */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
        default:
                break;
        }
        /* In target mode, check the last data frame of the sequence.
         * For DDP in target mode the data is already DDPed, but the header
         * indication of the last data frame would allow us to tell if we
         * got all the data and the ULP can send FCP_RSP back. As this is
         * not a full FCoE frame, we fill in the trailer here so it won't be
         * dropped by the ULP stack.
         */
        if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
            (fctl & FC_FC_END_SEQ)) {
skb_linearize(skb);
crc = skb_put(skb, sizeof(*crc));
crc->fcoe_eof = FC_EOF_T;
}
return rc;
}
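/*
 * Editor's sketch of the expected call-site handling in the Rx clean-up
 * path (hypothetical helper, not the driver's actual code): the three
 * return-value classes documented above map onto three actions.
 */
static inline bool example_handle_fcoe_ddp(struct ixgbe_adapter *adapter,
                                           union ixgbe_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb,
                                           unsigned int *ddp_bytes)
{
        int rc = ixgbe_fcoe_ddp(adapter, rx_desc, skb);

        if (rc < 0)
                return false;           /* error or not DDPed FCoE: normal path */
        if (rc == 0) {
                dev_kfree_skb_any(skb); /* payload fully DDPed, skb not needed */
                return true;            /* caller skips this frame */
        }
        *ddp_bytes += rc;               /* account DDPed bytes, pass skb to ULD */
        return false;
}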
/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
              u8 *hdr_len)
{
        struct sk_buff *skb = first->skb;
        struct fc_frame_header *fh;
u32 vlan_macip_lens;
u32 fcoe_sof_eof = 0;
u32 mss_l4len_idx;
u32 type_tucmd = IXGBE_ADVTXD_TUCMD_FCOE;
u8 sof, eof;
        if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
                dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
                        skb_shinfo(skb)->gso_type);
                return -EINVAL;
        }
        /* reset the headers to point at the FCoE/FC headers */
        skb_set_network_header(skb, skb->mac_len);
        skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
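        /*
         * Frame layout assumed by the two calls above (editor's
         * illustration):
         *
         *   | eth hdr (mac_len) | fcoe_hdr, ends in SOF | fc_frame_header | payload | fcoe_crc_eof |
         *                        ^ network header         ^ transport header
         */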
        /* set up SOF and ORIS */
        sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
        switch (sof) {
        case FC_SOF_I2:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
                break;
        case FC_SOF_I3:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
                               IXGBE_ADVTXD_FCOEF_ORIS;
                break;
        case FC_SOF_N2:
                break;
        case FC_SOF_N3:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
                break;
        default:
                dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
                return -EINVAL;
        }
        /* the first byte of the last dword is EOF */
        skb_copy_bits(skb, skb->len - 4, &eof, 1);
        /* set up EOF and ORIE */
        switch (eof) {
        case FC_EOF_N:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
                break;
        case FC_EOF_T:
                /* lso needs ORIE */
                if (skb_is_gso(skb))
                        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
                                        IXGBE_ADVTXD_FCOEF_ORIE;
                else
                        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
                break;
        case FC_EOF_NI:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
                break;
        case FC_EOF_A:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
                break;
        default:
                dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
                return -EINVAL;
        }
        /* set up PARINC indicating data offset */
        fh = (struct fc_frame_header *)skb_transport_header(skb);
        if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;
        /* include trailer in headlen as it is replicated per frame */
        *hdr_len = sizeof(struct fcoe_crc_eof);

        /* hdr_len includes fc_hdr if FCoE LSO is enabled */
        if (skb_is_gso(skb)) {
                *hdr_len += skb_transport_offset(skb) +
                            sizeof(struct fc_frame_header);
                /* update gso_segs and bytecount */
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO;

                /* Hardware expects L4T to be RSV for FCoE TSO */
type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
}
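        /*
         * Worked example of the math above (illustrative numbers): with a
         * 14-byte Ethernet header, sizeof(struct fcoe_hdr) == 14 and
         * sizeof(struct fc_frame_header) == 24, skb_transport_offset() is
         * 28, so *hdr_len = 8 (trailer) + 28 + 24 = 60. For a 5060-byte skb
         * with gso_size = 2048:
         *   gso_segs  = DIV_ROUND_UP(5060 - 60, 2048) = 3
         *   bytecount += (3 - 1) * 60, charging the replicated headers of
         *   the two extra frames.
         */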
/* set flag indicating FCOE to ixgbe_tx_map call */
first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;
/* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
        /* leave registers unconfigured if FCoE is disabled */
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return 0;
        /* Use one or more Rx queues for FCoE by redirection table */
        fcreta_size = IXGBE_FCRETA_SIZE;
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                fcreta_size = IXGBE_FCRETA_SIZE_X550;

        for (i = 0; i < fcreta_size; i++) {
                if (adapter->hw.mac.type == ixgbe_mac_X550) {
                        int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
                                                       fcoe->indices);
fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
fcoe_q_h = FIELD_PREP(IXGBE_FCRETA_ENTRY_HIGH_MASK,
fcoe_q_h);
}
/* release existing queues and reallocate them */
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);
return 0;
}
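/*
 * Editor's note on the redirection table above (illustrative numbers):
 * with fcoe->offset = 8 and fcoe->indices = 4, entry i of FCRETA points
 * at rx_ring[8 + (i % 4)], spreading FCoE traffic across four dedicated
 * Rx queues; on X550 each entry additionally carries a second queue in
 * its high bits, taken from an index offset by fcreta_size entries.
 */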
/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the SAN
 * MAC address are valid. The wwn is then formed based on the NAA-2
 * IEEE Extended name identifier (ref. T11 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
        u16 prefix = 0xffff;
        struct ixgbe_adapter *adapter = ixgbe_from_netdev(netdev);
        struct ixgbe_mac_info *mac = &adapter->hw.mac;
        switch (type) {
        case NETDEV_FCOE_WWNN:
                prefix = mac->wwnn_prefix;
                break;
        case NETDEV_FCOE_WWPN:
                prefix = mac->wwpn_prefix;
                break;
        default:
                break;
        }
        /* Get the PCI-e Device Serial Number Capability */
        dsn = pci_get_dsn(adapter->pdev);
        if (dsn)
                snprintf(info->serial_number, sizeof(info->serial_number),
                         "%016llX", dsn);
        else
                snprintf(info->serial_number, sizeof(info->serial_number),
                         "Unknown");
/* Model Description */
snprintf(info->model_description, sizeof(info->model_description), "%s",
ixgbe_default_device_descr);
return 0;
}
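/*
 * Editor's sketch of the NAA-2 formation step referenced in the kernel-doc
 * above (this excerpt elides it): the 16-bit prefix lands in the top two
 * bytes and the 48-bit SAN MAC address in the low six. For example, prefix
 * 0x1000 and SAN MAC 00:1b:21:aa:bb:cc give the WWN 0x1000001b21aabbcc.
 */
static inline u64 example_form_naa2_wwn(u16 prefix, const u8 *san_addr)
{
        return ((u64)prefix << 48) |
               ((u64)san_addr[0] << 40) |
               ((u64)san_addr[1] << 32) |
               ((u64)san_addr[2] << 24) |
               ((u64)san_addr[3] << 16) |
               ((u64)san_addr[4] << 8) |
               ((u64)san_addr[5]);
}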
/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
        return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
        return 0;
#endif
}
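/*
 * Editor's note (hypothetical DCB configuration): if the FCoE user
 * priority adapter->fcoe.up == 3 and priorities 3-4 have been mapped to
 * traffic class 1 via netdev_set_prio_tc_map(), ixgbe_fcoe_get_tc()
 * returns 1; without CONFIG_IXGBE_DCB everything stays on TC 0.
 */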