/* * This file is part of the Chelsio T4 Ethernet driver for Linux. * * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/* Suspend an Ethernet Tx queue with fewer available descriptors than this.
 * This is the same as calc_tx_descs() for a TSO packet with
 * nr_frags == MAX_SKB_FRAGS.
 */
#define ETHTXQ_STOP_THRES \
	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
/* PCI Data Structure ("PCIR") embedded in a PCI Expansion ROM image. */
struct cxgb4_pcir_data {
	__le32 signature;	/* Signature. The string "PCIR" */
	__le16 vendor_id;	/* Vendor Identification */
	__le16 device_id;	/* Device Identification */
	__u8 vital_product[2];	/* Pointer to Vital Product Data */
	__u8 length[2];		/* PCIR Data Structure Length */
	__u8 revision;		/* PCIR Data Structure Revision */
	__u8 class_code[3];	/* Class Code */
	__u8 image_length[2];	/* Image Length. Multiple of 512B */
	__u8 code_revision[2];	/* Revision Level of Code/Data */
	__u8 code_type;		/* Code type of this ROM image */
	__u8 indicator;		/* NOTE(review): presumably the "last image"
				 * indicator per the PCI Firmware spec —
				 * confirm */
	__u8 reserved[2];	/* Reserved */
};
/* BIOS boot headers */
struct cxgb4_pci_exp_rom_header {
	__le16 signature;	/* ROM Signature. Should be 0xaa55 */
	__u8 reserved[22];	/* Reserved per processor Architecture data */
	__le16 pcir_offset;	/* Offset to PCI Data Structure */
};
/* Legacy PCI Expansion ROM Header */
struct legacy_pci_rom_hdr {
	__u8 signature[2];	/* ROM Signature. Should be 0xaa55 */
	__u8 size512;		/* Current Image Size in units of 512 bytes */
	__u8 initentry_point[4];	/* Initialization entry point */
	__u8 cksum;		/* Checksum computed on the entire Image */
	__u8 reserved[16];	/* Reserved */
	__le16 pcir_offset;	/* Offset to PCI Data Structure */
};
/* Per-port MAC Tx/Rx statistics counters. */
struct port_stats {
	u64 tx_octets;		/* total # of octets in good frames */
	u64 tx_frames;		/* all good frames */
	u64 tx_bcast_frames;	/* all broadcast frames */
	u64 tx_mcast_frames;	/* all multicast frames */
	u64 tx_ucast_frames;	/* all unicast frames */
	u64 tx_error_frames;	/* all error frames */

	u64 tx_frames_64;	/* # of Tx frames in a particular range */
	u64 tx_frames_65_127;
	u64 tx_frames_128_255;
	u64 tx_frames_256_511;
	u64 tx_frames_512_1023;
	u64 tx_frames_1024_1518;
	u64 tx_frames_1519_max;

	u64 rx_octets;		/* total # of octets in good frames */
	u64 rx_frames;		/* all good frames */
	u64 rx_bcast_frames;	/* all broadcast frames */
	u64 rx_mcast_frames;	/* all multicast frames */
	u64 rx_ucast_frames;	/* all unicast frames */
	u64 rx_too_long;	/* # of frames exceeding MTU */
	u64 rx_jabber;		/* # of jabber frames */
	u64 rx_fcs_err;		/* # of received frames with bad FCS */
	u64 rx_len_err;		/* # of received frames with length error */
	u64 rx_symbol_err;	/* symbol errors */
	u64 rx_runt;		/* # of short frames */

	u64 rx_frames_64;	/* # of Rx frames in a particular range */
	u64 rx_frames_65_127;
	u64 rx_frames_128_255;
	u64 rx_frames_256_511;
	u64 rx_frames_512_1023;
	u64 rx_frames_1024_1518;
	u64 rx_frames_1519_max;

	u64 rx_pause;		/* # of received pause frames */
	u64 rx_ppp0;		/* # of received PPP prio 0 frames */
	u64 rx_ppp1;		/* # of received PPP prio 1 frames */
	u64 rx_ppp2;		/* # of received PPP prio 2 frames */
	u64 rx_ppp3;		/* # of received PPP prio 3 frames */
	u64 rx_ppp4;		/* # of received PPP prio 4 frames */
	u64 rx_ppp5;		/* # of received PPP prio 5 frames */
	u64 rx_ppp6;		/* # of received PPP prio 6 frames */
	u64 rx_ppp7;		/* # of received PPP prio 7 frames */

	u64 rx_ovflow0;		/* drops due to buffer-group 0 overflows */
	u64 rx_ovflow1;		/* drops due to buffer-group 1 overflows */
	u64 rx_ovflow2;		/* drops due to buffer-group 2 overflows */
	u64 rx_ovflow3;		/* drops due to buffer-group 3 overflows */
	u64 rx_trunc0;		/* buffer-group 0 truncated packets */
	u64 rx_trunc1;		/* buffer-group 1 truncated packets */
	u64 rx_trunc2;		/* buffer-group 2 truncated packets */
	u64 rx_trunc3;		/* buffer-group 3 truncated packets */
};
/* TP (transport processor) parameters. */
struct tp_params {
	unsigned int tre;		/* log2 of core clocks per TP tick */
	unsigned int la_mask;		/* what events are recorded by TP LA */
	unsigned short tx_modq_map;	/* TX modulation scheduler queue to */
					/* channel map */

	/* cached TP_OUT_CONFIG compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	int rx_pkt_encap;

	/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets.  This is a
	 * subset of the set of fields which may be present in the Compressed
	 * Filter Tuple portion of filters and TCP TCB connections.  The
	 * fields which are present are controlled by the TP_VLAN_PRI_MAP.
	 * Since a variable number of fields may or may not be present, their
	 * shifted field positions within the Compressed Filter Tuple may
	 * vary, or not even be present if the field isn't selected in
	 * TP_VLAN_PRI_MAP.  Since some of these fields are needed in various
	 * places we store their offsets here, or a -1 if the field isn't
	 * present.
	 */
	int fcoe_shift;
	int port_shift;
	int vnic_shift;
	int vlan_shift;
	int tos_shift;
	int protocol_shift;
	int ethertype_shift;
	int macmatch_shift;
	int matchtype_shift;
	int frag_shift;

	/* NOTE(review): the members below look like a fragment of a
	 * different (adapter-parameters) structure fused here by extraction;
	 * confirm this layout against the complete source file.
	 */
	unsigned int ofldq_wr_cred;
	bool ulptx_memwrite_dsgl;	/* use of T5 DSGL allowed */
	unsigned int nsched_cls;	/* number of traffic classes */
	unsigned int max_ordird_qp;	/* Max read depth per RDMA QP */
	unsigned int max_ird_adapter;	/* Max read depth per adapter */
	bool fr_nsmr_tpte_wr_support;	/* FW support for FR_NSMR_TPTE_WR */
	u8 fw_caps_support;		/* 32-bit Port Capabilities */
	bool filter2_wr_support;	/* FW support for FILTER2_WR */
	unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */

	/* MPS Buffer Group Map[per Port].  Bit i is set if buffer group i is
	 * used by the Port
	 */
	u8 mps_bg_map[MAX_NPORTS];	/* MPS Buffer Group Map */
	bool write_w_imm_support;	/* FW supports WRITE_WITH_IMMEDIATE */
	bool write_cmpl_support;	/* FW supports WRITE_CMPL */
};
/* State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */
struct sge_idma_monitor_state {
	unsigned int idma_1s_thresh;	/* 1s threshold in Core Clock ticks */
	unsigned int idma_stalled[2];	/* synthesized stalled timers in HZ */
	unsigned int idma_state[2];	/* IDMA Hang detect state */
	unsigned int idma_qid[2];	/* IDMA Hung Ingress Queue ID */
	unsigned int idma_warn[2];	/* time to warning in HZ */
};
/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
 * The access and execute times are signed in order to accommodate negative
 * error returns.
 */
struct mbox_cmd {
	u64 cmd[MBOX_LEN / 8];	/* a Firmware Mailbox Command/Reply */
	u64 timestamp;		/* OS-dependent timestamp */
	u32 seqno;		/* sequence number */
	s16 access;		/* time (ms) to access mailbox */
	s16 execute;		/* time (ms) to execute */
};
/* Header of the mailbox command log; the log entries themselves are stored
 * immediately after this structure in memory.
 */
struct mbox_cmd_log {
	unsigned int size;	/* number of entries in the log */
	unsigned int cursor;	/* next position in the log to write */
	u32 seqno;		/* next sequence number */
	/* variable length mailbox command log starts here */
};
/* Given a pointer to a Firmware Mailbox Command Log and a log entry index, * return a pointer to the specified entry.
*/ staticinlinestruct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log, unsignedint entry_idx)
{ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
}
/* NOTE(review): the members below appear to be the tail of a larger
 * structure (link configuration state) whose opening declaration is missing
 * from this chunk — confirm against the complete source file.  Code is kept
 * byte-identical, including the extraction-fused "unsignedint" tokens.
 */
fw_port_cap32_t speed_caps;	/* speed(s) user has requested */
unsignedint speed;		/* actual link speed (Mb/s) */
enum cc_pause requested_fc;	/* flow control user has requested */
enum cc_pause fc;		/* actual link flow control */
enum cc_pause advertised_fc;	/* actual advertised flow control */
enum cc_fec requested_fec;	/* Forward Error Correction: */
enum cc_fec fec;		/* requested and actual in use */
unsignedchar autoneg;		/* autonegotiating? */
unsignedchar link_ok;		/* link up? */
unsignedchar link_down_rc;	/* link down reason */
struct sge_fl { /* SGE free-buffer queue state */ unsignedint avail; /* # of available Rx buffers */ unsignedint pend_cred; /* new buffers since last FL DB ring */ unsignedint cidx; /* consumer index */ unsignedint pidx; /* producer index */ unsignedlong alloc_failed; /* # of times buffer allocation failed */ unsignedlong large_alloc_failed; unsignedlong mapping_err; /* # of RX Buffer DMA Mapping failures */ unsignedlong low; /* # of times momentarily starving */ unsignedlong starving; /* RO fields */ unsignedint cntxt_id; /* SGE context id for the free list */ unsignedint size; /* capacity of free list */ struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
__be64 *desc; /* address of HW Rx descriptor ring */
dma_addr_t addr; /* bus address of HW ring start */ void __iomem *bar2_addr; /* address of BAR2 Queue registers */ unsignedint bar2_qid; /* Queue ID for BAR2 Queue registers */
};
/* A packet gather list */
struct pkt_gl {
	u64 sgetstamp;		/* SGE Time Stamp for Ingress Packet */
	struct page_frag frags[MAX_SKB_FRAGS];
	void *va;		/* virtual address of first byte */
	unsigned int nfrags;	/* # of fragments */
	unsigned int tot_len;	/* total length of fragments */
};
typedefint (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, conststruct pkt_gl *gl); typedefvoid (*rspq_flush_handler_t)(struct sge_rspq *q); /* LRO related declarations for ULD */ struct t4_lro_mgr { #define MAX_LRO_SESSIONS 64
u8 lro_session_cnt; /* # of sessions to aggregate */ unsignedlong lro_pkts; /* # of LRO super packets */ unsignedlong lro_merged; /* # of wire packets merged by LRO */ struct sk_buff_head lroq; /* list of aggregated sessions */
};
struct sge_rspq {			/* state for an SGE response queue */
	struct napi_struct napi;
	const __be64 *cur_desc;		/* current descriptor in queue */
	unsigned int cidx;		/* consumer index */
	u8 gen;				/* current generation bit */
	u8 intr_params;			/* interrupt holdoff parameters */
	u8 next_intr_params;		/* holdoff params for next interrupt */
	u8 adaptive_rx;
	u8 pktcnt_idx;			/* interrupt packet threshold */
	u8 uld;				/* ULD handling this queue */
	u8 idx;				/* queue index within its group */
	int offset;			/* offset into current Rx buffer */
	u16 cntxt_id;			/* SGE context id for the response q */
	u16 abs_id;			/* absolute SGE id for the response q */
	__be64 *desc;			/* address of HW response ring */
	dma_addr_t phys_addr;		/* physical address of the ring */
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
	unsigned int iqe_len;		/* entry size */
	unsigned int size;		/* capacity of response queue */
	struct adapter *adap;
	struct net_device *netdev;	/* associated net device */
	rspq_handler_t handler;
	rspq_flush_handler_t flush_handler;
	struct t4_lro_mgr lro_mgr;
};
struct sge_eth_stats {			/* Ethernet queue statistics */
	unsigned long pkts;		/* # of ethernet packets */
	unsigned long lro_pkts;		/* # of LRO super packets */
	unsigned long lro_merged;	/* # of wire packets merged by LRO */
	unsigned long rx_cso;		/* # of Rx checksum offloads */
	unsigned long vlan_ex;		/* # of Rx VLAN extractions */
	unsigned long rx_drops;		/* # of packets dropped due to no mem */
	unsigned long bad_rx_pkts;	/* # of packets with err_vec!=0 */
};
struct sge_txq {
	unsigned int in_use;		/* # of in-use Tx descriptors */
	unsigned int q_type;		/* Q type Eth/Ctrl/Ofld */
	unsigned int size;		/* # of descriptors */
	unsigned int cidx;		/* SW consumer index */
	unsigned int pidx;		/* producer index */
	unsigned long stops;		/* # of times q has been stopped */
	unsigned long restarts;		/* # of queue restarts */
	unsigned int cntxt_id;		/* SGE context id for the Tx q */
	struct tx_desc *desc;		/* address of HW Tx descriptor ring */
	struct tx_sw_desc *sdesc;	/* address of SW Tx descriptor ring */
	struct sge_qstat *stat;		/* queue status entry */
	dma_addr_t phys_addr;		/* physical address of the ring */
	spinlock_t db_lock;
	int db_disabled;
	unsigned short db_pidx;
	unsigned short db_pidx_inc;
	void __iomem *bar2_addr;	/* address of BAR2 Queue registers */
	unsigned int bar2_qid;		/* Queue ID for BAR2 Queue registers */
};
struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ struct sge_txq q; struct netdev_queue *txq; /* associated netdev TX queue */ #ifdef CONFIG_CHELSIO_T4_DCB
u8 dcb_prio; /* DCB Priority bound to queue */ #endif
u8 dbqt; /* SGE Doorbell Queue Timer in use */ unsignedint dbqtimerix; /* SGE Doorbell Queue Timer Index */ unsignedlong tso; /* # of TSO requests */ unsignedlong uso; /* # of USO requests */ unsignedlong tx_cso; /* # of Tx checksum offloads */ unsignedlong vlan_ins; /* # of Tx VLAN insertions */ unsignedlong mapping_err; /* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_uld_txq {			/* state for an SGE offload Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;	/* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	bool service_ofldq_running;	/* service_ofldq() is processing sendq */
	u8 full;			/* the Tx ring is full */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
} ____cacheline_aligned_in_smp;
struct sge_ctrl_txq {			/* state for an SGE control Tx queue */
	struct sge_txq q;
	struct adapter *adap;
	struct sk_buff_head sendq;	/* list of backpressured packets */
	struct tasklet_struct qresume_tsk; /* restarts the queue */
	u8 full;			/* the Tx ring is full */
} ____cacheline_aligned_in_smp;
/* Per-ULD collection of ingress (Rx) queues. */
struct sge_uld_rxq_info {
	char name[IFNAMSIZ];	/* name of ULD driver */
	struct sge_ofld_rxq *uldrxq;	/* Rxq's for ULD */
	u16 *rspq_id;		/* response queue id's of rxq */
	u16 nrxq;		/* # of ingress uld queues */
	u16 nciq;		/* # of completion queues */
	u8 uld;			/* uld type */
};
/* Per-ULD collection of egress (Tx) queues. */
struct sge_uld_txq_info {
	struct sge_uld_txq *uldtxq;	/* Txq's for ULD */
	atomic_t users;		/* num users */
	u16 ntxq;		/* # of egress uld queues */
};
/* struct to maintain ULD list to reallocate ULD resources on hotplug */
struct cxgb4_uld_list {
	struct cxgb4_uld_info uld_info;	/* ULD callbacks/configuration */
	struct list_head list_node;	/* linkage in the global ULD list */
	enum cxgb4_uld uld_type;	/* which ULD this entry describes */
};
/* ETHOFLD software Tx queue states.  The value comments describe the FLOWC
 * open/close handshake that brackets the ACTIVE state.
 */
enum sge_eosw_state {
	CXGB4_EO_STATE_CLOSED = 0, /* Not ready to accept traffic */
	CXGB4_EO_STATE_FLOWC_OPEN_SEND, /* Send FLOWC open request */
	CXGB4_EO_STATE_FLOWC_OPEN_REPLY, /* Waiting for FLOWC open reply */
	CXGB4_EO_STATE_ACTIVE, /* Ready to accept traffic */
	CXGB4_EO_STATE_FLOWC_CLOSE_SEND, /* Send FLOWC close request */
	CXGB4_EO_STATE_FLOWC_CLOSE_REPLY, /* Waiting for FLOWC close reply */
};
/* Software state for an ETHOFLD Tx queue. */
struct sge_eosw_txq {
	spinlock_t lock;	/* Per queue lock to synchronize completions */
	enum sge_eosw_state state;	/* Current ETHOFLD State */
	struct tx_sw_desc *desc;	/* Descriptor ring to hold packets */
	u32 ndesc;		/* Number of descriptors */
	u32 pidx;		/* Current Producer Index */
	u32 last_pidx;		/* Last successfully transmitted Producer Index */
	u32 cidx;		/* Current Consumer Index */
	u32 last_cidx;		/* Last successfully reclaimed Consumer Index */
	u32 flowc_idx;		/* Descriptor containing a FLOWC request */
	u32 inuse;		/* Number of packets held in ring */

	u32 cred;		/* Current available credits */
	u32 ncompl;		/* # of completions posted */
	u32 last_compl;		/* # of credits consumed since last completion req */

	u32 eotid;		/* Index into EOTID table in software */
	u32 hwtid;		/* Hardware EOTID index */

	u32 hwqid;		/* Underlying hardware queue index */
	struct net_device *netdev;	/* Pointer to netdevice */
	struct tasklet_struct qresume_tsk;	/* Restarts the queue */
	struct completion completion;	/* completion for FLOWC rendezvous */
};
/* Hardware state/statistics for an ETHOFLD Tx queue. */
struct sge_eohw_txq {
	spinlock_t lock;	/* Per queue lock */
	struct sge_txq q;	/* HW Txq */
	struct adapter *adap;	/* Backpointer to adapter */
	unsigned long tso;	/* # of TSO requests */
	unsigned long uso;	/* # of USO requests */
	unsigned long tx_cso;	/* # of Tx checksum offloads */
	unsigned long vlan_ins;	/* # of Tx VLAN insertions */
	unsigned long mapping_err;	/* # of I/O MMU packet mapping errors */
};
/* NOTE(review): the members and closing brace below appear to be the tail of
 * a larger structure (SGE state) whose beginning is missing from this chunk
 * — confirm against the complete source file.
 */
int fwevtq_msix_idx;	/* Index to firmware event queue MSI-X info */
int nd_msix_idx;	/* Index to non-data interrupts MSI-X info */
};
/* Iterate over an adapter's Ethernet Rx / offload Tx queue sets. */
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
struct l2t_data;
#ifdef CONFIG_PCI_IOV
/* T4 supports SRIOV on PF0-3 and T5 on PF0-7.  However, the Serial
 * Configuration initialization for T5 only has SR-IOV functionality enabled
 * on PF0-3 in order to simplify everything.
 */
#define NUM_OF_PF_WITH_SRIOV 4
/* Per-port bookkeeping for ethtool n-tuple filters. */
struct cxgb4_ethtool_filter_info {
	u32 *loc_array;		/* Array holding the actual TIDs set to filters */
	unsigned long *bmap;	/* Bitmap for managing filters in use */
	u32 in_use;		/* # of filters in use */
};
/* Adapter-wide ethtool filter state. */
struct cxgb4_ethtool_filter {
	u32 nentries;		/* Adapter wide number of supported filters */
	struct cxgb4_ethtool_filter_info *port;	/* Per port entry */
};
/* NOTE(review): the members below appear to be a fragment of the main
 * adapter structure, whose declaration is not visible in this chunk —
 * confirm against the complete source file.
 */
/* lock for mailbox cmd list */
spinlock_t mbox_lock;
struct mbox_list mlist;

/* support for mailbox command/reply logging */
#define T4_OS_LOG_MBOX_CMDS 256
struct mbox_cmd_log *mbox_log;

struct mutex uld_mutex;

struct dentry *debugfs_root;
bool use_bd;	/* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss;	/* 1 implies that different RSS flit per filter is
		 * used per filter else if 0 default RSS flit is
		 * used for all 4 filters.
		 */
/* Support for "sched_queue" command to allow one or more NIC TX Queues
 * to be bound to a TX Scheduling Class.
 */
struct ch_sched_queue {
	s8 queue;	/* queue index */
	s8 class;	/* class index */
};
/* Support for "sched_flowc" command to allow one or more FLOWC
 * to be bound to a TX Scheduling Class.
 */
struct ch_sched_flowc {
	s32 tid;	/* TID to bind */
	s8 class;	/* class index */
};
/* Filter matching rules.  These consist of a set of ingress packet field
 * (value, mask) tuples.  The associated ingress packet field matches the
 * tuple when ((field & mask) == value).  (Thus a wildcard "don't care" field
 * rule can be constructed by specifying a tuple of (0, 0).)  A filter rule
 * matches an ingress packet when all of the individual field
 * matching rules are true.
 *
 * Partial field masks are always valid, however, while it may be easy to
 * understand their meanings for some fields (e.g. IP address to match a
 * subnet), for others making sensible partial masks is less intuitive (e.g.
 * MPS match type) ...
 *
 * Most of the following data structures are modeled on T4 capabilities.
 * Drivers for earlier chips use the subsets which make sense for those chips.
 * We really need to come up with a hardware-independent mechanism to
 * represent hardware filter capabilities ...
 */
struct ch_filter_tuple {
	/* Compressed header matching field rules.  The TP_VLAN_PRI_MAP
	 * register selects which of these fields will participate in the
	 * filter match rules -- up to a maximum of 36 bits.  Because
	 * TP_VLAN_PRI_MAP is a global register, all filters must use the same
	 * set of fields.
	 */
	uint32_t ethtype:ETHTYPE_BITWIDTH;	/* Ethernet type */
	uint32_t frag:FRAG_BITWIDTH;		/* IP fragmentation header */
	uint32_t ivlan_vld:1;			/* inner VLAN valid */
	uint32_t ovlan_vld:1;			/* outer VLAN valid */
	uint32_t pfvf_vld:1;			/* PF/VF valid */
	uint32_t encap_vld:1;			/* Encapsulation valid */
	uint32_t macidx:MACIDX_BITWIDTH;	/* exact match MAC index */
	uint32_t fcoe:FCOE_BITWIDTH;		/* FCoE packet */
	uint32_t iport:IPORT_BITWIDTH;		/* ingress port */
	uint32_t matchtype:MATCHTYPE_BITWIDTH;	/* MPS match type */
	uint32_t proto:PROTO_BITWIDTH;		/* protocol type */
	uint32_t tos:TOS_BITWIDTH;		/* TOS/Traffic Type */
	uint32_t pf:PF_BITWIDTH;		/* PCI-E PF ID */
	uint32_t vf:VF_BITWIDTH;		/* PCI-E VF ID */
	uint32_t ivlan:IVLAN_BITWIDTH;		/* inner VLAN */
	uint32_t ovlan:OVLAN_BITWIDTH;		/* outer VLAN */
	uint32_t vni:ENCAP_VNI_BITWIDTH;	/* VNI of tunnel */

	/* Uncompressed header matching field rules.  These are always
	 * available for field rules.
	 */
	uint8_t lip[16];	/* local IP address (IPv4 in [3:0]) */
	uint8_t fip[16];	/* foreign IP address (IPv4 in [3:0]) */
	uint16_t lport;		/* local port */
	uint16_t fport;		/* foreign port */
};
/* A filter ioctl command.
 */
struct ch_filter_specification {
	/* Administrative fields for filter.
	 */
	uint32_t hitcnts:1;	/* count filter hits in TCB */
	uint32_t prio:1;	/* filter has priority over active/server */

	/* Fundamental filter typing.  This is the one element of filter
	 * matching that doesn't exist as a (value, mask) tuple.
	 */
	uint32_t type:1;	/* 0 => IPv4, 1 => IPv6 */
	u32 hash:1;		/* 0 => wild-card, 1 => exact-match */

	/* Packet dispatch information.  Ingress packets which match the
	 * filter rules will be dropped, passed to the host or switched back
	 * out as egress packets.
	 */
	uint32_t action:2;	/* drop, pass, switch */

	uint32_t rpttid:1;	/* report TID in RSS hash field */

	uint32_t dirsteer:1;	/* 0 => RSS, 1 => steer to iq */
	uint32_t iq:10;		/* ingress queue */

	uint32_t maskhash:1;	/* dirsteer=0: store RSS hash in TCB */
	uint32_t dirsteerhash:1;/* dirsteer=1: 0 => TCB contains RSS hash */
				/* 1 => TCB contains IQ ID */

	/* Switch proxy/rewrite fields.  An ingress packet which matches a
	 * filter with "switch" set will be looped back out as an egress
	 * packet -- potentially with some Ethernet header rewriting.
	 */
	uint32_t eport:2;	/* egress port to switch packet out */
	uint32_t newdmac:1;	/* rewrite destination MAC address */
	uint32_t newsmac:1;	/* rewrite source MAC address */
	uint32_t newvlan:2;	/* rewrite VLAN Tag */
	uint32_t nat_mode:3;	/* specify NAT operation mode */
	uint8_t dmac[ETH_ALEN];	/* new destination MAC address */
	uint8_t smac[ETH_ALEN];	/* new source MAC address */
	uint16_t vlan;		/* VLAN Tag to insert */

	u8 nat_lip[16];		/* local IP to use after NAT'ing */
	u8 nat_fip[16];		/* foreign IP to use after NAT'ing */
	u16 nat_lport;		/* local port to use after NAT'ing */
	u16 nat_fport;		/* foreign port to use after NAT'ing */

	/* NOTE(review): the struct appears truncated at this point by
	 * extraction — confirm any remaining members against the complete
	 * source.  The closing brace below is restored so the following enum
	 * is not accidentally nested inside the struct.
	 */
};

/* NAT operation modes for the nat_mode field above. */
enum {
	NAT_MODE_NONE = 0,	/* No NAT performed */
	NAT_MODE_DIP,		/* NAT on Dst IP */
	NAT_MODE_DIP_DP,	/* NAT on Dst IP, Dst Port */
	NAT_MODE_DIP_DP_SIP,	/* NAT on Dst IP, Dst Port and Src IP */
	NAT_MODE_DIP_DP_SP,	/* NAT on Dst IP, Dst Port and Src Port */
	NAT_MODE_SIP_SP,	/* NAT on Src IP and Src Port */
	NAT_MODE_DIP_SIP_SP,	/* NAT on Dst IP, Src IP and Src Port */
	NAT_MODE_ALL		/* NAT on entire 4-tuple */
};
#define CXGB4_FILTER_TYPE_MAX 2
/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware of the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter. */
	u32 valid:1;		/* filter allocated and valid */
	u32 locked:1;		/* filter is administratively locked */
	u32 pending:1;		/* filter action is pending firmware reply */
	struct filter_ctx *ctx;	/* Caller's completion hook */
	struct l2t_entry *l2t;	/* Layer Two Table entry for dmac */
	struct smt_entry *smt;	/* Source Mac Table entry for smac */
	struct net_device *dev;	/* Associated net device */
	u32 tid;		/* This will store the actual tid */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};
/**
 * t4_set_hw_addr - store a port's MAC address in SW
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Store the Ethernet address of the given port in SW.  Called by the common
 * code when it retrieves a port's Ethernet address from EEPROM.
 */
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
				  u8 hw_addr[])
{
	/* Update both the active address and the permanent address. */
	eth_hw_addr_set(adapter->port[port_idx], hw_addr);
	ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}
/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 *
 * Return the struct port_info associated with a net_device
 */
static inline struct port_info *netdev2pinfo(const struct net_device *dev)
{
	return netdev_priv(dev);
}
/** * adap2pinfo - return the port_info of a port * @adap: the adapter * @idx: the port index * * Return the port_info structure for the port of the given index.
*/ staticinlinestruct port_info *adap2pinfo(struct adapter *adap, int idx)
{ return netdev_priv(adap->port[idx]);
}
/** * netdev2adap - return the adapter structure associated with a net_device * @dev: the netdev * * Return the struct adapter associated with a net_device
*/ staticinlinestruct adapter *netdev2adap(conststruct net_device *dev)
{ return netdev2pinfo(dev)->adapter;
}
/* Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
/* Return a queue's interrupt hold-off time in us.  0 means no timer. */
/* NOTE(review): this function body is truncated in this chunk — no return
 * statement or closing brace is visible.  Recover the remainder from the
 * complete source file.  The code below (including the extraction-fused
 * keywords) is kept byte-identical.
 */
staticinlineunsignedint qtimer_val(conststruct adapter *adap,
				   conststruct sge_rspq *q)
{
	unsignedint idx = q->intr_params >> 1;
/* Return 1 if the given PCI device ID identifies a bypass adapter. */
static inline int is_bypass_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x440b:
	case 0x440c:
		return 1;
	default:
		return 0;
	}
}
/* NOTE(review): this function appears truncated in this chunk — the switch
 * has no default case and the closing braces are missing.  Recover the
 * remainder from the complete source file.  The code below (including the
 * extraction-fused keywords) is kept byte-identical.
 */
staticinlineint is_10gbt_device(int device)
{
	/* this should be set based upon device capabilities */
	switch (device) {
	case 0x4409:
	case 0x4486:
		return 1;
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout);
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok);

/* Convenience wrapper: issue a mailbox command with sleeping allowed and an
 * explicit timeout.
 */
static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
				     const void *cmd, int size, void *rpl,
				     int timeout)
{
	return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
				       timeout);
}
/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address according to the hash function used by HW inexact
 * (hash) address matching.
 */
static inline int hash_mac_addr(const u8 *addr)
{
	/* Pack the six address bytes into two 24-bit words, XOR them
	 * together, then fold the result down to the final 6-bit bucket.
	 */
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];

	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
/** * t4_is_inserted_mod_type - is a plugged in Firmware Module Type * @fw_mod_type: the Firmware Mofule Type * * Return whether the Firmware Module Type represents a real Transceiver * Module/Cable Module Type which has been inserted.
*/ staticinlinebool t4_is_inserted_mod_type(unsignedint fw_mod_type)
{ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
}
/*
 * NOTE(review): the trailing text below is not part of the driver source.
 * It is a German website content disclaimer that was almost certainly picked
 * up during extraction and should simply be removed.  It is wrapped in a
 * comment (and translated) so the file remains valid C:
 *
 * "The information on this web page has been compiled carefully and to the
 *  best of our knowledge.  However, no guarantee is given as to the
 *  completeness, correctness, or quality of the information provided.
 *  Note: the colored syntax highlighting and the measurement are still
 *  experimental."
 */