/* PCI vendor/device IDs for BE2/BE3, Lancer and Skyhawk adapters */
#define BE_VENDOR_ID			0x19a2
#define EMULEX_VENDOR_ID		0x10df
#define BE_DEVICE_ID1			0x211
#define BE_DEVICE_ID2			0x221
#define OC_DEVICE_ID1			0x700	/* Device Id for BE2 cards */
#define OC_DEVICE_ID2			0x710	/* Device Id for BE3 cards */
#define OC_DEVICE_ID3			0xe220	/* Device id for Lancer cards */
#define OC_DEVICE_ID4			0xe228	/* Device id for VF in Lancer */
#define OC_DEVICE_ID5			0x720	/* Device Id for Skyhawk cards */
#define OC_DEVICE_ID6			0x728	/* Device id for VF in SkyHawk */
#define OC_SUBSYS_DEVICE_ID1		0xE602
#define OC_SUBSYS_DEVICE_ID2		0xE642
#define OC_SUBSYS_DEVICE_ID3		0xE612
#define OC_SUBSYS_DEVICE_ID4		0xE652

/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		((u16) 64)
/* allocate extra space to allow tunneling decapsulation without head
 * reallocation
 */
#define BE_RX_SKB_ALLOC_SIZE	256

/* Queue depths */
#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

/* Per-chip-generation queue limits */
#define BE2_MAX_RSS_QS		4
#define BE3_MAX_RSS_QS		16
#define BE3_MAX_TX_QS		16
#define BE3_MAX_EVT_QS		16
#define BE3_SRIOV_MAX_EVT_QS	8
#define SH_VF_MAX_NIC_EQS	3	/* Skyhawk VFs can have a max of 4 EQs
					 * and at least 1 is granted to either
					 * SURF/DPDK
					 */

#define MAX_ROCE_EQS		5
#define MAX_MSIX_VECTORS	32
#define MIN_MSIX_VECTORS	1
#define MAX_RX_POST		NAPI_POLL_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)
#define MAX_NUM_POST_ERX_DB	255u

#define MAX_VFS			30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN		32
#define CNTL_SERIAL_NUM_WORDS	8  /* Controller serial number words */
#define CNTL_SERIAL_NUM_WORD_SZ	(sizeof(u16)) /* Byte-sz of serial num word */
/* Generic descriptor-ring bookkeeping: ring geometry, producer/consumer
 * indices, occupancy count, HW queue id, and the DMA memory backing the ring.
 */
struct be_queue_info {
	u32 len;		/* Number of entries in the ring */
	u32 entry_size;		/* Size of an element in the queue */
	u32 tail, head;		/* Consumer/producer indices */
	atomic_t used;		/* Number of valid elements in the queue */
	u32 id;			/* Queue id assigned by FW */
	struct be_dma_mem dma_mem;	/* DMA-coherent ring memory */
	bool created;		/* true once the queue is created in FW */
};
/* Adaptive interrupt coalescing (AIC) info */
struct be_aic_obj {
	u32 min_eqd;		/* in usecs */
	u32 max_eqd;		/* in usecs */
	u32 prev_eqd;		/* in usecs */
	u32 et_eqd;		/* configured val when aic is off */
	ulong jiffies;		/* timestamp of the previous AIC evaluation */
	u64 rx_pkts_prev;	/* Used to calculate RX pps */
	u64 tx_reqs_prev;	/* Used to calculate TX pps */
};
/* Structure to hold some data of interest obtained from a TX CQE */
struct be_tx_compl_info {
	u8 status;		/* Completion status */
	u16 end_index;		/* Completed TXQ Index */
};
/* Per-TX-queue state: the TX ring + its completion ring, in-flight skb
 * tracking, stats, and WRB batching state for deferred doorbells.
 */
struct be_tx_obj {
	u32 db_offset;			/* Doorbell register offset */
	struct be_tx_compl_info txcp;	/* Last processed TX completion */
	struct be_queue_info q;		/* TX ring */
	struct be_queue_info cq;	/* TX completion ring */
	/* Remember the skbs that were transmitted */
	struct sk_buff *sent_skb_list[TX_Q_LEN];
	struct be_tx_stats stats;
	u16 pend_wrb_cnt;	/* Number of WRBs yet to be given to HW */
	u16 last_req_wrb_cnt;	/* wrb cnt of the last req in the Q */
	u16 last_req_hdr;	/* index of the last req's hdr-wrb */
} ____cacheline_aligned_in_smp;
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
	struct page *page;
	/* set to page-addr for last frag of the page & frag-addr otherwise */
	DEFINE_DMA_UNMAP_ADDR(bus);
	u16 page_offset;	/* Offset of this frag within the page */
	bool last_frag;		/* last frag of the page */
};
/* Per-RX-queue statistics, protected for 64-bit reads via u64_stats_sync */
struct be_rx_stats {
	u64 rx_bytes;
	u64 rx_pkts;
	u64 rx_vxlan_offload_pkts;
	u32 rx_drops_no_skbs;	/* skb allocation errors */
	u32 rx_drops_no_frags;	/* HW has no fetched frags */
	u32 rx_post_fail;	/* page post alloc failures */
	u32 rx_compl;
	u32 rx_mcast_pkts;
	u32 rx_compl_err;	/* completions with err set */
	struct u64_stats_sync sync;
};
/* Macros to read/write the 'features' word of be_wrb_params structure */
#define BE_WRB_F_BIT(name)	BE_WRB_F_##name##_BIT
#define BE_WRB_F_MASK(name)	BIT_MASK(BE_WRB_F_##name##_BIT)
/* The structure below provides a HW-agnostic abstraction of WRB params
 * retrieved from a TX skb. This is in turn passed to chip specific routines
 * during transmit, to set the corresponding params in the WRB.
 */
struct be_wrb_params {
	u32 features;	/* Feature bits */
	u16 vlan_tag;	/* VLAN tag */
	u16 lso_mss;	/* MSS for LSO */
};
/* UE-detection-duration in BEx/Skyhawk:
 * All PFs must wait for this duration after they detect UE before reading
 * SLIPORT_SEMAPHORE register. At the end of this duration, the Firmware
 * guarantees that the SLIPORT_SEMAPHORE register is updated to indicate
 * if the UE is recoverable.
 */
#define ERR_RECOVERY_UE_DETECT_DURATION		BE_SEC

/* Initial idle time (in msec) to elapse after driver load,
 * before UE recovery is allowed.
 */
#define ERR_IDLE_HR			24
#define ERR_RECOVERY_IDLE_TIME		(ERR_IDLE_HR * BE_HOUR)

/* Time interval (in msec) after which UE recovery can be repeated */
#define ERR_INTERVAL_HR			72
#define ERR_RECOVERY_INTERVAL		(ERR_INTERVAL_HR * BE_HOUR)
/* State for unrecoverable-error (UE) detection and recovery.
 * Lancer and BEx/Skyhawk use disjoint members of the anonymous union.
 */
struct be_error_recovery {
	union {
		u8 recovery_retries;	/* used for Lancer */
		u8 recovery_state;	/* used for BEx and Skyhawk */
	};

	/* BEx/Skyhawk error recovery variables */
	bool recovery_supported;
	u16 ue_to_reset_time;		/* Time after UE, to soft reset
					 * the chip - PF0 only
					 */
	u16 ue_to_poll_time;		/* Time after UE, to Restart Polling
					 * of SLIPORT_SEMAPHORE reg
					 */
	u16 last_err_code;
	/* Fixed fused "unsignedlong" tokens (would not compile) */
	unsigned long probe_time;
	unsigned long last_recovery_time;

	/* Common to both Lancer & BEx/SH error recovery */
	u32 resched_delay;
	struct delayed_work err_detection_work;
};
/* NOTE(review): the member declarations below belong to struct be_adapter,
 * but the opening "struct be_adapter {" line and the members that should
 * precede 'csr' (e.g. pdev/netdev pointers) are missing from this copy of
 * the file — presumably lost in extraction. Restore them from the original
 * header before compiling. Code below left byte-identical.
 */
u8 __iomem *csr; /* CSR BAR used only for BE2/3 */
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* On SH,BEx only. Shadow of PCI config space */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */ struct be_dma_mem mbox_mem; /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purpose */ struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
struct be_resources pool_res; /* resources available for the port */ struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
u8 pf_num; /* Numbering used by FW, starts at 0 */
u8 vf_num; /* Numbering used by FW, starts at 1 */
u8 virtfn; struct be_vf_cfg *vf_cfg; bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
__be16 vxlan_port; /* offloaded vxlan port num */ struct phy_info phy;
u8 wol_cap; bool wol_en;
u16 asic_rev;
u16 qnq_vid;
u32 msg_enable; int be_get_temp_freq; struct be_hwmon hwmon_info; struct rss_info rss_info; /* Filters for packets that need to be sent to BMC */
u32 bmc_filt_mask;
u32 fat_dump_len;
u16 serial_num[CNTL_SERIAL_NUM_WORDS];
u8 phy_state; /* state of sfp optics (functional, faulted, etc.,) */
u8 dev_mac[ETH_ALEN];
u32 priv_flags; /* ethtool get/set_priv_flags() */ struct be_error_recovery error_recovery;
};
/* Used for deferred FW config cmds. Add fields to this struct as reqd */
struct be_cmd_work {
	struct work_struct work;
	struct be_adapter *adapter;
};
/* Iterate i/vf_cfg over all provisioned VFs of the adapter */
#define for_all_vfs(adapter, vf_cfg, i)					\
	for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs;	\
	     i++, vf_cfg++)
#define ON				1
#define OFF				0

/* Accessors for per-function/per-pool resource limits */
#define be_max_vlans(adapter)		(adapter->res.max_vlans)
#define be_max_uc(adapter)		(adapter->res.max_uc_mac)
#define be_max_mc(adapter)		(adapter->res.max_mcast_mac)
#define be_max_vfs(adapter)		(adapter->pool_res.max_vfs)
#define be_max_rss(adapter)		(adapter->res.max_rss_qs)
#define be_max_txqs(adapter)		(adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter)	(adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter)		(adapter->res.max_rx_qs)
/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
#define be_max_func_eqs(adapter)	(adapter->res.max_evt_qs)
/* Max number of EQs available only for NIC */
#define be_max_nic_eqs(adapter)		(adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter)	(adapter->res.if_cap_flags)
#define be_max_pf_pool_rss_tables(adapter)	\
				(adapter->pool_res.max_rss_tables)
/* Max irqs available for NIC */
#define be_max_irqs(adapter)	\
			(min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
/* Max irqs *needed* for RX queues */ staticinline u16 be_max_rx_irqs(struct be_adapter *adapter)
{ /* If no RSS, need atleast one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
return min_t(u16, num, be_max_irqs(adapter));
}
/* Max irqs *needed* for TX queues */ staticinline u16 be_max_tx_irqs(struct be_adapter *adapter)
{ return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
}
/* Max irqs *needed* for combined queues */ staticinline u16 be_max_qp_irqs(struct be_adapter *adapter)
{ return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Max irqs *needed* for RX and TX queues together */ staticinline u16 be_max_any_irqs(struct be_adapter *adapter)
{ return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Is BE in pvid_tagging mode */
#define be_pvid_tagging_enabled(adapter)	(adapter->pvid)

/* Is BE in QNQ multi-channel mode */
#define be_is_qnq_mode(adapter)		(adapter->function_mode & QNQ_MODE)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size)				\
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) +	\
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))
/* Returns bit offset within a DWORD of a bitfield.
 * Works because amap structs describe each bit with a one-byte member,
 * so the byte offset of 'field' equals its bit offset.
 */
#define AMAP_BIT_OFFSET(_struct, field)	\
		(((size_t)&(((_struct *)0)->field)) % 32)
/* Returns the bit mask of the field that is NOT shifted into location. */ staticinline u32 amap_mask(u32 bitsize)
{ return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1);
}
/*
 * NOTE(review): the text below this point in the captured file was website
 * boilerplate appended by the tool used to extract this source, not part of
 * the driver. Translated from German it read: "The information on this web
 * page was compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the information provided is
 * guaranteed. Note: the syntax colouring and the measurement are still
 * experimental." It should be removed from the source file entirely.
 */