/* Map a device iavf_status code to an errno-style value (definition elsewhere). */
int iavf_status_to_errno(enum iavf_status status);
/* Map a virtchnl status code to an errno-style value (definition elsewhere). */
int virtchnl_status_to_errno(enum virtchnl_status_code v_status);
/* VSI state flags shared with common code */
enum iavf_vsi_state_t {
	__IAVF_VSI_DOWN,
	/* This must be last as it determines the size of the BITMAP */
	__IAVF_VSI_STATE_SIZE__,
};
/* dummy struct to make common code less painful */
struct iavf_vsi {
	struct iavf_adapter *back;	/* owning adapter */
	struct net_device *netdev;
	u16 seid;
	u16 id;
	DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__);
	int base_vector;
	u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IAVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IAVF_DEFAULT_TXD 512
#define IAVF_DEFAULT_RXD 512
#define IAVF_MAX_TXD 4096
#define IAVF_MIN_TXD 64
#define IAVF_MAX_RXD 4096
#define IAVF_MIN_RXD 64
#define IAVF_REQ_DESCRIPTOR_MULTIPLE 32
#define IAVF_MAX_AQ_BUF_SIZE 4096
#define IAVF_AQ_LEN 32
#define IAVF_AQ_MAX_ERR 20 /* times to try before resetting AQ */
/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct iavf_q_vector {
	struct iavf_adapter *adapter;
	struct iavf_vsi *vsi;
	struct napi_struct napi;
	struct iavf_ring_container rx;
	struct iavf_ring_container tx;
	u32 ring_mask;
	u8 itr_countdown;	/* when 0 should adjust adaptive ITR */
	u8 num_ringpairs;	/* total number of ring pairs in vector */
	u16 v_idx;		/* index in the vsi->q_vector array. */
	u16 reg_idx;		/* register index of the interrupt */
	char name[IFNAMSIZ + 15];
	bool arm_wb_state;
};
/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways. The lowest value
 * supported by all of the iavf hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
/* Lifecycle states of a VLAN filter as it is negotiated with the PF */
enum iavf_vlan_state_t {
	IAVF_VLAN_INVALID,
	IAVF_VLAN_ADD,		/* filter needs to be added */
	IAVF_VLAN_IS_NEW,	/* filter is new, wait for PF answer */
	IAVF_VLAN_ACTIVE,	/* filter is accepted by PF */
	IAVF_VLAN_DISABLE,	/* filter needs to be deleted by PF, then marked INACTIVE */
	IAVF_VLAN_INACTIVE,	/* filter is inactive, we are in IFF_DOWN */
	IAVF_VLAN_REMOVE,	/* filter needs to be removed from list */
};
#define IAVF_MAX_TRAFFIC_CLASS 4
/* State of traffic class creation */
enum iavf_tc_state_t {
	__IAVF_TC_INVALID,	/* no traffic class, default state */
	__IAVF_TC_RUNNING,	/* traffic classes have been created */
};
/* Progress of a cloud filter through its add/delete handshake with the PF */
enum iavf_cloud_filter_state_t {
	__IAVF_CF_INVALID,	/* not (yet) added */
	__IAVF_CF_ADD_PENDING,	/* add requested, awaiting the PF */
	__IAVF_CF_DEL_PENDING,	/* delete requested, awaiting the PF */
	__IAVF_CF_ACTIVE,	/* accepted and in use */
};
/* Driver state. The order of these is important! */
enum iavf_state_t {
	__IAVF_STARTUP,			/* driver loaded, probe complete */
	__IAVF_REMOVE,			/* driver is being unloaded */
	__IAVF_INIT_VERSION_CHECK,	/* aq msg sent, awaiting reply */
	__IAVF_INIT_GET_RESOURCES,	/* aq msg sent, awaiting reply */
	__IAVF_INIT_EXTENDED_CAPS,	/* process extended caps which require aq msg exchange */
	__IAVF_INIT_CONFIG_ADAPTER,
	__IAVF_INIT_SW,			/* got resources, setting up structs */
	__IAVF_INIT_FAILED,		/* init failed, restarting procedure */
	__IAVF_RESETTING,		/* in reset */
	__IAVF_COMM_FAILED,		/* communication with PF failed */
	/* Below here, watchdog is running */
	__IAVF_DOWN,			/* ready, can be opened */
	__IAVF_DOWN_PENDING,		/* descending, waiting for watchdog */
	__IAVF_TESTING,			/* in ethtool self-test */
	__IAVF_RUNNING,			/* opened, working */
};
/* Bit positions used to flag critical sections on the adapter */
enum iavf_critical_section_t {
	__IAVF_IN_REMOVE_TASK,	/* device being removed */
};
/* AQ messages that must be sent after IAVF_FLAG_AQ_GET_CONFIG, in
 * order to negotiate extended capabilities.
 */
#define IAVF_FLAG_AQ_EXTENDED_CAPS \
	(IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS | \
	 IAVF_FLAG_AQ_GET_SUPPORTED_RXDIDS | \
	 IAVF_FLAG_AQ_GET_PTP_CAPS)
/* flags for processing extended capability messages during * __IAVF_INIT_EXTENDED_CAPS. Each capability exchange requires * both a SEND and a RECV step, which must be processed in sequence. * * During the __IAVF_INIT_EXTENDED_CAPS state, the driver will * process one flag at a time during each state loop.
*/
u64 extended_caps; #define IAVF_EXTENDED_CAP_SEND_VLAN_V2 BIT_ULL(0) #define IAVF_EXTENDED_CAP_RECV_VLAN_V2 BIT_ULL(1) #define IAVF_EXTENDED_CAP_SEND_RXDID BIT_ULL(2) #define IAVF_EXTENDED_CAP_RECV_RXDID BIT_ULL(3) #define IAVF_EXTENDED_CAP_SEND_PTP BIT_ULL(4) #define IAVF_EXTENDED_CAP_RECV_PTP BIT_ULL(5)
/* Lock to prevent possible clobbering of * current_netdev_promisc_flags
*/
spinlock_t current_netdev_promisc_flags_lock;
netdev_features_t current_netdev_promisc_flags;
/* OS defined structs */ struct net_device *netdev; struct pci_dev *pdev;
struct delayed_work watchdog_task; bool link_up; enum virtchnl_link_speed link_speed; /* This is only populated if the VIRTCHNL_VF_CAP_ADV_LINK_SPEED is set * in vf_res->vf_cap_flags. Use ADV_LINK_SUPPORT macro to determine if * this field is valid. This field should be used going forward and the * enum virtchnl_link_speed above should be considered the legacy way of * storing/communicating link speeds.
*/
u32 link_speed_mbps;
enum virtchnl_ops current_op; /* RSS by the PF should be preferred over RSS via other methods. */ #define RSS_PF(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_RSS_PF) #define RSS_AQ(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_RSS_AQ) #define RSS_REG(_a) (!((_a)->vf_res->vf_cap_flags & \
(VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
VIRTCHNL_VF_OFFLOAD_RSS_PF))) #define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN) #define VLAN_V2_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_VLAN_V2) #define CRC_OFFLOAD_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_CRC) #define TC_U32_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_TC_U32) #define VLAN_V2_FILTERING_ALLOWED(_a) \
(VLAN_V2_ALLOWED((_a)) && \
((_a)->vlan_v2_caps.filtering.filtering_support.outer || \
(_a)->vlan_v2_caps.filtering.filtering_support.inner)) #define VLAN_FILTERING_ALLOWED(_a) \
(VLAN_ALLOWED((_a)) || VLAN_V2_FILTERING_ALLOWED((_a))) #define ADV_LINK_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_CAP_ADV_LINK_SPEED) #define FDIR_FLTR_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_FDIR_PF) #define ADV_RSS_SUPPORT(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF) #define QOS_ALLOWED(_a) ((_a)->vf_res->vf_cap_flags & \
VIRTCHNL_VF_OFFLOAD_QOS) #define IAVF_RXDID_ALLOWED(a) \
((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) #define IAVF_PTP_ALLOWED(a) \
((a)->vf_res->vf_cap_flags & VIRTCHNL_VF_CAP_PTP) struct virtchnl_vf_resource *vf_res; /* incl. all VSIs */ struct virtchnl_vsi_resource *vsi_res; /* our LAN VSI */ struct virtchnl_version_info pf_version; #define PF_IS_V11(_a) (((_a)->pf_version.major == 1) && \
((_a)->pf_version.minor == 1)) struct virtchnl_vlan_caps vlan_v2_caps;
u64 supp_rxdids; struct iavf_ptp ptp;
u16 msg_enable; struct iavf_eth_stats current_stats; struct virtchnl_qos_cap_list *qos_caps; struct iavf_vsi vsi;
u32 aq_wait_count; /* RSS stuff */ enum virtchnl_rss_algorithm hfunc;
u64 rss_hashcfg;
u16 rss_key_size;
u16 rss_lut_size;
u8 *rss_key;
u8 *rss_lut; /* ADQ related members */ struct iavf_channel_config ch_config;
u8 num_tc; struct list_head cloud_filter_list; /* lock to protect access to the cloud filter list */
spinlock_t cloud_filter_list_lock;
u16 num_cloud_filters; /* snapshot of "num_active_queues" before setup_tc for qdisc add * is invoked. This information is useful during qdisc del flow, * to restore correct number of queues
*/ int orig_num_active_queues;
#define IAVF_MAX_FDIR_FILTERS 128 /* max allowed Flow Director filters */
u16 fdir_active_fltr;
u16 raw_fdir_active_fltr; struct list_head fdir_list_head;
spinlock_t fdir_fltr_lock; /* protect the Flow Director filter list */
struct list_head adv_rss_list_head;
spinlock_t adv_rss_lock; /* protect the RSS management list */
};
/* Must be called with fdir_fltr_lock lock held.
 * Returns true when no more Flow Director filters may be added:
 * ethtool-added and raw filters share the IAVF_MAX_FDIR_FILTERS budget.
 */
static inline bool iavf_fdir_max_reached(struct iavf_adapter *adapter)
{
	return adapter->fdir_active_fltr + adapter->raw_fdir_active_fltr >=
	       IAVF_MAX_FDIR_FILTERS;
}
/* needed by iavf_ethtool.c */
extern char iavf_driver_name[];
staticinlineconstchar *iavf_state_str(enum iavf_state_t state)
{ switch (state) { case __IAVF_STARTUP: return"__IAVF_STARTUP"; case __IAVF_REMOVE: return"__IAVF_REMOVE"; case __IAVF_INIT_VERSION_CHECK: return"__IAVF_INIT_VERSION_CHECK"; case __IAVF_INIT_GET_RESOURCES: return"__IAVF_INIT_GET_RESOURCES"; case __IAVF_INIT_EXTENDED_CAPS: return"__IAVF_INIT_EXTENDED_CAPS"; case __IAVF_INIT_CONFIG_ADAPTER: return"__IAVF_INIT_CONFIG_ADAPTER"; case __IAVF_INIT_SW: return"__IAVF_INIT_SW"; case __IAVF_INIT_FAILED: return"__IAVF_INIT_FAILED"; case __IAVF_RESETTING: return"__IAVF_RESETTING"; case __IAVF_COMM_FAILED: return"__IAVF_COMM_FAILED"; case __IAVF_DOWN: return"__IAVF_DOWN"; case __IAVF_DOWN_PENDING: return"__IAVF_DOWN_PENDING"; case __IAVF_TESTING: return"__IAVF_TESTING"; case __IAVF_RUNNING: return"__IAVF_RUNNING"; default: return"__IAVF_UNKNOWN_STATE";
}
}
/*
 * NOTE(review): the following is trailing website boilerplate that was
 * appended to this header by the extraction tool; it is not part of the
 * driver. Original German text, translated: "The information on this web
 * page was compiled carefully to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Remark: the colored syntax rendering and the
 * measurement are still experimental."
 */