/* VF -> PF device information passed during the acquire handshake. */
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_PRE_FP_HSI	BIT(0)	/* VF pre-FP hsi version */
#define VFPF_ACQUIRE_CAP_100G		BIT(1)	/* VF can support 100g */

	/* A requirement for supporting multi-Tx queues on a single queue-zone,
	 * VF would pass qids as additional information whenever passing queue
	 * references.
	 */
#define VFPF_ACQUIRE_CAP_QUEUE_QIDS	BIT(2)

	/* The VF is using the physical bar. While this is mostly internal
	 * to the VF, might affect the number of CIDs supported assuming
	 * QUEUE_QIDS is set.
	 */
#define VFPF_ACQUIRE_CAP_PHYSICAL_BAR	BIT(3)

	/* Bitmask of the VFPF_ACQUIRE_CAP_* flags above */
	u64 capabilities;

	/* VF's fastpath firmware version */
	u8 fw_major;
	u8 fw_minor;
	u8 fw_revision;
	u8 fw_engineering;

	u32 driver_version;
	u16 opaque_fid;		/* ME register value */
	u8 os_type;		/* VFPF_ACQUIRE_OS_* value */

	/* Fastpath HSI version the VF was built against */
	u8 eth_fp_hsi_major;
	u8 eth_fp_hsi_minor;
	u8 padding[3];
} vfdev_info;
	/* NOTE(review): the opening "struct ... {" line of this pfdev_info
	 * member is not visible in this chunk — the fields below are the
	 * PF -> VF acquire-response information. Verify against the
	 * original header.
	 */
	/* Bitmask of the PFVF_ACQUIRE_CAP_* flags below */
	u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED	BIT(0)
#define PFVF_ACQUIRE_CAP_100G			BIT(1)	/* If set, 100g PF */

	/* There are old PF versions where the PF might mistakenly override the sanity
	 * mechanism [version-based] and allow a VF that can't be supported to pass
	 * the acquisition phase.
	 * To overcome this, PFs now indicate that they're past that point and the new
	 * VFs would fail probe on the older PFs that fail to do so.
	 */
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE	BIT(2)

	/* PF expects queues to be received with additional qids */
#define PFVF_ACQUIRE_CAP_QUEUE_QIDS		BIT(3)

	u16 db_size;
	u8 indices_per_sb;
	u8 os_type;

	/* These should match the PF's qed_dev values */
	u16 chip_rev;
	u8 dev_type;

	/* Doorbell bar size configured in HW: log(size) or 0 */
	u8 bar_size;

	struct pfvf_stats_info stats_info;

	u8 port_mac[ETH_ALEN];

	/* It's possible PF had to configure an older fastpath HSI
	 * [in case VF is newer than PF]. This is communicated back
	 * to the VF. It can also be used in case of error due to
	 * non-matching versions to shed light in VF about failure.
	 */
	u8 major_fp_hsi;
	u8 minor_fp_hsi;
} pfdev_info;
/* PF reply to a VF queue-start request. */
struct pfvf_start_queue_resp_tlv {
	struct pfvf_tlv hdr;

	u32 offset;		/* offset to consumer/producer of queue */
	u8 padding[4];
};
/* Extended queue information - additional index for reference inside qzone.
 * If communicated between VF/PF, each TLV relating to queues should be
 * extended by one such [or have a future base TLV that already contains info].
 */
struct vfpf_qid_tlv {
	struct channel_tlv tl;
	u8 qid;			/* queue index inside the queue-zone */
	u8 padding[3];
};
	/* NOTE(review): the opening "struct ... {" line of this TLV is not
	 * visible in this chunk; only its trailing fields appear below.
	 */
	/* There are only 256 approx bins, and in HSI they're divided into
	 * 32-bit values. As old VFs used to set-bit to the values on its side,
	 * the upper half of the array is never expected to contain any data.
	 */
	u64 bins[4];
	u64 obsolete_bins[4];
};
/* Primary tlv as a header for various extended tlvs for
 * various functionalities in vport update ramrod.
 */
struct vfpf_vport_update_tlv {
	struct vfpf_first_tlv first_tlv;
};
/* Bit positions used in the bulletin board's valid_bitmap. */
enum qed_bulletin_bit {
	/* Alert the VF that a forced MAC was set by the PF */
	MAC_ADDR_FORCED = 0,

	/* Alert the VF that a forced VLAN was set by the PF */
	VLAN_ADDR_FORCED = 2,

	/* Indicate that `default_only_untagged' contains actual data */
	VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
	VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,

	/* Alert the VF that suggested mac was sent by the PF.
	 * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
	 */
	VFPF_BULLETIN_MAC_ADDR = 5
};
/* Bulletin board content published by the PF and read by the VF. */
struct qed_bulletin_content {
	/* crc of structure to ensure is not in mid-update */
	u32 crc;

	u32 version;

	/* bitmap indicating which fields hold valid values */
	u64 valid_bitmap;

	/* used for MAC_ADDR or MAC_ADDR_FORCED */
	u8 mac[ETH_ALEN];

	/* If valid, 1 => only untagged Rx if no vlan is configured */
	u8 default_only_untagged;
	u8 padding;

	/* The following is a 'copy' of qed_mcp_link_state,
	 * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
	 * possible the structs will increase further along the road we cannot
	 * have it here; Instead we need to have all of its fields.
	 */
	u8 req_autoneg;
	u8 req_autoneg_pause;
	u8 req_forced_rx;
	u8 req_forced_tx;
	u8 padding2[4];

	/* Required for iterating over vport-update tlvs.
	 * Will break in case non-sequential vport-update tlvs.
	 */
	/* NOTE(review): this chunk appears garbled here — the line below is
	 * an enum member (presumably from the CHANNEL_TLV enumeration), not a
	 * struct field, and the remainder of this struct seems to be missing.
	 * Verify the struct layout against the original header.
	 */
	CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
/* Default number of CIDs [total of both Rx and Tx] to be requested
 * by default, and maximum possible number.
 */
#define QED_ETH_VF_DEFAULT_NUM_CIDS	(32)
#define QED_ETH_VF_MAX_NUM_CIDS		(250)
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
	/* VF -> PF request mailbox and its DMA address */
	union vfpf_tlvs *vf2pf_request;
	dma_addr_t vf2pf_request_phys;

	/* PF -> VF reply mailbox and its DMA address */
	union pfvf_tlvs *pf2vf_reply;
	dma_addr_t pf2vf_reply_phys;

	/* Should be taken whenever the mailbox buffers are accessed */
	struct mutex mutex;

	/* Current write position when composing TLVs into the mailbox */
	u8 *offset;

	/* we set aside a copy of the acquire response */
	struct pfvf_acquire_resp_tlv acquire_resp;

	/* In case PF originates prior to the fp-hsi version comparison,
	 * this has to be propagated as it affects the fastpath.
	 */
	bool b_pre_fp_hsi;

	/* Current day VFs are passing the SBs physical address on vport
	 * start, and as they lack an IGU mapping they need to store the
	 * addresses of previously registered SBs.
	 * Even if we were to change configuration flow, due to backward
	 * compatibility [with older PFs] we'd still need to store these.
	 */
	struct qed_sb_info *sbs_info[PFVF_MAX_SBS_PER_VF];

	/* Determines whether VF utilizes doorbells via limited register
	 * bar or via the doorbell bar.
	 */
	bool b_doorbell_bar;
};
/**
 * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
 *                           Coalesce value '0' will omit the configuration.
 *
 * @p_hwfn: HW device data.
 * @rx_coal: coalesce value in micro second for rx queue.
 * @tx_coal: coalesce value in micro second for tx queue.
 * @p_cid: queue cid.
 *
 * Return: Int.
 */
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
			   u16 rx_coal,
			   u16 tx_coal, struct qed_queue_cid *p_cid);

/**
 * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
 *
 * @p_hwfn: HW device data.
 * @p_coal: coalesce value in micro second for VF queues.
 * @p_cid: queue cid.
 *
 * Return: Int.
 */
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
			   u16 *p_coal, struct qed_queue_cid *p_cid);
/* NOTE(review): the matching #else/#endif for this guard is outside the
 * visible chunk.
 */
#ifdef CONFIG_QED_SRIOV
/**
 * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
 *
 * @p_hwfn: HW device data.
 * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
 *
 * Return: enum _qed_status.
 */
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
 * qed_vf_get_link_params(): Get link parameters for VF from qed.
 *
 * @p_hwfn: HW device data.
 * @params: the link params structure to be filled for the VF.
 *
 * Return: Void.
 */
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_params *params);

/**
 * qed_vf_get_link_state(): Get link state for VF from qed.
 *
 * @p_hwfn: HW device data.
 * @link: the link state structure to be filled for the VF.
 *
 * Return: Void.
 */
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			   struct qed_mcp_link_state *link);

/**
 * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
 *
 * @p_hwfn: HW device data.
 * @p_link_caps: the link capabilities structure to be filled for the VF.
 *
 * Return: Void.
 */
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			  struct qed_mcp_link_capabilities *p_link_caps);
/**
 * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed.
 *
 * @p_hwfn: HW device data.
 * @num_rxqs: allocated RX queues.
 *
 * Return: Void.
 */
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);

/**
 * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed.
 *
 * @p_hwfn: HW device data.
 * @num_txqs: allocated TX queues.
 *
 * Return: Void.
 */
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);

/**
 * qed_vf_get_num_cids(): Get number of available connections
 *                        [both Rx and Tx] for VF.
 *
 * @p_hwfn: HW device data.
 * @num_cids: allocated number of connections.
 *
 * Return: Void.
 */
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
/**
 * qed_vf_get_port_mac(): Get port mac address for VF.
 *
 * @p_hwfn: HW device data.
 * @port_mac: destination location for port mac.
 *
 * Return: Void.
 */
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);

/**
 * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
 *                                for VF by qed.
 *
 * @p_hwfn: HW device data.
 * @num_vlan_filters: allocated VLAN filters.
 *
 * Return: Void.
 */
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
				 u8 *num_vlan_filters);

/**
 * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
 *                               for VF by qed.
 *
 * @p_hwfn: HW device data.
 * @num_mac_filters: allocated MAC filters.
 *
 * Return: Void.
 */
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);

/**
 * qed_vf_check_mac(): Check if VF can set a MAC address.
 *
 * @p_hwfn: HW device data.
 * @mac: Mac.
 *
 * Return: bool.
 */
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
 * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF.
 *
 * @p_hwfn: HW device data.
 * @p_cid: Only relative fields are relevant.
 * @bd_max_bytes: maximum number of bytes per bd.
 * @bd_chain_phys_addr: physical address of bd chain.
 * @cqe_pbl_addr: physical address of pbl.
 * @cqe_pbl_size: pbl size.
 * @pp_prod: pointer to the producer to be used in fastpath.
 *
 * Return: Int.
 */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid,
			u16 bd_max_bytes,
			dma_addr_t bd_chain_phys_addr,
			dma_addr_t cqe_pbl_addr,
			u16 cqe_pbl_size, void __iomem **pp_prod);

/**
 * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to
 *                        the PF.
 *
 * @p_hwfn: HW device data.
 * @p_cid: CID.
 * @pbl_addr: PBL address.
 * @pbl_size: PBL Size.
 * @pp_doorbell: pointer to address to which to write the doorbell to.
 *
 * Return: Int.
 */
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid,
		    dma_addr_t pbl_addr,
		    u16 pbl_size, void __iomem **pp_doorbell);
/**
 * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
 *
 * @p_hwfn: HW device data.
 * @p_cid: CID.
 * @cqe_completion: CQE Completion.
 *
 * Return: Int.
 */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
		       struct qed_queue_cid *p_cid, bool cqe_completion);

/**
 * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
 *
 * @p_hwfn: HW device data.
 * @p_cid: CID.
 *
 * Return: Int.
 */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);

/**
 * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given sb_id.
 *                         For VFs, IGU SBs don't have to be contiguous.
 *
 * @p_hwfn: HW device data.
 * @sb_id: SB ID.
 *
 * Return: u16 - the IGU SB ID.
 */
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);

/**
 * qed_vf_pf_int_cleanup(): clean the SB of the VF.
 *
 * @p_hwfn: HW device data.
 *
 * Return: enum _qed_status.
 */
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
 * __qed_vf_get_link_params(): return the link params in a given bulletin
 *                             board.
 *
 * @p_hwfn: HW device data.
 * @p_params: pointer to a struct to fill with link params.
 * @p_bulletin: Bulletin.
 *
 * Return: Void.
 */
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
			      struct qed_mcp_link_params *p_params,
			      struct qed_bulletin_content *p_bulletin);

/**
 * __qed_vf_get_link_state(): return the link state in a given bulletin
 *                            board.
 *
 * @p_hwfn: HW device data.
 * @p_link: pointer to a struct to fill with link state.
 * @p_bulletin: Bulletin.
 *
 * Return: Void.
 */
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
			     struct qed_mcp_link_state *p_link,
			     struct qed_bulletin_content *p_bulletin);

/**
 * __qed_vf_get_link_caps(): return the link capabilities in a given
 *                           bulletin board.
 *
 * @p_hwfn: HW device data.
 * @p_link_caps: pointer to a struct to fill with link capabilities.
 * @p_bulletin: Bulletin.
 *
 * Return: Void.
 */
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
			    struct qed_mcp_link_capabilities *p_link_caps,
			    struct qed_bulletin_content *p_bulletin);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.