/*
 * Source: drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
 * Linux kernel 6.17.9 (file dated 2025-10-24); extraction header repaired.
 */

 
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/if_vlan.h>
#include <linux/crash_dump.h>

#include <net/rtnetlink.h>

#include "hclge_cmd.h"
#include "hclge_dcb.h"
#include "hclge_main.h"
#include "hclge_mbx.h"
#include "hclge_mdio.h"
#include "hclge_regs.h"
#include "hclge_tm.h"
#include "hclge_err.h"
#include "hnae3.h"
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"

#include "hclge_trace.h"

#define HCLGE_NAME   "hclge"

#define HCLGE_BUF_SIZE_UNIT 256U
#define HCLGE_BUF_MUL_BY 2
#define HCLGE_BUF_DIV_BY 2
#define NEED_RESERVE_TC_NUM 2
#define BUF_MAX_PERCENT  100
#define BUF_RESERVE_PERCENT 90

#define HCLGE_RESET_MAX_FAIL_CNT 5
#define HCLGE_RESET_SYNC_TIME  100
#define HCLGE_PF_RESET_SYNC_TIME 20
#define HCLGE_PF_RESET_SYNC_CNT  1500

#define HCLGE_LINK_STATUS_MS 10

static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
static int hclge_init_vlan_config(struct hclge_dev *hdev);
static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
         unsigned long *addr);
static int hclge_set_default_loopback(struct hclge_dev *hdev);

static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
          int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);

/* HNAE3 algorithm instance for this PF driver.
 * NOTE(review): populated elsewhere in this file — not visible in this chunk.
 */
static struct hnae3_ae_algo ae_algo;

/* Driver-private workqueue; NOTE(review): created/used elsewhere in this file. */
static struct workqueue_struct *hclge_wq;

/* PCI device IDs (Huawei vendor) this PF driver binds to. */
static const struct pci_device_id ae_algo_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);

/* ethtool self-test names; indexed by the HNAE3_LOOP_* values
 * (see hclge_get_strings()).
 */
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
	"External Loopback test",
	"App Loopback test",
	"Serdes serial Loopback test",
	"Serdes parallel Loopback test",
	"Phy Loopback test"
};

/* ethtool MAC statistics table: {name, stats_num, offset}.  An entry is
 * reported only when stats_num <= dev_specs.mac_stats_num (see
 * hclge_comm_get_count()/hclge_comm_get_stats()); offset locates the
 * counter inside hdev->mac_stats via HCLGE_MAC_STATS_FIELD_OFF.
 */
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
	{"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
	{"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
	{"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
	{"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
	{"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
	{"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
	{"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
	{"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
	{"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
	{"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
	{"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
	{"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
	{"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
	{"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
	{"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
	{"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
	{"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
	{"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
	{"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
	{"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
	{"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
	{"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
	{"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
	{"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
	{"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
	{"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
	{"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
	{"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
	{"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
	{"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
	{"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
	{"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
	{"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
	{"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
	{"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
	{"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
	{"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
	{"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
	{"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
	{"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
	{"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
	{"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
	{"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
	{"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
	{"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
	{"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
	{"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
	{"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
	{"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
	{"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
	{"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
	{"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
	{"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
	{"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
	{"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
	{"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
	{"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
	{"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
	{"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
	{"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
	{"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
	{"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
	{"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
	{"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
	{"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
	{"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
	{"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
	{"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
	{"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
	{"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
	{"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
	{"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
	{"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
	{"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
	{"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
	{"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
	{"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
	{"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
	{"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
	{"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
	{"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
	{"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
	{"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
	{"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
	{"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
	{"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
	{"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
	{"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
	{"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
	{"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},

	{"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
	{"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
	{"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
	{"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
	{"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
	{"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
	{"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
	{"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
	{"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
	{"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
	{"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
	{"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
	 HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};

/* Static MAC management table: a single entry matching the LLDP
 * ethertype and its multicast destination MAC, with i_port bit 0 set.
 */
static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
	{
		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
		.ethter_type = cpu_to_le16(ETH_P_LLDP),
		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
		.i_port_bitmap = 0x1,
	},
};

/* Flow-director meta-data fields: { field id, width }.  The second
 * member is presumably the field's key width in bits (total 40) —
 * matches the bit widths used in tuple_key_info below.
 */
static const struct key_info meta_data_key_info[] = {
	{ PACKET_TYPE_ID, 6 },
	{ IP_FRAGEMENT, 1 },
	{ ROCE_TYPE, 1 },
	{ NEXT_KEY, 5 },
	{ VLAN_NUMBER, 2 },
	{ SRC_VPORT, 12 },
	{ DST_VPORT, 12 },
	{ TUNNEL_PACKET, 1 },
};

/* Flow-director tuple fields: { tuple id, key width in bits, key
 * encoding, offset of the value inside struct hclge_fd_rule, offset of
 * the corresponding mask }.  Offsets of -1 mark tuples with no backing
 * field in struct hclge_fd_rule (outer/unsupported tuples).
 */
static const struct key_info tuple_key_info[] = {
	{ OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
	{ OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
	{ OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
	{ OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
	{ OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
	{ OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
	{ OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
	{ INNER_DST_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.dst_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
	{ INNER_SRC_MAC, 48, KEY_OPT_MAC,
	  offsetof(struct hclge_fd_rule, tuples.src_mac),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
	{ INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
	  offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
	{ INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
	{ INNER_ETH_TYPE, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.ether_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
	{ INNER_L2_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l2_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
	{ INNER_IP_TOS, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_tos),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
	{ INNER_IP_PROTO, 8, KEY_OPT_U8,
	  offsetof(struct hclge_fd_rule, tuples.ip_proto),
	  offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
	{ INNER_SRC_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.src_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
	{ INNER_DST_IP, 32, KEY_OPT_IP,
	  offsetof(struct hclge_fd_rule, tuples.dst_ip),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
	{ INNER_L3_RSV, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.l3_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
	{ INNER_SRC_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.src_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
	{ INNER_DST_PORT, 16, KEY_OPT_LE16,
	  offsetof(struct hclge_fd_rule, tuples.dst_port),
	  offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
	{ INNER_L4_RSV, 32, KEY_OPT_LE32,
	  offsetof(struct hclge_fd_rule, tuples.l4_user_def),
	  offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
};

/**
 * hclge_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send command for the command queue; it delegates to
 * the common command layer (hclge_comm_cmd_send).
 *
 * Return: the result of hclge_comm_cmd_send() (0 on success).
 */

int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
{
	return hclge_comm_cmd_send(&hw->hw, desc, num);
}

/* Emit send tracepoints for a batch of @num command descriptors.
 * The first descriptor always carries a regular command header, so it
 * is traced the same way whether or not the command is "special".
 */
static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				 int num, bool is_special)
{
	int idx;

	trace_hclge_pf_cmd_send(hw, desc, 0, num);

	for (idx = 1; idx < num; idx++) {
		if (is_special)
			trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[idx],
							idx, num);
		else
			trace_hclge_pf_cmd_send(hw, &desc[idx], idx, num);
	}
}

/* Emit completion tracepoints for a batch of @num descriptors.
 * Only synchronous commands are traced; the first descriptor is always
 * traced as a regular command descriptor.
 */
static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
				int num, bool is_special)
{
	int idx;

	if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
		return;

	trace_hclge_pf_cmd_get(hw, desc, 0, num);

	for (idx = 1; idx < num; idx++) {
		if (is_special)
			trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[idx],
						       idx, num);
		else
			trace_hclge_pf_cmd_get(hw, &desc[idx], idx, num);
	}
}

/* Tracepoint callbacks handed to the common command-queue layer. */
static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
	.trace_cmd_send = hclge_trace_cmd_send,
	.trace_cmd_get = hclge_trace_cmd_get,
};

static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21

 u64 *data = (u64 *)(&hdev->mac_stats);
 struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
 __le64 *desc_data;
 u32 data_size;
 int ret;
 u32 i;

 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
 if (ret) {
  dev_err(&hdev->pdev->dev,
   "Get MAC pkt stats fail, status = %d.\n", ret);

  return ret;
 }

 /* The first desc has a 64-bit header, so data size need to minus 1 */
 data_size = sizeof(desc) / (sizeof(u64)) - 1;

 desc_data = (__le64 *)(&desc[0].data[0]);
 for (i = 0; i < data_size; i++) {
  /* data memory is continuous becase only the first desc has a
 * header in this command
 */

  *data += le64_to_cpu(*desc_data);
  data++;
  desc_data++;
 }

 return 0;
}

static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
#define HCLGE_REG_NUM_PER_DESC  4

 u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
 u64 *data = (u64 *)(&hdev->mac_stats);
 struct hclge_desc *desc;
 __le64 *desc_data;
 u32 data_size;
 u32 desc_num;
 int ret;
 u32 i;

 /* The first desc has a 64-bit header, so need to consider it */
 desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;

 /* This may be called inside atomic sections,
 * so GFP_ATOMIC is more suitable here
 */

 desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
 if (!desc)
  return -ENOMEM;

 hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
 ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
 if (ret) {
  kfree(desc);
  return ret;
 }

 data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);

 desc_data = (__le64 *)(&desc[0].data[0]);
 for (i = 0; i < data_size; i++) {
  /* data memory is continuous becase only the first desc has a
 * header in this command
 */

  *data += le64_to_cpu(*desc_data);
  data++;
  desc_data++;
 }

 kfree(desc);

 return 0;
}

/* Query the total number of MAC statistics registers from firmware.
 *
 * Return: 0 on success (with *reg_num set), the hclge_cmd_send() error,
 * or -ENODATA when firmware reports zero registers.
 */
static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
	struct hclge_desc desc;
	int ret;

	/* The driver needs the total count of valid + reserved registers,
	 * but old V2 firmware only reports the valid ones; use a fixed
	 * value on V2 devices for compatibility.
	 */
	if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
		*reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query mac statistic reg number, ret = %d\n",
			ret);
		return ret;
	}

	*reg_num = le32_to_cpu(desc.data[0]);
	if (!*reg_num) {
		dev_err(&hdev->pdev->dev,
			"mac statistic reg number is invalid!\n");
		return -ENODATA;
	}

	return 0;
}

/* Refresh hdev->mac_stats, choosing the query method by firmware
 * capability: a non-zero dev_specs.mac_stats_num means the firmware
 * supports the new (complete) statistics acquisition method.
 */
int hclge_mac_update_stats(struct hclge_dev *hdev)
{
	return hdev->ae_dev->dev_specs.mac_stats_num ?
	       hclge_mac_update_stats_complete(hdev) :
	       hclge_mac_update_stats_defective(hdev);
}

/* Count the entries of @strs that this device actually supports: an
 * entry is available iff the firmware exposes at least strs[i].stats_num
 * statistics registers.
 */
static int hclge_comm_get_count(struct hclge_dev *hdev,
				const struct hclge_comm_stats_str strs[],
				u32 size)
{
	u32 avail = hdev->ae_dev->dev_specs.mac_stats_num;
	int count = 0;
	u32 idx;

	for (idx = 0; idx < size; idx++) {
		if (strs[idx].stats_num <= avail)
			count++;
	}

	return count;
}

/* Copy the supported counters described by @strs from hdev->mac_stats
 * into @data, skipping entries the device's firmware does not expose.
 *
 * Return: pointer just past the last value written (for chaining).
 */
static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
				 const struct hclge_comm_stats_str strs[],
				 int size, u64 *data)
{
	u32 avail = hdev->ae_dev->dev_specs.mac_stats_num;
	u64 *out = data;
	int idx;

	for (idx = 0; idx < size; idx++) {
		if (strs[idx].stats_num > avail)
			continue;

		*out++ = HCLGE_STATS_READ(&hdev->mac_stats, strs[idx].offset);
	}

	return out;
}

/* Emit the ethtool name strings for the supported entries of @strs into
 * @data.  Only the ETH_SS_STATS string set is handled; unsupported
 * entries (stats_num above the device's register count) are skipped.
 */
static void hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
				   const struct hclge_comm_stats_str strs[],
				   int size, u8 **data)
{
	u32 avail = hdev->ae_dev->dev_specs.mac_stats_num;
	int idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < size; idx++) {
		if (strs[idx].stats_num <= avail)
			ethtool_puts(data, strs[idx].desc);
	}
}

/* Refresh all statistics for the device: per-queue (TQP) counters when
 * a client is attached, FEC counters, and MAC counters.  Failures are
 * logged but not propagated.
 */
static void hclge_update_stats_for_all(struct hclge_dev *hdev)
{
	struct hnae3_handle *handle = &hdev->vport[0].nic;
	int ret;

	/* TQP stats are only meaningful once a client is attached */
	if (handle->client) {
		ret = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
		if (ret)
			dev_err(&hdev->pdev->dev,
				"Update TQPS stats fail, status = %d.\n",
				ret);
	}

	hclge_update_fec_stats(hdev);

	ret = hclge_mac_update_stats(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n", ret);
}

/* Refresh MAC and TQP statistics for @handle.  A state bit serializes
 * concurrent updaters: a caller that loses the race simply returns and
 * relies on the winner's refresh.  Failures are logged, not propagated.
 */
static void hclge_update_stats(struct hnae3_handle *handle)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
		return;

	ret = hclge_mac_update_stats(hdev);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Update MAC stats fail, status = %d.\n",
			ret);

	ret = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"Update TQPS stats fail, status = %d.\n",
			ret);

	clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
}

/* Return the number of ethtool strings for @stringset.  For
 * ETH_SS_TEST this also records which loopback self-tests the device
 * supports in handle->flags (the count and flags must stay in sync
 * with hclge_get_strings()).
 */
static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
{
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
		HNAE3_SUPPORT_PHY_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
		HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
		HNAE3_SUPPORT_EXTERNAL_LOOPBACK)

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	int count = 0;

	/* Loopback test support rules:
	 * mac: only GE mode support
	 * serdes: all mac mode will support include GE/XGE/LGE/CGE
	 * phy: only support when phy device exist on board
	 */
	if (stringset == ETH_SS_TEST) {
		/* clear loopback bit flags at first */
		handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
		/* app (MAC) loopback: any V2+ device, or GE speeds on V1 */
		if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
		    hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
		}

		/* serial serdes loopback is excluded on H60 hilink hardware */
		if (hdev->ae_dev->dev_specs.hilink_version !=
		    HCLGE_HILINK_H60) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
		}

		/* parallel serdes and external loopback: always supported */
		count += 1;
		handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
		count += 1;
		handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;

		/* phy loopback needs a PHY driver loopback hook or
		 * IMP-side PHY support
		 */
		if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
		     hdev->hw.mac.phydev->drv->set_loopback) ||
		    hnae3_dev_phy_imp_supported(hdev)) {
			count += 1;
			handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
		}
	} else if (stringset == ETH_SS_STATS) {
		count = hclge_comm_get_count(hdev, g_mac_stats_string,
					     ARRAY_SIZE(g_mac_stats_string)) +
			hclge_comm_tqps_get_sset_count(handle);
	}

	return count;
}

/* Fill @data with the ethtool strings for @stringset: statistics names
 * for ETH_SS_STATS, or the supported self-test names (in fixed order:
 * external, app, serdes serial, serdes parallel, phy) for ETH_SS_TEST.
 */
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
			      u8 **data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (stringset == ETH_SS_STATS) {
		hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
				       ARRAY_SIZE(g_mac_stats_string), data);
		hclge_comm_tqps_get_strings(handle, data);
	} else if (stringset == ETH_SS_TEST) {
		/* emission order must match hclge_get_sset_count() */
		static const struct {
			u32 flag;
			u32 idx;
		} loop_strs[] = {
			{ HNAE3_SUPPORT_EXTERNAL_LOOPBACK, HNAE3_LOOP_EXTERNAL },
			{ HNAE3_SUPPORT_APP_LOOPBACK, HNAE3_LOOP_APP },
			{ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK,
			  HNAE3_LOOP_SERIAL_SERDES },
			{ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK,
			  HNAE3_LOOP_PARALLEL_SERDES },
			{ HNAE3_SUPPORT_PHY_LOOPBACK, HNAE3_LOOP_PHY },
		};
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(loop_strs); i++) {
			if (handle->flags & loop_strs[i].flag)
				ethtool_puts(data,
					     hns3_nic_test_strs[loop_strs[i].idx]);
		}
	}
}

/* Fill @data with the MAC statistics values followed by the per-queue
 * (TQP) statistics; layout matches hclge_get_strings(ETH_SS_STATS).
 */
static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u64 *pos;

	pos = hclge_comm_get_stats(hdev, g_mac_stats_string,
				   ARRAY_SIZE(g_mac_stats_string), data);
	pos = hclge_comm_tqps_get_stats(handle, pos);
}

static void hclge_get_mac_stat(struct hnae3_handle *handle,
          struct hns3_mac_stats *mac_stats)
{
 struct hclge_vport *vport = hclge_get_vport(handle);
 struct hclge_dev *hdev = vport->back;

 hclge_update_stats(handle);

 mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
 mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
}

/* Parse a function-status response: record whether this PF is the main
 * PF and extract its MAC id.
 *
 * Return: 0 on success, -EINVAL when the DONE bit is not yet set.
 */
static int hclge_parse_func_status(struct hclge_dev *hdev,
				   struct hclge_func_status_cmd *status)
{
#define HCLGE_MAC_ID_MASK 0xF

	/* status is only meaningful once the DONE bit is set */
	if (!(status->pf_state & HCLGE_PF_STATE_DONE))
		return -EINVAL;

	/* track whether this PF is the main pf */
	if (!(status->pf_state & HCLGE_PF_STATE_MAIN))
		hdev->flag &= ~HCLGE_FLAG_MAIN;
	else
		hdev->flag |= HCLGE_FLAG_MAIN;

	hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;

	return 0;
}

/* Poll firmware for the PF function status until req->pf_state becomes
 * non-zero (PF reset done) or the poll budget is exhausted, then parse
 * whatever status was last returned.  Note the post-increment bound:
 * the loop runs at most HCLGE_QUERY_MAX_CNT + 1 times.
 *
 * Return: 0 on success, the hclge_cmd_send() error, or the
 * hclge_parse_func_status() result.
 */
static int hclge_query_function_status(struct hclge_dev *hdev)
{
#define HCLGE_QUERY_MAX_CNT 5

	struct hclge_func_status_cmd *req;
	struct hclge_desc desc;
	int timeout = 0;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
	req = (struct hclge_func_status_cmd *)desc.data;

	do {
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"query function status failed %d.\n", ret);
			return ret;
		}

		/* Check pf reset is done */
		if (req->pf_state)
			break;
		usleep_range(1000, 2000);
	} while (timeout++ < HCLGE_QUERY_MAX_CNT);

	return hclge_parse_func_status(hdev, req);
}

static int hclge_query_pf_resource(struct hclge_dev *hdev)
{
 struct hclge_pf_res_cmd *req;
 struct hclge_desc desc;
 int ret;

 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 if (ret) {
  dev_err(&hdev->pdev->dev,
   "query pf resource failed %d.\n", ret);
  return ret;
 }

 req = (struct hclge_pf_res_cmd *)desc.data;
 hdev->num_tqps = le16_to_cpu(req->tqp_num) +
    le16_to_cpu(req->ext_tqp_num);
 hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;

 if (req->tx_buf_size)
  hdev->tx_buf_size =
   le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
 else
  hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;

 hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);

 if (req->dv_buf_size)
  hdev->dv_buf_size =
   le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
 else
  hdev->dv_buf_size = HCLGE_DEFAULT_DV;

 hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);

 hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
 if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
  dev_err(&hdev->pdev->dev,
   "only %u msi resources available, not enough for pf(min:2).\n",
   hdev->num_nic_msi);
  return -EINVAL;
 }

 if (hnae3_dev_roce_supported(hdev)) {
  hdev->num_roce_msi =
   le16_to_cpu(req->pf_intr_vector_number_roce);

  /* PF should have NIC vectors and Roce vectors,
 * NIC vectors are queued before Roce vectors.
 */

  hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
 } else {
  hdev->num_msi = hdev->num_nic_msi;
 }

 return 0;
}

static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
{
	/* translation table from the firmware speed encoding to the MAC
	 * speed value (in Mbps)
	 */
	static const struct {
		u8 fw_speed;
		u32 mac_speed;
	} fw_speed_map[] = {
		{HCLGE_FW_MAC_SPEED_10M, HCLGE_MAC_SPEED_10M},
		{HCLGE_FW_MAC_SPEED_100M, HCLGE_MAC_SPEED_100M},
		{HCLGE_FW_MAC_SPEED_1G, HCLGE_MAC_SPEED_1G},
		{HCLGE_FW_MAC_SPEED_10G, HCLGE_MAC_SPEED_10G},
		{HCLGE_FW_MAC_SPEED_25G, HCLGE_MAC_SPEED_25G},
		{HCLGE_FW_MAC_SPEED_40G, HCLGE_MAC_SPEED_40G},
		{HCLGE_FW_MAC_SPEED_50G, HCLGE_MAC_SPEED_50G},
		{HCLGE_FW_MAC_SPEED_100G, HCLGE_MAC_SPEED_100G},
		{HCLGE_FW_MAC_SPEED_200G, HCLGE_MAC_SPEED_200G},
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(fw_speed_map); i++) {
		if (fw_speed_map[i].fw_speed == speed_cmd) {
			*speed = fw_speed_map[i].mac_speed;
			return 0;
		}
	}

	return -EINVAL;
}

/* map each MAC speed value to its "supported speed" ability bit */
static const struct hclge_speed_bit_map speed_bit_map[] = {
	{HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
	{HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
	{HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
	{HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
	{HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
	{HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
	{HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BITS},
	{HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BITS},
	{HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BITS},
};

static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
{
	const struct hclge_speed_bit_map *entry = speed_bit_map;
	const struct hclge_speed_bit_map *end =
		speed_bit_map + ARRAY_SIZE(speed_bit_map);

	/* look up the ability bit matching @speed */
	for (; entry < end; entry++) {
		if (entry->speed == speed) {
			*speed_bit = entry->speed_bit;
			return 0;
		}
	}

	return -EINVAL;
}

static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
{
	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;
	u32 speed_bit = 0;
	int ret;

	ret = hclge_get_speed_bit(speed, &speed_bit);
	if (ret)
		return ret;

	/* reject speeds the MAC does not advertise in its ability mask */
	return (speed_bit & hdev->hw.mac.speed_ability) ? 0 : -EINVAL;
}

static void hclge_update_fec_support(struct hclge_mac *mac)
{
 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
 linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);

 if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
  linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
     mac->supported);
 if (mac->fec_ability & BIT(HNAE3_FEC_RS))
  linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
     mac->supported);
 if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
  linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
     mac->supported);
 if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
  linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
     mac->supported);
}

/* speed-ability bit -> SR (short-reach fiber) ethtool link mode */
static const struct hclge_link_mode_bmap hclge_sr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseSR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseSR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseSR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT},
};

/* speed-ability bit -> LR (long-reach fiber) ethtool link mode */
static const struct hclge_link_mode_bmap hclge_lr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseLR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT,
	 ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT,
	 ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT},
};

/* speed-ability bit -> CR (copper/direct-attach) ethtool link mode */
static const struct hclge_link_mode_bmap hclge_cr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseCR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseCR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseCR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT},
};

/* speed-ability bit -> KR/KX (backplane) ethtool link mode */
static const struct hclge_link_mode_bmap hclge_kr_link_mode_bmap[] = {
	{HCLGE_SUPPORT_1G_BIT, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT},
	{HCLGE_SUPPORT_10G_BIT, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT},
	{HCLGE_SUPPORT_25G_BIT, ETHTOOL_LINK_MODE_25000baseKR_Full_BIT},
	{HCLGE_SUPPORT_40G_BIT, ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_50G_R2_BIT, ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_50G_R1_BIT, ETHTOOL_LINK_MODE_50000baseKR_Full_BIT},
	{HCLGE_SUPPORT_100G_R4_BIT, ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_100G_R2_BIT, ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_EXT_BIT,
	 ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
	{HCLGE_SUPPORT_200G_R4_BIT, ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT},
};

static void hclge_convert_setting_sr(u16 speed_ability,
				     unsigned long *link_mode)
{
	const struct hclge_link_mode_bmap *bmap;
	unsigned int idx;

	/* advertise each SR link mode whose speed-ability bit is set */
	for (idx = 0; idx < ARRAY_SIZE(hclge_sr_link_mode_bmap); idx++) {
		bmap = &hclge_sr_link_mode_bmap[idx];
		if (speed_ability & bmap->support_bit)
			linkmode_set_bit(bmap->link_mode, link_mode);
	}
}

static void hclge_convert_setting_lr(u16 speed_ability,
				     unsigned long *link_mode)
{
	const struct hclge_link_mode_bmap *bmap;
	unsigned int idx;

	/* advertise each LR link mode whose speed-ability bit is set */
	for (idx = 0; idx < ARRAY_SIZE(hclge_lr_link_mode_bmap); idx++) {
		bmap = &hclge_lr_link_mode_bmap[idx];
		if (speed_ability & bmap->support_bit)
			linkmode_set_bit(bmap->link_mode, link_mode);
	}
}

static void hclge_convert_setting_cr(u16 speed_ability,
				     unsigned long *link_mode)
{
	const struct hclge_link_mode_bmap *bmap;
	unsigned int idx;

	/* advertise each CR link mode whose speed-ability bit is set */
	for (idx = 0; idx < ARRAY_SIZE(hclge_cr_link_mode_bmap); idx++) {
		bmap = &hclge_cr_link_mode_bmap[idx];
		if (speed_ability & bmap->support_bit)
			linkmode_set_bit(bmap->link_mode, link_mode);
	}
}

static void hclge_convert_setting_kr(u16 speed_ability,
				     unsigned long *link_mode)
{
	const struct hclge_link_mode_bmap *bmap;
	unsigned int idx;

	/* advertise each KR/KX link mode whose speed-ability bit is set */
	for (idx = 0; idx < ARRAY_SIZE(hclge_kr_link_mode_bmap); idx++) {
		bmap = &hclge_kr_link_mode_bmap[idx];
		if (speed_ability & bmap->support_bit)
			linkmode_set_bit(bmap->link_mode, link_mode);
	}
}

static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
 /* If firmware has reported fec_ability, don't need to convert by speed */
 if (mac->fec_ability)
  goto out;

 switch (mac->speed) {
 case HCLGE_MAC_SPEED_10G:
 case HCLGE_MAC_SPEED_40G:
  mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
       BIT(HNAE3_FEC_NONE);
  break;
 case HCLGE_MAC_SPEED_25G:
 case HCLGE_MAC_SPEED_50G:
  mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
       BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
  break;
 case HCLGE_MAC_SPEED_100G:
  mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
       BIT(HNAE3_FEC_NONE);
  break;
 case HCLGE_MAC_SPEED_200G:
  mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
       BIT(HNAE3_FEC_LLRS);
  break;
 default:
  mac->fec_ability = 0;
  break;
 }

out:
 hclge_update_fec_support(mac);
}

/* Populate mac->supported with the link modes a fiber port can offer,
 * based on the speed-ability mask read from configuration.
 */
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
					u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	/* 1G over fiber is advertised as 1000baseX */
	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
				 mac->supported);

	/* add every SR/LR/CR mode reachable with this speed ability */
	hclge_convert_setting_sr(speed_ability, mac->supported);
	hclge_convert_setting_lr(speed_ability, mac->supported);
	hclge_convert_setting_cr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

/* Populate mac->supported with the link modes a backplane port can offer,
 * based on the speed-ability mask read from configuration.
 */
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
					    u16 speed_ability)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	/* backplane ports use the KR/KX link modes */
	hclge_convert_setting_kr(speed_ability, mac->supported);
	if (hnae3_dev_fec_supported(hdev))
		hclge_convert_setting_fec(mac);

	if (hnae3_dev_pause_supported(hdev))
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);

	linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
}

/* Populate the supported bitmap with the link modes a copper (twisted
 * pair) port can offer, based on the speed-ability mask.
 */
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
					 u16 speed_ability)
{
	unsigned long *supported = hdev->hw.mac.supported;

	/* default to support all speed for GE port */
	if (!speed_ability)
		speed_ability = HCLGE_SUPPORT_GE;

	if (speed_ability & HCLGE_SUPPORT_1G_BIT)
		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
				 supported);

	/* 100M and 10M copper additionally support half duplex */
	if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
				 supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
				 supported);
	}

	if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
	}

	if (hnae3_dev_pause_supported(hdev)) {
		linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
		linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
	}

	/* copper ports autonegotiate over twisted pair */
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
}

static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
	/* dispatch on the media type read from configuration */
	switch (hdev->hw.mac.media_type) {
	case HNAE3_MEDIA_TYPE_FIBER:
		hclge_parse_fiber_link_mode(hdev, speed_ability);
		break;
	case HNAE3_MEDIA_TYPE_COPPER:
		hclge_parse_copper_link_mode(hdev, speed_ability);
		break;
	case HNAE3_MEDIA_TYPE_BACKPLANE:
		hclge_parse_backplane_link_mode(hdev, speed_ability);
		break;
	default:
		break;
	}
}

static u32 hclge_get_max_speed(u16 speed_ability)
{
	/* ability masks ordered from fastest to slowest */
	static const struct {
		u32 ability_bits;
		u32 speed;
	} speed_rank[] = {
		{HCLGE_SUPPORT_200G_BITS, HCLGE_MAC_SPEED_200G},
		{HCLGE_SUPPORT_100G_BITS, HCLGE_MAC_SPEED_100G},
		{HCLGE_SUPPORT_50G_BITS, HCLGE_MAC_SPEED_50G},
		{HCLGE_SUPPORT_40G_BIT, HCLGE_MAC_SPEED_40G},
		{HCLGE_SUPPORT_25G_BIT, HCLGE_MAC_SPEED_25G},
		{HCLGE_SUPPORT_10G_BIT, HCLGE_MAC_SPEED_10G},
		{HCLGE_SUPPORT_1G_BIT, HCLGE_MAC_SPEED_1G},
		{HCLGE_SUPPORT_100M_BIT, HCLGE_MAC_SPEED_100M},
		{HCLGE_SUPPORT_10M_BIT, HCLGE_MAC_SPEED_10M},
	};
	unsigned int i;

	/* first supported entry wins */
	for (i = 0; i < ARRAY_SIZE(speed_rank); i++)
		if (speed_ability & speed_rank[i].ability_bits)
			return speed_rank[i].speed;

	/* nothing advertised: fall back to 1G */
	return HCLGE_MAC_SPEED_1G;
}

/* Decode the raw configuration words returned by HCLGE_OPC_GET_CFG_PARAM
 * into @cfg. desc[0] carries TC/queue/PHY/MAC-address fields, desc[1]
 * carries NUMA map, speed ability and buffer sizing fields.
 */
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
#define HCLGE_TX_SPARE_SIZE_UNIT		4096
#define SPEED_ABILITY_EXT_SHIFT			8

	struct hclge_cfg_param_cmd *req;
	u64 mac_addr_tmp_high;
	u16 speed_ability_ext;
	u64 mac_addr_tmp;
	unsigned int i;

	req = (struct hclge_cfg_param_cmd *)desc[0].data;

	/* get the configuration */
	cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
				      HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
	cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
					    HCLGE_CFG_TQP_DESC_N_M,
					    HCLGE_CFG_TQP_DESC_N_S);

	cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
					HCLGE_CFG_PHY_ADDR_M,
					HCLGE_CFG_PHY_ADDR_S);
	cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_MEDIA_TP_M,
					  HCLGE_CFG_MEDIA_TP_S);
	cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
					  HCLGE_CFG_RX_BUF_LEN_M,
					  HCLGE_CFG_RX_BUF_LEN_S);
	/* get mac_address: low 32 bits in param[2], high bits in param[3] */
	mac_addr_tmp = __le32_to_cpu(req->param[2]);
	mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
					    HCLGE_CFG_MAC_ADDR_H_M,
					    HCLGE_CFG_MAC_ADDR_H_S);

	/* place the high MAC bits above the low 32 bits */
	mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;

	cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
					     HCLGE_CFG_DEFAULT_SPEED_M,
					     HCLGE_CFG_DEFAULT_SPEED_S);
	cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
					       HCLGE_CFG_RSS_SIZE_M,
					       HCLGE_CFG_RSS_SIZE_S);

	/* MAC address bytes are stored little-endian in mac_addr_tmp */
	for (i = 0; i < ETH_ALEN; i++)
		cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;

	req = (struct hclge_cfg_param_cmd *)desc[1].data;
	cfg->numa_node_map = __le32_to_cpu(req->param[0]);

	cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
					     HCLGE_CFG_SPEED_ABILITY_M,
					     HCLGE_CFG_SPEED_ABILITY_S);
	speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
					    HCLGE_CFG_SPEED_ABILITY_EXT_M,
					    HCLGE_CFG_SPEED_ABILITY_EXT_S);
	/* the extended ability bits occupy the high byte */
	cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;

	cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
					       HCLGE_CFG_VLAN_FLTR_CAP_M,
					       HCLGE_CFG_VLAN_FLTR_CAP_S);

	cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
					 HCLGE_CFG_UMV_TBL_SPACE_M,
					 HCLGE_CFG_UMV_TBL_SPACE_S);

	cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
					       HCLGE_CFG_PF_RSS_SIZE_M,
					       HCLGE_CFG_PF_RSS_SIZE_S);

	/* HCLGE_CFG_PF_RSS_SIZE_M encodes the PF max rss size as a power
	 * of 2 exponent, instead of the value itself; this is more
	 * flexible for future changes and expansions.
	 * When the PF field is 0, PF and VF share the same max rss size
	 * field (HCLGE_CFG_RSS_SIZE_S).
	 */
	cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
			       1U << cfg->pf_rss_size_max :
			       cfg->vf_rss_size_max;

	/* The unit of the tx spare buffer size queried from configuration
	 * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
	 * needed here.
	 */
	cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
						 HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
	cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
}

/* hclge_get_cfg: query the static parameters from flash
 * @hdev: pointer to struct hclge_dev
 * @hcfg: the config structure to be filled in
 */
static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
{
 struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
 struct hclge_cfg_param_cmd *req;
 unsigned int i;
 int ret;

 for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
  u32 offset = 0;

  req = (struct hclge_cfg_param_cmd *)desc[i].data;
  hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
        true);
  hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
    HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
  /* Len should be united by 4 bytes when send to hardware */
  hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
    HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
  req->offset = cpu_to_le32(offset);
 }

 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
 if (ret) {
  dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
  return ret;
 }

 hclge_parse_cfg(hcfg, desc);

 return 0;
}

/* Fill dev_specs with compiled-in defaults. Used for devices below
 * version V3, which cannot report their specifications via firmware.
 */
static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
{
#define HCLGE_MAX_NON_TSO_BD_NUM			8U

	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
	ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
	ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
	ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
	ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
	ae_dev->dev_specs.tnl_num = 0;
}

/* Copy the firmware-reported device specifications, spread over two
 * descriptors, into ae_dev->dev_specs. Multi-byte fields are
 * little-endian; fields read without conversion are single bytes.
 */
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
				  struct hclge_desc *desc)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_dev_specs_0_cmd *req0;
	struct hclge_dev_specs_1_cmd *req1;

	req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
	req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;

	ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
	ae_dev->dev_specs.rss_ind_tbl_size =
		le16_to_cpu(req0->rss_ind_tbl_size);
	ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
	ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
	ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
	ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
	ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
	ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
	ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
	ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
	ae_dev->dev_specs.tnl_num = req1->tnl_num;
	ae_dev->dev_specs.hilink_version = req1->hilink_version;
}

/* Backfill any spec the firmware left at zero (i.e. unreported) with the
 * corresponding driver default.
 */
static void hclge_check_dev_specs(struct hclge_dev *hdev)
{
	struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;

	if (!dev_specs->max_non_tso_bd_num)
		dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
	if (!dev_specs->rss_ind_tbl_size)
		dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
	if (!dev_specs->rss_key_size)
		dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
	if (!dev_specs->max_tm_rate)
		dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
	if (!dev_specs->max_qset_num)
		dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
	if (!dev_specs->max_int_gl)
		dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
	if (!dev_specs->max_frm_size)
		dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
	if (!dev_specs->umv_size)
		dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}

static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
{
	u32 reg_num = 0;
	int ret;

	/* Query how many MAC statistics registers the firmware exposes.
	 * -EOPNOTSUPP (older firmware) is tolerated: reg_num stays 0 and
	 * zero is recorded as the stats count.
	 * Fix: the argument had been corrupted to "(R)_num" (an HTML
	 * "&reg;" entity); it must be the address of reg_num.
	 */
	ret = hclge_mac_query_reg_num(hdev, &reg_num);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
	return 0;
}

static int hclge_query_dev_specs(struct hclge_dev *hdev)
{
 struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
 int ret;
 int i;

 ret = hclge_query_mac_stats_num(hdev);
 if (ret)
  return ret;

 /* set default specifications as devices lower than version V3 do not
 * support querying specifications from firmware.
 */

 if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
  hclge_set_default_dev_specs(hdev);
  return 0;
 }

 for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
  hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
        true);
  desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
 }
 hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);

 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
 if (ret)
  return ret;

 hclge_parse_dev_specs(hdev, desc);
 hclge_check_dev_specs(hdev);

 return 0;
}

static int hclge_get_cap(struct hclge_dev *hdev)
{
	int ret;

	/* function status must be known before PF resources are queried */
	ret = hclge_query_function_status(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"query function status error %d.\n", ret);
		return ret;
	}

	return hclge_query_pf_resource(hdev);
}

static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
{
#define HCLGE_MIN_TX_DESC	64
#define HCLGE_MIN_RX_DESC	64

	if (!is_kdump_kernel())
		return;

	dev_info(&hdev->pdev->dev,
		 "Running kdump kernel. Using minimal resources\n");

	/* shrink to one queue pair per vport and minimal ring sizes so a
	 * crash kernel can bring the device up with little memory
	 */
	hdev->num_tqps = hdev->num_req_vfs + 1;
	hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
	hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
}

static void hclge_init_tc_config(struct hclge_dev *hdev)
{
 unsigned int i;

 if (hdev->tc_max > HNAE3_MAX_TC ||
     hdev->tc_max < 1) {
  dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
    hdev->tc_max);
  hdev->tc_max = 1;
 }

 /* Dev does not support DCB */
 if (!hnae3_dev_dcb_supported(hdev)) {
  hdev->tc_max = 1;
  hdev->pfc_max = 0;
 } else {
  hdev->pfc_max = hdev->tc_max;
 }

 hdev->tm_info.num_tc = 1;

 /* Currently not support uncontiuous tc */
 for (i = 0; i < hdev->tm_info.num_tc; i++)
  hnae3_set_bit(hdev->hw_tc_map, i, 1);

 hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
}

/* Read the static configuration from flash and copy it into hdev,
 * then derive link modes, max speed and TC defaults from it.
 */
static int hclge_configure(struct hclge_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclge_cfg cfg;
	int ret;

	ret = hclge_get_cfg(hdev, &cfg);
	if (ret)
		return ret;

	hdev->base_tqp_pid = 0;
	hdev->vf_rss_size_max = cfg.vf_rss_size_max;
	hdev->pf_rss_size_max = cfg.pf_rss_size_max;
	hdev->rx_buf_len = cfg.rx_buf_len;
	ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
	hdev->hw.mac.media_type = cfg.media_type;
	hdev->hw.mac.phy_addr = cfg.phy_addr;
	hdev->num_tx_desc = cfg.tqp_desc_num;
	hdev->num_rx_desc = cfg.tqp_desc_num;
	hdev->tm_info.num_pg = 1;
	hdev->tc_max = cfg.tc_num;
	hdev->tm_info.hw_pfc_map = 0;
	/* a zero umv_space from config means "use the device-spec default" */
	if (cfg.umv_space)
		hdev->wanted_umv_size = cfg.umv_space;
	else
		hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
	hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
	hdev->gro_en = true;
	if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
		set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);

	/* flow director starts enabled but with no active rule type */
	if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
		hdev->fd_en = true;
		hdev->fd_active_type = HCLGE_FD_RULE_NONE;
	}

	ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
			cfg.default_speed, ret);
		return ret;
	}
	/* requested link settings start as autoneg/full at the config speed */
	hdev->hw.mac.req_speed = hdev->hw.mac.speed;
	hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
	hdev->hw.mac.req_duplex = DUPLEX_FULL;

	hclge_parse_link_mode(hdev, cfg.speed_ability);

	hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);

	hclge_init_tc_config(hdev);
	hclge_init_kdump_kernel_config(hdev);

	return ret;
}

static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
			    u16 tso_mss_max)
{
	struct hclge_cfg_tso_status_cmd *cfg;
	struct hclge_desc cmd_desc;

	/* program the MSS window the hardware accepts for TSO */
	hclge_cmd_setup_basic_desc(&cmd_desc, HCLGE_OPC_TSO_GENERIC_CONFIG,
				   false);

	cfg = (struct hclge_cfg_tso_status_cmd *)cmd_desc.data;
	cfg->tso_mss_min = cpu_to_le16(tso_mss_min);
	cfg->tso_mss_max = cpu_to_le16(tso_mss_max);

	return hclge_cmd_send(&hdev->hw, &cmd_desc, 1);
}

static int hclge_config_gro(struct hclge_dev *hdev)
{
 struct hclge_cfg_gro_status_cmd *req;
 struct hclge_desc desc;
 int ret;

 if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
  return 0;

 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
 req = (struct hclge_cfg_gro_status_cmd *)desc.data;

 req->gro_en = hdev->gro_en ? 1 : 0;

 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 if (ret)
  dev_err(&hdev->pdev->dev,
   "GRO hardware config cmd failed, ret = %d\n", ret);

 return ret;
}

static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
 struct hclge_comm_tqp *tqp;
 int i;

 hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
      sizeof(struct hclge_comm_tqp), GFP_KERNEL);
 if (!hdev->htqp)
  return -ENOMEM;

 tqp = hdev->htqp;

 for (i = 0; i < hdev->num_tqps; i++) {
  tqp->dev = &hdev->pdev->dev;
  tqp->index = i;

  tqp->q.ae_algo = &ae_algo;
  tqp->q.buf_size = hdev->rx_buf_len;
  tqp->q.tx_desc_num = hdev->num_tx_desc;
  tqp->q.rx_desc_num = hdev->num_rx_desc;

  /* need an extended offset to configure queues >=
 * HCLGE_TQP_MAX_SIZE_DEV_V2
 */

  if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
   tqp->q.io_base = hdev->hw.hw.io_base +
      HCLGE_TQP_REG_OFFSET +
      i * HCLGE_TQP_REG_SIZE;
  else
   tqp->q.io_base = hdev->hw.hw.io_base +
      HCLGE_TQP_REG_OFFSET +
      HCLGE_TQP_EXT_REG_OFFSET +
      (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
      HCLGE_TQP_REG_SIZE;

  /* when device supports tx push and has device memory,
 * the queue can execute push mode or doorbell mode on
 * device memory.
 */

  if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
   tqp->q.mem_base = hdev->hw.hw.mem_base +
       HCLGE_TQP_MEM_OFFSET(hdev, i);

  tqp++;
 }

 return 0;
}

static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
				  u16 tqp_pid, u16 tqp_vid, bool is_pf)
{
	struct hclge_tqp_map_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);

	/* bind physical queue @tqp_pid to function @func_id as virtual
	 * queue @tqp_vid; VF mappings additionally set the map-type bit
	 */
	map = (struct hclge_tqp_map_cmd *)desc.data;
	map->tqp_id = cpu_to_le16(tqp_pid);
	map->tqp_vf = func_id;
	map->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
	if (!is_pf)
		map->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
	map->tqp_vid = cpu_to_le16(tqp_vid);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);

	return ret;
}

static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int alloced = 0;
	int i;

	/* claim up to @num_tqps still-unused physical queues for this vport */
	for (i = 0; i < hdev->num_tqps && alloced < num_tqps; i++) {
		struct hclge_comm_tqp *htqp = &hdev->htqp[i];

		if (htqp->alloced)
			continue;

		htqp->q.handle = &vport->nic;
		htqp->q.tqp_index = alloced;
		htqp->q.tx_desc_num = kinfo->num_tx_desc;
		htqp->q.rx_desc_num = kinfo->num_rx_desc;
		kinfo->tqp[alloced] = &htqp->q;
		htqp->alloced = true;
		alloced++;
	}
	vport->alloc_tqps = alloced;

	/* rss_size is bounded by the per-TC queue share ... */
	kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
				vport->alloc_tqps / hdev->tm_info.num_tc);

	/* ... and by the per-TC interrupt share, to keep a one-to-one
	 * mapping between irq and queue at default
	 */
	kinfo->rss_size = min_t(u16, kinfo->rss_size,
				(hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);

	return 0;
}

static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
			    u16 num_tx_desc, u16 num_rx_desc)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;

	kinfo->num_tx_desc = num_tx_desc;
	kinfo->num_rx_desc = num_rx_desc;
	kinfo->rx_buf_len = hdev->rx_buf_len;
	kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;

	/* array of pointers into hdev->htqp, filled by hclge_assign_tqp() */
	kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
				  sizeof(struct hnae3_queue *), GFP_KERNEL);
	if (!kinfo->tqp)
		return -ENOMEM;

	ret = hclge_assign_tqp(vport, num_tqps);
	if (ret)
		dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);

	return ret;
}

static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
      struct hclge_vport *vport)
{
 struct hnae3_handle *nic = &vport->nic;
 struct hnae3_knic_private_info *kinfo;
 u16 i;

 kinfo = &nic->kinfo;
 for (i = 0; i < vport->alloc_tqps; i++) {
  struct hclge_comm_tqp *q =
   container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
  bool is_pf;
  int ret;

  is_pf = !(vport->vport_id);
  ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
          i, is_pf);
  if (ret)
   return ret;
 }

 return 0;
}

static int hclge_map_tqp(struct hclge_dev *hdev)
{
	u16 num_vport = hdev->num_req_vfs + 1;
	u16 i;

	/* vport 0 is the PF, the rest are VFs; map each vport's queues */
	for (i = 0; i < num_vport; i++) {
		int ret = hclge_map_tqp_to_vport(hdev, &hdev->vport[i]);

		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
{
	struct hclge_dev *hdev = vport->back;
	struct hnae3_handle *nic = &vport->nic;
	int ret;

	/* initialize the knic handle before assigning its queues */
	nic->pdev = hdev->pdev;
	nic->ae_algo = &ae_algo;
	bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
		    MAX_NUMNODES);
	nic->kinfo.io_base = hdev->hw.hw.io_base;

	ret = hclge_knic_setup(vport, num_tqps,
			       hdev->num_tx_desc, hdev->num_rx_desc);
	if (ret)
		dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);

	return ret;
}

/* Allocate and initialize one vport per function (PF + requested VFs)
 * and distribute the device's queue pairs among them.
 */
static int hclge_alloc_vport(struct hclge_dev *hdev)
{
	struct pci_dev *pdev = hdev->pdev;
	struct hclge_vport *vport;
	u32 tqp_main_vport;
	u32 tqp_per_vport;
	int num_vport, i;
	int ret;

	/* We need to alloc a vport for main NIC of PF */
	num_vport = hdev->num_req_vfs + 1;

	if (hdev->num_tqps < num_vport) {
		dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
			hdev->num_tqps, num_vport);
		return -EINVAL;
	}

	/* Alloc the same number of TQPs for every vport; the main (PF)
	 * vport additionally takes the remainder
	 */
	tqp_per_vport = hdev->num_tqps / num_vport;
	tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;

	vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
			     GFP_KERNEL);
	if (!vport)
		return -ENOMEM;

	hdev->vport = vport;
	hdev->num_alloc_vport = num_vport;

	if (IS_ENABLED(CONFIG_PCI_IOV))
		hdev->num_alloc_vfs = hdev->num_req_vfs;

	for (i = 0; i < num_vport; i++) {
		vport->back = hdev;
		vport->vport_id = i;
		vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
		vport->mps = HCLGE_MAC_DEFAULT_FRAME;
		vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
		vport->port_base_vlan_cfg.tbl_sta = true;
		vport->rxvlan_cfg.rx_vlan_offload_en = true;
		vport->req_vlan_fltr_en = true;
		INIT_LIST_HEAD(&vport->vlan_list);
		INIT_LIST_HEAD(&vport->uc_mac_list);
		INIT_LIST_HEAD(&vport->mc_mac_list);
		spin_lock_init(&vport->mac_list_lock);

		/* vport 0 is the PF and gets the larger queue share */
		if (i == 0)
			ret = hclge_vport_setup(vport, tqp_main_vport);
		else
			ret = hclge_vport_setup(vport, tqp_per_vport);
		if (ret) {
			dev_err(&pdev->dev,
				"vport setup failed for vport %d, %d\n",
				i, ret);
			return ret;
		}

		vport++;
	}

	return 0;
}

static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
        struct hclge_pkt_buf_alloc *buf_alloc)
{
/* TX buffer size is unit by 128 byte */
#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
 struct hclge_tx_buff_alloc_cmd *req;
 struct hclge_desc desc;
 int ret;
 u8 i;

 req = (struct hclge_tx_buff_alloc_cmd *)desc.data;

 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
  u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;

  req->tx_pkt_buff[i] =
   cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
         HCLGE_BUF_SIZE_UPDATE_EN_MSK);
 }

 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 if (ret)
  dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
   ret);

 return ret;
}

/* Wrapper that issues the TX buffer allocation command and logs on
 * failure.  Returns 0 on success or a negative error code.
 */
static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
				 struct hclge_pkt_buf_alloc *buf_alloc)
{
	int ret;

	ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
	if (ret)
		dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);

	return ret;
}

/* Count the traffic classes enabled in the hardware TC bitmap. */
static u32 hclge_get_tc_num(struct hclge_dev *hdev)
{
	u32 num = 0;
	unsigned int tc;

	for (tc = 0; tc < HCLGE_MAX_TC_NUM; tc++) {
		if (hdev->hw_tc_map & BIT(tc))
			num++;
	}

	return num;
}

/* Get the number of pfc enabled TCs, which have private buffer */
/* Get the number of pfc enabled TCs, which have private buffer */
static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
				  struct hclge_pkt_buf_alloc *buf_alloc)
{
	unsigned int tc;
	int num = 0;

	for (tc = 0; tc < HCLGE_MAX_TC_NUM; tc++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[tc];

		/* count TCs that are PFC-enabled and own a private buffer */
		if (priv->enable && (hdev->tm_info.hw_pfc_map & BIT(tc)))
			num++;
	}

	return num;
}

/* Get the number of pfc disabled TCs, which have private buffer */
static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
         struct hclge_pkt_buf_alloc *buf_alloc)
{
 struct hclge_priv_buf *priv;
 unsigned int i;
 int cnt = 0;

 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
  priv = &buf_alloc->priv_buf[i];
  if (hdev->hw_tc_map & BIT(i) &&
      !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
      priv->enable)
   cnt++;
 }

 return cnt;
}

/* Sum the RX private buffer sizes of all enabled TCs. */
static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 total = 0;
	int idx;

	for (idx = 0; idx < HCLGE_MAX_TC_NUM; idx++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[idx];

		if (priv->enable)
			total += priv->buf_size;
	}

	return total;
}

/* Sum the TX buffer sizes allocated across all TCs. */
static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 total = 0;
	u32 idx;

	for (idx = 0; idx < HCLGE_MAX_TC_NUM; idx++)
		total += buf_alloc->priv_buf[idx].tx_buf_size;

	return total;
}

/* Check whether the RX buffer budget @rx_all can hold both the private
 * buffers already assigned in @buf_alloc and the minimum required shared
 * buffer; if so, fill in the shared-buffer size and its self/per-TC
 * high/low waterlines.
 *
 * Returns true (with buf_alloc->s_buf populated) when the budget fits,
 * false otherwise.
 */
static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc,
				u32 rx_all)
{
	u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
	u32 tc_num = hclge_get_tc_num(hdev);
	u32 shared_buf, aligned_mps;
	u32 rx_priv;
	int i;

	/* work on the MPS rounded up to the buffer allocation unit */
	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);

	/* minimum shared buffer: DCB devices reserve dv_buf_size on top of
	 * a multiple of MPS; non-DCB devices use a fixed additional amount
	 */
	if (hnae3_dev_dcb_supported(hdev))
		shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
					hdev->dv_buf_size;
	else
		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
					+ hdev->dv_buf_size;

	/* per-TC requirement: one MPS per TC plus one extra MPS */
	shared_buf_tc = tc_num * aligned_mps + aligned_mps;
	/* required shared size is the larger of the two, unit-aligned up */
	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
			     HCLGE_BUF_SIZE_UNIT);

	/* budget must cover private buffers plus the required shared size */
	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
	if (rx_all < rx_priv + shared_std)
		return false;

	/* whatever is left (unit-aligned down) becomes the shared buffer */
	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
	buf_alloc->s_buf.buf_size = shared_buf;
	if (hnae3_dev_dcb_supported(hdev)) {
		/* high waterline leaves dv_buf_size headroom; low waterline
		 * sits half an MPS (unit-aligned) below it
		 */
		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
			- roundup(aligned_mps / HCLGE_BUF_DIV_BY,
				  HCLGE_BUF_SIZE_UNIT);
	} else {
		buf_alloc->s_buf.self.high = aligned_mps +
						HCLGE_NON_DCB_ADDITIONAL_BUF;
		buf_alloc->s_buf.self.low = aligned_mps;
	}

	if (hnae3_dev_dcb_supported(hdev)) {
		hi_thrd = shared_buf - hdev->dv_buf_size;

		/* with few TCs, keep a reserve by using only 90% */
		if (tc_num <= NEED_RESERVE_TC_NUM)
			hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
					/ BUF_MAX_PERCENT;

		/* split the threshold evenly between the active TCs */
		if (tc_num)
			hi_thrd = hi_thrd / tc_num;

		/* but never below twice the MPS, aligned down to the unit */
		hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
		hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
		lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
	} else {
		hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
		lo_thrd = aligned_mps;
	}

	/* apply the same thresholds to every TC */
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
		buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
		buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
	}

	return true;
}

/* Assign hdev->tx_buf_size as the private TX buffer of every TC that is
 * enabled in the hardware TC bitmap (disabled TCs get zero), charging
 * each allocation against the device's total packet buffer.
 *
 * Returns 0 on success, -ENOMEM if the packet buffer cannot cover all
 * enabled TCs.
 */
static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
				struct hclge_pkt_buf_alloc *buf_alloc)
{
	u32 remaining = hdev->pkt_buf_size;
	u32 tc;

	/* alloc tx buffer for all enabled tc */
	for (tc = 0; tc < HCLGE_MAX_TC_NUM; tc++) {
		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[tc];

		if (!(hdev->hw_tc_map & BIT(tc))) {
			priv->tx_buf_size = 0;
			continue;
		}

		if (remaining < hdev->tx_buf_size)
			return -ENOMEM;

		priv->tx_buf_size = hdev->tx_buf_size;
		remaining -= priv->tx_buf_size;
	}

	return 0;
}

static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
      struct hclge_pkt_buf_alloc *buf_alloc)
{
 u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=99 H=92 G=95

¤ Dauer der Verarbeitung: 0.23 Sekunden  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.