0001 // SPDX-License-Identifier: GPL-2.0+
0002 // Copyright (c) 2016-2017 Hisilicon Limited.
0003 
0004 #include <linux/acpi.h>
0005 #include <linux/device.h>
0006 #include <linux/etherdevice.h>
0007 #include <linux/init.h>
0008 #include <linux/interrupt.h>
0009 #include <linux/kernel.h>
0010 #include <linux/module.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/pci.h>
0013 #include <linux/platform_device.h>
0014 #include <linux/if_vlan.h>
0015 #include <linux/crash_dump.h>
0016 #include <net/ipv6.h>
0017 #include <net/rtnetlink.h>
0018 #include "hclge_cmd.h"
0019 #include "hclge_dcb.h"
0020 #include "hclge_main.h"
0021 #include "hclge_mbx.h"
0022 #include "hclge_mdio.h"
0023 #include "hclge_tm.h"
0024 #include "hclge_err.h"
0025 #include "hnae3.h"
0026 #include "hclge_devlink.h"
0027 #include "hclge_comm_cmd.h"
0028 
0029 #define HCLGE_NAME          "hclge"
0030 
0031 #define HCLGE_BUF_SIZE_UNIT 256U
0032 #define HCLGE_BUF_MUL_BY    2
0033 #define HCLGE_BUF_DIV_BY    2
0034 #define NEED_RESERVE_TC_NUM 2
0035 #define BUF_MAX_PERCENT     100
0036 #define BUF_RESERVE_PERCENT 90
0037 
0038 #define HCLGE_RESET_MAX_FAIL_CNT    5
0039 #define HCLGE_RESET_SYNC_TIME       100
0040 #define HCLGE_PF_RESET_SYNC_TIME    20
0041 #define HCLGE_PF_RESET_SYNC_CNT     1500
0042 
0043 /* Get DFX BD number offset */
0044 #define HCLGE_DFX_BIOS_BD_OFFSET        1
0045 #define HCLGE_DFX_SSU_0_BD_OFFSET       2
0046 #define HCLGE_DFX_SSU_1_BD_OFFSET       3
0047 #define HCLGE_DFX_IGU_BD_OFFSET         4
0048 #define HCLGE_DFX_RPU_0_BD_OFFSET       5
0049 #define HCLGE_DFX_RPU_1_BD_OFFSET       6
0050 #define HCLGE_DFX_NCSI_BD_OFFSET        7
0051 #define HCLGE_DFX_RTC_BD_OFFSET         8
0052 #define HCLGE_DFX_PPP_BD_OFFSET         9
0053 #define HCLGE_DFX_RCB_BD_OFFSET         10
0054 #define HCLGE_DFX_TQP_BD_OFFSET         11
0055 #define HCLGE_DFX_SSU_2_BD_OFFSET       12
0056 
0057 #define HCLGE_LINK_STATUS_MS    10
0058 
0059 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
0060 static int hclge_init_vlan_config(struct hclge_dev *hdev);
0061 static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
0062 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
0063 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
0064 static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
0065 static int hclge_clear_arfs_rules(struct hclge_dev *hdev);
0066 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
0067                            unsigned long *addr);
0068 static int hclge_set_default_loopback(struct hclge_dev *hdev);
0069 
0070 static void hclge_sync_mac_table(struct hclge_dev *hdev);
0071 static void hclge_restore_hw_table(struct hclge_dev *hdev);
0072 static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
0073 static void hclge_sync_fd_table(struct hclge_dev *hdev);
0074 
0075 static struct hnae3_ae_algo ae_algo;
0076 
0077 static struct workqueue_struct *hclge_wq;
0078 
0079 static const struct pci_device_id ae_algo_pci_tbl[] = {
0080     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
0081     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
0082     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
0083     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
0084     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
0085     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
0086     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
0087     {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
0088     /* required last entry */
0089     {0, }
0090 };
0091 
0092 MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
0093 
0094 static const u32 cmdq_reg_addr_list[] = {HCLGE_COMM_NIC_CSQ_BASEADDR_L_REG,
0095                      HCLGE_COMM_NIC_CSQ_BASEADDR_H_REG,
0096                      HCLGE_COMM_NIC_CSQ_DEPTH_REG,
0097                      HCLGE_COMM_NIC_CSQ_TAIL_REG,
0098                      HCLGE_COMM_NIC_CSQ_HEAD_REG,
0099                      HCLGE_COMM_NIC_CRQ_BASEADDR_L_REG,
0100                      HCLGE_COMM_NIC_CRQ_BASEADDR_H_REG,
0101                      HCLGE_COMM_NIC_CRQ_DEPTH_REG,
0102                      HCLGE_COMM_NIC_CRQ_TAIL_REG,
0103                      HCLGE_COMM_NIC_CRQ_HEAD_REG,
0104                      HCLGE_COMM_VECTOR0_CMDQ_SRC_REG,
0105                      HCLGE_COMM_CMDQ_INTR_STS_REG,
0106                      HCLGE_COMM_CMDQ_INTR_EN_REG,
0107                      HCLGE_COMM_CMDQ_INTR_GEN_REG};
0108 
0109 static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
0110                        HCLGE_PF_OTHER_INT_REG,
0111                        HCLGE_MISC_RESET_STS_REG,
0112                        HCLGE_MISC_VECTOR_INT_STS,
0113                        HCLGE_GLOBAL_RESET_REG,
0114                        HCLGE_FUN_RST_ING,
0115                        HCLGE_GRO_EN_REG};
0116 
0117 static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
0118                      HCLGE_RING_RX_ADDR_H_REG,
0119                      HCLGE_RING_RX_BD_NUM_REG,
0120                      HCLGE_RING_RX_BD_LENGTH_REG,
0121                      HCLGE_RING_RX_MERGE_EN_REG,
0122                      HCLGE_RING_RX_TAIL_REG,
0123                      HCLGE_RING_RX_HEAD_REG,
0124                      HCLGE_RING_RX_FBD_NUM_REG,
0125                      HCLGE_RING_RX_OFFSET_REG,
0126                      HCLGE_RING_RX_FBD_OFFSET_REG,
0127                      HCLGE_RING_RX_STASH_REG,
0128                      HCLGE_RING_RX_BD_ERR_REG,
0129                      HCLGE_RING_TX_ADDR_L_REG,
0130                      HCLGE_RING_TX_ADDR_H_REG,
0131                      HCLGE_RING_TX_BD_NUM_REG,
0132                      HCLGE_RING_TX_PRIORITY_REG,
0133                      HCLGE_RING_TX_TC_REG,
0134                      HCLGE_RING_TX_MERGE_EN_REG,
0135                      HCLGE_RING_TX_TAIL_REG,
0136                      HCLGE_RING_TX_HEAD_REG,
0137                      HCLGE_RING_TX_FBD_NUM_REG,
0138                      HCLGE_RING_TX_OFFSET_REG,
0139                      HCLGE_RING_TX_EBD_NUM_REG,
0140                      HCLGE_RING_TX_EBD_OFFSET_REG,
0141                      HCLGE_RING_TX_BD_ERR_REG,
0142                      HCLGE_RING_EN_REG};
0143 
0144 static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
0145                          HCLGE_TQP_INTR_GL0_REG,
0146                          HCLGE_TQP_INTR_GL1_REG,
0147                          HCLGE_TQP_INTR_GL2_REG,
0148                          HCLGE_TQP_INTR_RL_REG};
0149 
0150 static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
0151     "App    Loopback test",
0152     "Serdes serial Loopback test",
0153     "Serdes parallel Loopback test",
0154     "Phy    Loopback test"
0155 };
0156 
0157 static const struct hclge_comm_stats_str g_mac_stats_string[] = {
0158     {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0159         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
0160     {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0161         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
0162     {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0163         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
0164     {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0165         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
0166     {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0167         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
0168     {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0169         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
0170     {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0171         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
0172     {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0173         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
0174     {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0175         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
0176     {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0177         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
0178     {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0179         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
0180     {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0181         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
0182     {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0183         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
0184     {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0185         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
0186     {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0187         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
0188     {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0189         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
0190     {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0191         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
0192     {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0193         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
0194     {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0195         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
0196     {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0197         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
0198     {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0199         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
0200     {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0201         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
0202     {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0203         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
0204     {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0205         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
0206     {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0207         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
0208     {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0209         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
0210     {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0211         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
0212     {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0213         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
0214     {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0215         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
0216     {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0217         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
0218     {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0219         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
0220     {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0221         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
0222     {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0223         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
0224     {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0225         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
0226     {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0227         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
0228     {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0229         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
0230     {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0231         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
0232     {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0233         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
0234     {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0235         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
0236     {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
0237         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
0238     {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0239         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
0240     {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0241         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
0242     {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0243         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
0244     {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0245         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
0246     {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0247         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
0248     {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0249         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
0250     {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0251         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
0252     {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0253         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
0254     {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0255         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
0256     {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0257         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
0258     {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0259         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
0260     {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0261         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
0262     {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0263         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
0264     {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0265         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
0266     {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0267         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
0268     {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0269         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
0270     {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0271         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
0272     {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0273         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
0274     {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0275         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
0276     {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0277         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
0278     {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0279         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
0280     {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0281         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
0282     {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0283         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
0284     {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0285         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
0286     {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0287         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
0288     {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0289         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
0290     {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0291         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
0292     {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0293         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
0294     {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0295         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
0296     {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0297         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
0298     {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0299         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
0300     {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0301         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
0302     {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0303         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
0304     {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0305         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
0306     {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0307         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
0308     {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0309         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
0310     {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0311         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
0312     {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0313         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
0314     {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0315         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
0316     {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0317         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
0318     {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0319         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
0320     {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0321         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
0322     {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0323         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
0324     {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0325         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
0326     {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0327         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
0328     {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0329         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
0330     {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0331         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
0332     {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0333         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
0334     {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0335         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
0336     {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0337         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
0338 
0339     {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0340         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
0341     {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0342         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
0343     {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0344         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
0345     {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0346         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
0347     {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0348         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
0349     {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0350         HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
0351     {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0352         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
0353     {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0354         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
0355     {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0356         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
0357     {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0358         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
0359     {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0360         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
0361     {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
0362         HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
0363 };
0364 
0365 static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
0366     {
0367         .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
0368         .ethter_type = cpu_to_le16(ETH_P_LLDP),
0369         .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
0370         .i_port_bitmap = 0x1,
0371     },
0372 };
0373 
0374 static const u32 hclge_dfx_bd_offset_list[] = {
0375     HCLGE_DFX_BIOS_BD_OFFSET,
0376     HCLGE_DFX_SSU_0_BD_OFFSET,
0377     HCLGE_DFX_SSU_1_BD_OFFSET,
0378     HCLGE_DFX_IGU_BD_OFFSET,
0379     HCLGE_DFX_RPU_0_BD_OFFSET,
0380     HCLGE_DFX_RPU_1_BD_OFFSET,
0381     HCLGE_DFX_NCSI_BD_OFFSET,
0382     HCLGE_DFX_RTC_BD_OFFSET,
0383     HCLGE_DFX_PPP_BD_OFFSET,
0384     HCLGE_DFX_RCB_BD_OFFSET,
0385     HCLGE_DFX_TQP_BD_OFFSET,
0386     HCLGE_DFX_SSU_2_BD_OFFSET
0387 };
0388 
0389 static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
0390     HCLGE_OPC_DFX_BIOS_COMMON_REG,
0391     HCLGE_OPC_DFX_SSU_REG_0,
0392     HCLGE_OPC_DFX_SSU_REG_1,
0393     HCLGE_OPC_DFX_IGU_EGU_REG,
0394     HCLGE_OPC_DFX_RPU_REG_0,
0395     HCLGE_OPC_DFX_RPU_REG_1,
0396     HCLGE_OPC_DFX_NCSI_REG,
0397     HCLGE_OPC_DFX_RTC_REG,
0398     HCLGE_OPC_DFX_PPP_REG,
0399     HCLGE_OPC_DFX_RCB_REG,
0400     HCLGE_OPC_DFX_TQP_REG,
0401     HCLGE_OPC_DFX_SSU_REG_2
0402 };
0403 
0404 static const struct key_info meta_data_key_info[] = {
0405     { PACKET_TYPE_ID, 6 },
0406     { IP_FRAGEMENT, 1 },
0407     { ROCE_TYPE, 1 },
0408     { NEXT_KEY, 5 },
0409     { VLAN_NUMBER, 2 },
0410     { SRC_VPORT, 12 },
0411     { DST_VPORT, 12 },
0412     { TUNNEL_PACKET, 1 },
0413 };
0414 
0415 static const struct key_info tuple_key_info[] = {
0416     { OUTER_DST_MAC, 48, KEY_OPT_MAC, -1, -1 },
0417     { OUTER_SRC_MAC, 48, KEY_OPT_MAC, -1, -1 },
0418     { OUTER_VLAN_TAG_FST, 16, KEY_OPT_LE16, -1, -1 },
0419     { OUTER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
0420     { OUTER_ETH_TYPE, 16, KEY_OPT_LE16, -1, -1 },
0421     { OUTER_L2_RSV, 16, KEY_OPT_LE16, -1, -1 },
0422     { OUTER_IP_TOS, 8, KEY_OPT_U8, -1, -1 },
0423     { OUTER_IP_PROTO, 8, KEY_OPT_U8, -1, -1 },
0424     { OUTER_SRC_IP, 32, KEY_OPT_IP, -1, -1 },
0425     { OUTER_DST_IP, 32, KEY_OPT_IP, -1, -1 },
0426     { OUTER_L3_RSV, 16, KEY_OPT_LE16, -1, -1 },
0427     { OUTER_SRC_PORT, 16, KEY_OPT_LE16, -1, -1 },
0428     { OUTER_DST_PORT, 16, KEY_OPT_LE16, -1, -1 },
0429     { OUTER_L4_RSV, 32, KEY_OPT_LE32, -1, -1 },
0430     { OUTER_TUN_VNI, 24, KEY_OPT_VNI, -1, -1 },
0431     { OUTER_TUN_FLOW_ID, 8, KEY_OPT_U8, -1, -1 },
0432     { INNER_DST_MAC, 48, KEY_OPT_MAC,
0433       offsetof(struct hclge_fd_rule, tuples.dst_mac),
0434       offsetof(struct hclge_fd_rule, tuples_mask.dst_mac) },
0435     { INNER_SRC_MAC, 48, KEY_OPT_MAC,
0436       offsetof(struct hclge_fd_rule, tuples.src_mac),
0437       offsetof(struct hclge_fd_rule, tuples_mask.src_mac) },
0438     { INNER_VLAN_TAG_FST, 16, KEY_OPT_LE16,
0439       offsetof(struct hclge_fd_rule, tuples.vlan_tag1),
0440       offsetof(struct hclge_fd_rule, tuples_mask.vlan_tag1) },
0441     { INNER_VLAN_TAG_SEC, 16, KEY_OPT_LE16, -1, -1 },
0442     { INNER_ETH_TYPE, 16, KEY_OPT_LE16,
0443       offsetof(struct hclge_fd_rule, tuples.ether_proto),
0444       offsetof(struct hclge_fd_rule, tuples_mask.ether_proto) },
0445     { INNER_L2_RSV, 16, KEY_OPT_LE16,
0446       offsetof(struct hclge_fd_rule, tuples.l2_user_def),
0447       offsetof(struct hclge_fd_rule, tuples_mask.l2_user_def) },
0448     { INNER_IP_TOS, 8, KEY_OPT_U8,
0449       offsetof(struct hclge_fd_rule, tuples.ip_tos),
0450       offsetof(struct hclge_fd_rule, tuples_mask.ip_tos) },
0451     { INNER_IP_PROTO, 8, KEY_OPT_U8,
0452       offsetof(struct hclge_fd_rule, tuples.ip_proto),
0453       offsetof(struct hclge_fd_rule, tuples_mask.ip_proto) },
0454     { INNER_SRC_IP, 32, KEY_OPT_IP,
0455       offsetof(struct hclge_fd_rule, tuples.src_ip),
0456       offsetof(struct hclge_fd_rule, tuples_mask.src_ip) },
0457     { INNER_DST_IP, 32, KEY_OPT_IP,
0458       offsetof(struct hclge_fd_rule, tuples.dst_ip),
0459       offsetof(struct hclge_fd_rule, tuples_mask.dst_ip) },
0460     { INNER_L3_RSV, 16, KEY_OPT_LE16,
0461       offsetof(struct hclge_fd_rule, tuples.l3_user_def),
0462       offsetof(struct hclge_fd_rule, tuples_mask.l3_user_def) },
0463     { INNER_SRC_PORT, 16, KEY_OPT_LE16,
0464       offsetof(struct hclge_fd_rule, tuples.src_port),
0465       offsetof(struct hclge_fd_rule, tuples_mask.src_port) },
0466     { INNER_DST_PORT, 16, KEY_OPT_LE16,
0467       offsetof(struct hclge_fd_rule, tuples.dst_port),
0468       offsetof(struct hclge_fd_rule, tuples_mask.dst_port) },
0469     { INNER_L4_RSV, 32, KEY_OPT_LE32,
0470       offsetof(struct hclge_fd_rule, tuples.l4_user_def),
0471       offsetof(struct hclge_fd_rule, tuples_mask.l4_user_def) },
0472 };
0473 
0474 /**
0475  * hclge_cmd_send - send command to command queue
0476  * @hw: pointer to the hw struct
0477  * @desc: prefilled descriptor for describing the command
0478  * @num: the number of descriptors to be sent
0479  *
0480  * This is the main send routine for the command queue: it hands the
0481  * prefilled descriptors to the common command layer, which posts them
0482  * to the queue and cleans it up afterwards.
0482  **/
0483 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
0484 {
0485     return hclge_comm_cmd_send(&hw->hw, desc, num);
0486 }
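/* A typical single-descriptor call, mirroring hclge_mac_query_reg_num()
 * further down in this file:
 *
 *	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
 *	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 */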
0487 
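/* Legacy MAC statistics read path: issues a fixed 21-descriptor
 * HCLGE_OPC_STATS_MAC query and accumulates the returned 64-bit counters
 * into hdev->mac_stats.
 */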
0488 static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
0489 {
0490 #define HCLGE_MAC_CMD_NUM 21
0491 
0492     u64 *data = (u64 *)(&hdev->mac_stats);
0493     struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
0494     __le64 *desc_data;
0495     u32 data_size;
0496     int ret;
0497     u32 i;
0498 
0499     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
0500     ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
0501     if (ret) {
0502         dev_err(&hdev->pdev->dev,
0503             "Get MAC pkt stats fail, status = %d.\n", ret);
0504 
0505         return ret;
0506     }
0507 
0508     /* The first desc has a 64-bit header, so subtract 1 from the data size */
0509     data_size = sizeof(desc) / (sizeof(u64)) - 1;
0510 
0511     desc_data = (__le64 *)(&desc[0].data[0]);
0512     for (i = 0; i < data_size; i++) {
0513         /* data memory is contiguous because only the first desc has a
0514          * header in this command
0515          */
0516         *data += le64_to_cpu(*desc_data);
0517         data++;
0518         desc_data++;
0519     }
0520 
0521     return 0;
0522 }
0523 
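/* Newer MAC statistics read path: the descriptor count is derived from the
 * register number reported in dev_specs.mac_stats_num, so all counters the
 * firmware exposes can be fetched with one HCLGE_OPC_STATS_MAC_ALL command.
 */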
0524 static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
0525 {
0526 #define HCLGE_REG_NUM_PER_DESC      4
0527 
0528     u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
0529     u64 *data = (u64 *)(&hdev->mac_stats);
0530     struct hclge_desc *desc;
0531     __le64 *desc_data;
0532     u32 data_size;
0533     u32 desc_num;
0534     int ret;
0535     u32 i;
0536 
0537     /* The first desc has a 64-bit header, so it needs to be taken into account */
0538     desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
0539 
0540     /* This may be called inside atomic sections,
0541      * so GFP_ATOMIC is more suitable here
0542      */
0543     desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
0544     if (!desc)
0545         return -ENOMEM;
0546 
0547     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
0548     ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
0549     if (ret) {
0550         kfree(desc);
0551         return ret;
0552     }
0553 
0554     data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
0555 
0556     desc_data = (__le64 *)(&desc[0].data[0]);
0557     for (i = 0; i < data_size; i++) {
0558         /* data memory is contiguous because only the first desc has a
0559          * header in this command
0560          */
0561         *data += le64_to_cpu(*desc_data);
0562         data++;
0563         desc_data++;
0564     }
0565 
0566     kfree(desc);
0567 
0568     return 0;
0569 }
0570 
0571 static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
0572 {
0573     struct hclge_desc desc;
0574     int ret;
0575 
0576     /* The driver needs the total number of registers, covering both valid
0577      * and reserved registers, but old firmware on V2 devices only returns
0578      * the number of valid registers. To stay compatible with those
0579      * devices, the driver uses a fixed value.
0580      */
0581     if (hdev->ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
0582         *reg_num = HCLGE_MAC_STATS_MAX_NUM_V1;
0583         return 0;
0584     }
0585 
0586     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
0587     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0588     if (ret) {
0589         dev_err(&hdev->pdev->dev,
0590             "failed to query mac statistic reg number, ret = %d\n",
0591             ret);
0592         return ret;
0593     }
0594 
0595     *reg_num = le32_to_cpu(desc.data[0]);
0596     if (*reg_num == 0) {
0597         dev_err(&hdev->pdev->dev,
0598             "mac statistic reg number is invalid!\n");
0599         return -ENODATA;
0600     }
0601 
0602     return 0;
0603 }
0604 
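/* Pick the MAC statistics read path based on whether the firmware reports
 * a non-zero MAC statistics register number in dev_specs.mac_stats_num.
 */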
0605 int hclge_mac_update_stats(struct hclge_dev *hdev)
0606 {
0607     /* The firmware supports the new statistics acquisition method */
0608     if (hdev->ae_dev->dev_specs.mac_stats_num)
0609         return hclge_mac_update_stats_complete(hdev);
0610     else
0611         return hclge_mac_update_stats_defective(hdev);
0612 }
0613 
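/* Count how many entries in @strs are supported by this device, i.e. whose
 * stats_num does not exceed the MAC statistics register number reported by
 * the firmware.
 */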
0614 static int hclge_comm_get_count(struct hclge_dev *hdev,
0615                 const struct hclge_comm_stats_str strs[],
0616                 u32 size)
0617 {
0618     int count = 0;
0619     u32 i;
0620 
0621     for (i = 0; i < size; i++)
0622         if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
0623             count++;
0624 
0625     return count;
0626 }
0627 
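/* Copy the supported MAC statistics from hdev->mac_stats into the ethtool
 * buffer @data and return the position just past the last value written.
 */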
0628 static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
0629                  const struct hclge_comm_stats_str strs[],
0630                  int size, u64 *data)
0631 {
0632     u64 *buf = data;
0633     u32 i;
0634 
0635     for (i = 0; i < size; i++) {
0636         if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
0637             continue;
0638 
0639         *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
0640         buf++;
0641     }
0642 
0643     return buf;
0644 }
0645 
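/* Fill the ethtool string buffer with the names of the supported MAC
 * statistics; entries not supported by this device are skipped so the
 * strings stay in sync with hclge_comm_get_stats().
 */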
0646 static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
0647                   const struct hclge_comm_stats_str strs[],
0648                   int size, u8 *data)
0649 {
0650     char *buff = (char *)data;
0651     u32 i;
0652 
0653     if (stringset != ETH_SS_STATS)
0654         return buff;
0655 
0656     for (i = 0; i < size; i++) {
0657         if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
0658             continue;
0659 
0660         snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
0661         buff = buff + ETH_GSTRING_LEN;
0662     }
0663 
0664     return (u8 *)buff;
0665 }
0666 
0667 static void hclge_update_stats_for_all(struct hclge_dev *hdev)
0668 {
0669     struct hnae3_handle *handle;
0670     int status;
0671 
0672     handle = &hdev->vport[0].nic;
0673     if (handle->client) {
0674         status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
0675         if (status) {
0676             dev_err(&hdev->pdev->dev,
0677                 "Update TQPS stats fail, status = %d.\n",
0678                 status);
0679         }
0680     }
0681 
0682     status = hclge_mac_update_stats(hdev);
0683     if (status)
0684         dev_err(&hdev->pdev->dev,
0685             "Update MAC stats fail, status = %d.\n", status);
0686 }
0687 
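/* Refresh MAC and TQP statistics for the ethtool/netdev stats path. The
 * HCLGE_STATE_STATISTICS_UPDATING bit prevents concurrent updates.
 */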
0688 static void hclge_update_stats(struct hnae3_handle *handle,
0689                    struct net_device_stats *net_stats)
0690 {
0691     struct hclge_vport *vport = hclge_get_vport(handle);
0692     struct hclge_dev *hdev = vport->back;
0693     int status;
0694 
0695     if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
0696         return;
0697 
0698     status = hclge_mac_update_stats(hdev);
0699     if (status)
0700         dev_err(&hdev->pdev->dev,
0701             "Update MAC stats fail, status = %d.\n",
0702             status);
0703 
0704     status = hclge_comm_tqps_update_stats(handle, &hdev->hw.hw);
0705     if (status)
0706         dev_err(&hdev->pdev->dev,
0707             "Update TQPS stats fail, status = %d.\n",
0708             status);
0709 
0710     clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
0711 }
0712 
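/* Return the number of self-test items (ETH_SS_TEST) or statistics
 * (ETH_SS_STATS), updating handle->flags with the supported loopback modes
 * along the way.
 */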
0713 static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
0714 {
0715 #define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
0716         HNAE3_SUPPORT_PHY_LOOPBACK | \
0717         HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
0718         HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
0719 
0720     struct hclge_vport *vport = hclge_get_vport(handle);
0721     struct hclge_dev *hdev = vport->back;
0722     int count = 0;
0723 
0724     /* Loopback test support rules:
0725      * mac: only supported in GE mode
0726      * serdes: supported in all mac modes, including GE/XGE/LGE/CGE
0727      * phy: only supported when a phy device exists on the board
0728      */
0729     if (stringset == ETH_SS_TEST) {
0730         /* clear loopback bit flags first */
0731         handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
0732         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
0733             hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
0734             hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
0735             hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
0736             count += 1;
0737             handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
0738         }
0739 
0740         count += 2;
0741         handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
0742         handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
0743 
0744         if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
0745              hdev->hw.mac.phydev->drv->set_loopback) ||
0746             hnae3_dev_phy_imp_supported(hdev)) {
0747             count += 1;
0748             handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
0749         }
0750     } else if (stringset == ETH_SS_STATS) {
0751         count = hclge_comm_get_count(hdev, g_mac_stats_string,
0752                          ARRAY_SIZE(g_mac_stats_string)) +
0753             hclge_comm_tqps_get_sset_count(handle);
0754     }
0755 
0756     return count;
0757 }
0758 
0759 static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
0760                   u8 *data)
0761 {
0762     struct hclge_vport *vport = hclge_get_vport(handle);
0763     struct hclge_dev *hdev = vport->back;
0764     u8 *p = (char *)data;
0765     int size;
0766 
0767     if (stringset == ETH_SS_STATS) {
0768         size = ARRAY_SIZE(g_mac_stats_string);
0769         p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
0770                        size, p);
0771         p = hclge_comm_tqps_get_strings(handle, p);
0772     } else if (stringset == ETH_SS_TEST) {
0773         if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
0774             memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
0775                    ETH_GSTRING_LEN);
0776             p += ETH_GSTRING_LEN;
0777         }
0778         if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
0779             memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
0780                    ETH_GSTRING_LEN);
0781             p += ETH_GSTRING_LEN;
0782         }
0783         if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
0784             memcpy(p,
0785                    hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
0786                    ETH_GSTRING_LEN);
0787             p += ETH_GSTRING_LEN;
0788         }
0789         if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
0790             memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
0791                    ETH_GSTRING_LEN);
0792             p += ETH_GSTRING_LEN;
0793         }
0794     }
0795 }
0796 
0797 static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
0798 {
0799     struct hclge_vport *vport = hclge_get_vport(handle);
0800     struct hclge_dev *hdev = vport->back;
0801     u64 *p;
0802 
0803     p = hclge_comm_get_stats(hdev, g_mac_stats_string,
0804                  ARRAY_SIZE(g_mac_stats_string), data);
0805     p = hclge_comm_tqps_get_stats(handle, p);
0806 }
0807 
0808 static void hclge_get_mac_stat(struct hnae3_handle *handle,
0809                    struct hns3_mac_stats *mac_stats)
0810 {
0811     struct hclge_vport *vport = hclge_get_vport(handle);
0812     struct hclge_dev *hdev = vport->back;
0813 
0814     hclge_update_stats(handle, NULL);
0815 
0816     mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
0817     mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
0818 }
0819 
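/* Parse the function status response: record whether this PF is the main
 * PF and extract the MAC id. Returns -EINVAL if PF state is not done yet.
 */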
0820 static int hclge_parse_func_status(struct hclge_dev *hdev,
0821                    struct hclge_func_status_cmd *status)
0822 {
0823 #define HCLGE_MAC_ID_MASK   0xF
0824 
0825     if (!(status->pf_state & HCLGE_PF_STATE_DONE))
0826         return -EINVAL;
0827 
0828     /* Record whether this pf is the main pf */
0829     if (status->pf_state & HCLGE_PF_STATE_MAIN)
0830         hdev->flag |= HCLGE_FLAG_MAIN;
0831     else
0832         hdev->flag &= ~HCLGE_FLAG_MAIN;
0833 
0834     hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
0835     return 0;
0836 }
0837 
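/* Query the function status from firmware, polling up to
 * HCLGE_QUERY_MAX_CNT times until pf reset is done, then parse the
 * main-pf flag and MAC id from the response.
 */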
0838 static int hclge_query_function_status(struct hclge_dev *hdev)
0839 {
0840 #define HCLGE_QUERY_MAX_CNT 5
0841 
0842     struct hclge_func_status_cmd *req;
0843     struct hclge_desc desc;
0844     int timeout = 0;
0845     int ret;
0846 
0847     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
0848     req = (struct hclge_func_status_cmd *)desc.data;
0849 
0850     do {
0851         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0852         if (ret) {
0853             dev_err(&hdev->pdev->dev,
0854                 "query function status failed %d.\n", ret);
0855             return ret;
0856         }
0857 
0858         /* Check whether pf reset is done */
0859         if (req->pf_state)
0860             break;
0861         usleep_range(1000, 2000);
0862     } while (timeout++ < HCLGE_QUERY_MAX_CNT);
0863 
0864     return hclge_parse_func_status(hdev, req);
0865 }
0866 
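/* Query PF resources (TQP number, buffer sizes and MSI vector counts)
 * from firmware and record them in hdev.
 */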
0867 static int hclge_query_pf_resource(struct hclge_dev *hdev)
0868 {
0869     struct hclge_pf_res_cmd *req;
0870     struct hclge_desc desc;
0871     int ret;
0872 
0873     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
0874     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
0875     if (ret) {
0876         dev_err(&hdev->pdev->dev,
0877             "query pf resource failed %d.\n", ret);
0878         return ret;
0879     }
0880 
0881     req = (struct hclge_pf_res_cmd *)desc.data;
0882     hdev->num_tqps = le16_to_cpu(req->tqp_num) +
0883              le16_to_cpu(req->ext_tqp_num);
0884     hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
0885 
0886     if (req->tx_buf_size)
0887         hdev->tx_buf_size =
0888             le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
0889     else
0890         hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
0891 
0892     hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
0893 
0894     if (req->dv_buf_size)
0895         hdev->dv_buf_size =
0896             le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
0897     else
0898         hdev->dv_buf_size = HCLGE_DEFAULT_DV;
0899 
0900     hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
0901 
0902     hdev->num_nic_msi = le16_to_cpu(req->msixcap_localid_number_nic);
0903     if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
0904         dev_err(&hdev->pdev->dev,
0905             "only %u msi resources available, not enough for pf(min:2).\n",
0906             hdev->num_nic_msi);
0907         return -EINVAL;
0908     }
0909 
0910     if (hnae3_dev_roce_supported(hdev)) {
0911         hdev->num_roce_msi =
0912             le16_to_cpu(req->pf_intr_vector_number_roce);
0913 
0914         /* PF should have both NIC vectors and RoCE vectors;
0915          * NIC vectors are queued before RoCE vectors.
0916          */
0917         hdev->num_msi = hdev->num_nic_msi + hdev->num_roce_msi;
0918     } else {
0919         hdev->num_msi = hdev->num_nic_msi;
0920     }
0921 
0922     return 0;
0923 }
0924 
0925 static int hclge_parse_speed(u8 speed_cmd, u32 *speed)
0926 {
0927     switch (speed_cmd) {
0928     case HCLGE_FW_MAC_SPEED_10M:
0929         *speed = HCLGE_MAC_SPEED_10M;
0930         break;
0931     case HCLGE_FW_MAC_SPEED_100M:
0932         *speed = HCLGE_MAC_SPEED_100M;
0933         break;
0934     case HCLGE_FW_MAC_SPEED_1G:
0935         *speed = HCLGE_MAC_SPEED_1G;
0936         break;
0937     case HCLGE_FW_MAC_SPEED_10G:
0938         *speed = HCLGE_MAC_SPEED_10G;
0939         break;
0940     case HCLGE_FW_MAC_SPEED_25G:
0941         *speed = HCLGE_MAC_SPEED_25G;
0942         break;
0943     case HCLGE_FW_MAC_SPEED_40G:
0944         *speed = HCLGE_MAC_SPEED_40G;
0945         break;
0946     case HCLGE_FW_MAC_SPEED_50G:
0947         *speed = HCLGE_MAC_SPEED_50G;
0948         break;
0949     case HCLGE_FW_MAC_SPEED_100G:
0950         *speed = HCLGE_MAC_SPEED_100G;
0951         break;
0952     case HCLGE_FW_MAC_SPEED_200G:
0953         *speed = HCLGE_MAC_SPEED_200G;
0954         break;
0955     default:
0956         return -EINVAL;
0957     }
0958 
0959     return 0;
0960 }
0961 
0962 static const struct hclge_speed_bit_map speed_bit_map[] = {
0963     {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT},
0964     {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT},
0965     {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT},
0966     {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT},
0967     {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT},
0968     {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT},
0969     {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT},
0970     {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT},
0971     {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT},
0972 };
0973 
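/* Map a MAC speed value to its supported-speed bit using speed_bit_map;
 * returns -EINVAL for speeds the table does not know about.
 */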
0974 static int hclge_get_speed_bit(u32 speed, u32 *speed_bit)
0975 {
0976     u16 i;
0977 
0978     for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) {
0979         if (speed == speed_bit_map[i].speed) {
0980             *speed_bit = speed_bit_map[i].speed_bit;
0981             return 0;
0982         }
0983     }
0984 
0985     return -EINVAL;
0986 }
0987 
0988 static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
0989 {
0990     struct hclge_vport *vport = hclge_get_vport(handle);
0991     struct hclge_dev *hdev = vport->back;
0992     u32 speed_ability = hdev->hw.mac.speed_ability;
0993     u32 speed_bit = 0;
0994     int ret;
0995 
0996     ret = hclge_get_speed_bit(speed, &speed_bit);
0997     if (ret)
0998         return ret;
0999 
1000     if (speed_bit & speed_ability)
1001         return 0;
1002 
1003     return -EINVAL;
1004 }
1005 
1006 static void hclge_convert_setting_sr(u16 speed_ability,
1007                      unsigned long *link_mode)
1008 {
1009     if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1010         linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
1011                  link_mode);
1012     if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1013         linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1014                  link_mode);
1015     if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1016         linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
1017                  link_mode);
1018     if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1019         linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
1020                  link_mode);
1021     if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1022         linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
1023                  link_mode);
1024     if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1025         linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
1026                  link_mode);
1027 }
1028 
1029 static void hclge_convert_setting_lr(u16 speed_ability,
1030                      unsigned long *link_mode)
1031 {
1032     if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1033         linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
1034                  link_mode);
1035     if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1036         linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
1037                  link_mode);
1038     if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1039         linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
1040                  link_mode);
1041     if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1042         linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
1043                  link_mode);
1044     if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1045         linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
1046                  link_mode);
1047     if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1048         linkmode_set_bit(
1049             ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
1050             link_mode);
1051 }
1052 
1053 static void hclge_convert_setting_cr(u16 speed_ability,
1054                      unsigned long *link_mode)
1055 {
1056     if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1057         linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
1058                  link_mode);
1059     if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1060         linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
1061                  link_mode);
1062     if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1063         linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
1064                  link_mode);
1065     if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1066         linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
1067                  link_mode);
1068     if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1069         linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
1070                  link_mode);
1071     if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1072         linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
1073                  link_mode);
1074 }
1075 
1076 static void hclge_convert_setting_kr(u16 speed_ability,
1077                      unsigned long *link_mode)
1078 {
1079     if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1080         linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
1081                  link_mode);
1082     if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1083         linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
1084                  link_mode);
1085     if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1086         linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
1087                  link_mode);
1088     if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1089         linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
1090                  link_mode);
1091     if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1092         linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
1093                  link_mode);
1094     if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1095         linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
1096                  link_mode);
1097     if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1098         linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
1099                  link_mode);
1100 }
1101 
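/* Derive mac->fec_ability and the advertised FEC link modes from the
 * current MAC speed; speeds without FEC support get no FEC ability.
 */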
1102 static void hclge_convert_setting_fec(struct hclge_mac *mac)
1103 {
1104     linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
1105     linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1106 
1107     switch (mac->speed) {
1108     case HCLGE_MAC_SPEED_10G:
1109     case HCLGE_MAC_SPEED_40G:
1110         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
1111                  mac->supported);
1112         mac->fec_ability =
1113             BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
1114         break;
1115     case HCLGE_MAC_SPEED_25G:
1116     case HCLGE_MAC_SPEED_50G:
1117         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
1118                  mac->supported);
1119         mac->fec_ability =
1120             BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
1121             BIT(HNAE3_FEC_AUTO);
1122         break;
1123     case HCLGE_MAC_SPEED_100G:
1124     case HCLGE_MAC_SPEED_200G:
1125         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
1126         mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
1127         break;
1128     default:
1129         mac->fec_ability = 0;
1130         break;
1131     }
1132 }
1133 
1134 static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
1135                     u16 speed_ability)
1136 {
1137     struct hclge_mac *mac = &hdev->hw.mac;
1138 
1139     if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1140         linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
1141                  mac->supported);
1142 
1143     hclge_convert_setting_sr(speed_ability, mac->supported);
1144     hclge_convert_setting_lr(speed_ability, mac->supported);
1145     hclge_convert_setting_cr(speed_ability, mac->supported);
1146     if (hnae3_dev_fec_supported(hdev))
1147         hclge_convert_setting_fec(mac);
1148 
1149     if (hnae3_dev_pause_supported(hdev))
1150         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1151 
1152     linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
1153     linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1154 }
1155 
1156 static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
1157                         u16 speed_ability)
1158 {
1159     struct hclge_mac *mac = &hdev->hw.mac;
1160 
1161     hclge_convert_setting_kr(speed_ability, mac->supported);
1162     if (hnae3_dev_fec_supported(hdev))
1163         hclge_convert_setting_fec(mac);
1164 
1165     if (hnae3_dev_pause_supported(hdev))
1166         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
1167 
1168     linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
1169     linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
1170 }
1171 
1172 static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
1173                      u16 speed_ability)
1174 {
1175     unsigned long *supported = hdev->hw.mac.supported;
1176 
1177     /* default to supporting all speeds for a GE port */
1178     if (!speed_ability)
1179         speed_ability = HCLGE_SUPPORT_GE;
1180 
1181     if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1182         linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
1183                  supported);
1184 
1185     if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
1186         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
1187                  supported);
1188         linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
1189                  supported);
1190     }
1191 
1192     if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
1193         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
1194         linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
1195     }
1196 
1197     if (hnae3_dev_pause_supported(hdev)) {
1198         linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
1199         linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
1200     }
1201 
1202     linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
1203     linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
1204 }
1205 
1206 static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
1207 {
1208     u8 media_type = hdev->hw.mac.media_type;
1209 
1210     if (media_type == HNAE3_MEDIA_TYPE_FIBER)
1211         hclge_parse_fiber_link_mode(hdev, speed_ability);
1212     else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
1213         hclge_parse_copper_link_mode(hdev, speed_ability);
1214     else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
1215         hclge_parse_backplane_link_mode(hdev, speed_ability);
1216 }
1217 
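/* Return the highest MAC speed present in @speed_ability; defaults to 1G
 * when no speed bit is set.
 */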
1218 static u32 hclge_get_max_speed(u16 speed_ability)
1219 {
1220     if (speed_ability & HCLGE_SUPPORT_200G_BIT)
1221         return HCLGE_MAC_SPEED_200G;
1222 
1223     if (speed_ability & HCLGE_SUPPORT_100G_BIT)
1224         return HCLGE_MAC_SPEED_100G;
1225 
1226     if (speed_ability & HCLGE_SUPPORT_50G_BIT)
1227         return HCLGE_MAC_SPEED_50G;
1228 
1229     if (speed_ability & HCLGE_SUPPORT_40G_BIT)
1230         return HCLGE_MAC_SPEED_40G;
1231 
1232     if (speed_ability & HCLGE_SUPPORT_25G_BIT)
1233         return HCLGE_MAC_SPEED_25G;
1234 
1235     if (speed_ability & HCLGE_SUPPORT_10G_BIT)
1236         return HCLGE_MAC_SPEED_10G;
1237 
1238     if (speed_ability & HCLGE_SUPPORT_1G_BIT)
1239         return HCLGE_MAC_SPEED_1G;
1240 
1241     if (speed_ability & HCLGE_SUPPORT_100M_BIT)
1242         return HCLGE_MAC_SPEED_100M;
1243 
1244     if (speed_ability & HCLGE_SUPPORT_10M_BIT)
1245         return HCLGE_MAC_SPEED_10M;
1246 
1247     return HCLGE_MAC_SPEED_1G;
1248 }
1249 
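/* Parse the configuration parameters returned by the HCLGE_OPC_GET_CFG_PARAM
 * descriptors (TC number, MAC address, speed ability, RSS sizes, etc.)
 * into @cfg.
 */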
1250 static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
1251 {
1252 #define HCLGE_TX_SPARE_SIZE_UNIT        4096
1253 #define SPEED_ABILITY_EXT_SHIFT         8
1254 
1255     struct hclge_cfg_param_cmd *req;
1256     u64 mac_addr_tmp_high;
1257     u16 speed_ability_ext;
1258     u64 mac_addr_tmp;
1259     unsigned int i;
1260 
1261     req = (struct hclge_cfg_param_cmd *)desc[0].data;
1262 
1263     /* get the configuration */
1264     cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1265                       HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
1266     cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
1267                         HCLGE_CFG_TQP_DESC_N_M,
1268                         HCLGE_CFG_TQP_DESC_N_S);
1269 
1270     cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
1271                     HCLGE_CFG_PHY_ADDR_M,
1272                     HCLGE_CFG_PHY_ADDR_S);
1273     cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
1274                       HCLGE_CFG_MEDIA_TP_M,
1275                       HCLGE_CFG_MEDIA_TP_S);
1276     cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
1277                       HCLGE_CFG_RX_BUF_LEN_M,
1278                       HCLGE_CFG_RX_BUF_LEN_S);
1279     /* get mac_address */
1280     mac_addr_tmp = __le32_to_cpu(req->param[2]);
1281     mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
1282                         HCLGE_CFG_MAC_ADDR_H_M,
1283                         HCLGE_CFG_MAC_ADDR_H_S);
1284 
1285     mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
1286 
1287     cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
1288                          HCLGE_CFG_DEFAULT_SPEED_M,
1289                          HCLGE_CFG_DEFAULT_SPEED_S);
1290     cfg->vf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
1291                            HCLGE_CFG_RSS_SIZE_M,
1292                            HCLGE_CFG_RSS_SIZE_S);
1293 
1294     for (i = 0; i < ETH_ALEN; i++)
1295         cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
1296 
1297     req = (struct hclge_cfg_param_cmd *)desc[1].data;
1298     cfg->numa_node_map = __le32_to_cpu(req->param[0]);
1299 
1300     cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
1301                          HCLGE_CFG_SPEED_ABILITY_M,
1302                          HCLGE_CFG_SPEED_ABILITY_S);
1303     speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
1304                         HCLGE_CFG_SPEED_ABILITY_EXT_M,
1305                         HCLGE_CFG_SPEED_ABILITY_EXT_S);
1306     cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
1307 
1308     cfg->vlan_fliter_cap = hnae3_get_field(__le32_to_cpu(req->param[1]),
1309                            HCLGE_CFG_VLAN_FLTR_CAP_M,
1310                            HCLGE_CFG_VLAN_FLTR_CAP_S);
1311 
1312     cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
1313                      HCLGE_CFG_UMV_TBL_SPACE_M,
1314                      HCLGE_CFG_UMV_TBL_SPACE_S);
1315 
1316     cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
1317                            HCLGE_CFG_PF_RSS_SIZE_M,
1318                            HCLGE_CFG_PF_RSS_SIZE_S);
1319 
1320     /* HCLGE_CFG_PF_RSS_SIZE_M is the PF max rss size, which is a
1321      * power of 2, instead of reading out directly. This would
1322      * be more flexible for future changes and expansions.
1323      * When VF max  rss size field is HCLGE_CFG_RSS_SIZE_S,
1324      * it does not make sense if PF's field is 0. In this case, PF and VF
1325      * has the same max rss size filed: HCLGE_CFG_RSS_SIZE_S.
1326      */
1327     cfg->pf_rss_size_max = cfg->pf_rss_size_max ?
1328                    1U << cfg->pf_rss_size_max :
1329                    cfg->vf_rss_size_max;
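    /* For example, a field value of 7 would give a PF max rss size of
     * 1 << 7 = 128, while a field value of 0 falls back to
     * vf_rss_size_max (illustrative values, not read from hardware).
     */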
1330 
1331     /* The unit of the tx spare buffer size queried from configuration
1332      * file is HCLGE_TX_SPARE_SIZE_UNIT(4096) bytes, so a conversion is
1333      * needed here.
1334      */
1335     cfg->tx_spare_buf_size = hnae3_get_field(__le32_to_cpu(req->param[2]),
1336                          HCLGE_CFG_TX_SPARE_BUF_SIZE_M,
1337                          HCLGE_CFG_TX_SPARE_BUF_SIZE_S);
1338     cfg->tx_spare_buf_size *= HCLGE_TX_SPARE_SIZE_UNIT;
1339 }
1340 
1341 /* hclge_get_cfg: query the static parameters from flash
1342  * @hdev: pointer to struct hclge_dev
1343  * @hcfg: the config structure to be filled
1344  */
1345 static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
1346 {
1347     struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
1348     struct hclge_cfg_param_cmd *req;
1349     unsigned int i;
1350     int ret;
1351 
1352     for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
1353         u32 offset = 0;
1354 
1355         req = (struct hclge_cfg_param_cmd *)desc[i].data;
1356         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
1357                        true);
1358         hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
1359                 HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
1360         /* Length must be given in units of 4 bytes when sent to hardware */
1361         hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
1362                 HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
1363         req->offset = cpu_to_le32(offset);
1364     }
1365 
1366     ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
1367     if (ret) {
1368         dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
1369         return ret;
1370     }
1371 
1372     hclge_parse_cfg(hcfg, desc);
1373 
1374     return 0;
1375 }
1376 
1377 static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
1378 {
1379 #define HCLGE_MAX_NON_TSO_BD_NUM            8U
1380 
1381     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1382 
1383     ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1384     ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1385     ae_dev->dev_specs.rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1386     ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
1387     ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
1388     ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
1389     ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
1390     ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1391 }
1392 
1393 static void hclge_parse_dev_specs(struct hclge_dev *hdev,
1394                   struct hclge_desc *desc)
1395 {
1396     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1397     struct hclge_dev_specs_0_cmd *req0;
1398     struct hclge_dev_specs_1_cmd *req1;
1399 
1400     req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
1401     req1 = (struct hclge_dev_specs_1_cmd *)desc[1].data;
1402 
1403     ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
1404     ae_dev->dev_specs.rss_ind_tbl_size =
1405         le16_to_cpu(req0->rss_ind_tbl_size);
1406     ae_dev->dev_specs.int_ql_max = le16_to_cpu(req0->int_ql_max);
1407     ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
1408     ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
1409     ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
1410     ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
1411     ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
1412     ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
1413     ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
1414 }
1415 
1416 static void hclge_check_dev_specs(struct hclge_dev *hdev)
1417 {
1418     struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
1419 
1420     if (!dev_specs->max_non_tso_bd_num)
1421         dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
1422     if (!dev_specs->rss_ind_tbl_size)
1423         dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
1424     if (!dev_specs->rss_key_size)
1425         dev_specs->rss_key_size = HCLGE_COMM_RSS_KEY_SIZE;
1426     if (!dev_specs->max_tm_rate)
1427         dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
1428     if (!dev_specs->max_qset_num)
1429         dev_specs->max_qset_num = HCLGE_MAX_QSET_NUM;
1430     if (!dev_specs->max_int_gl)
1431         dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
1432     if (!dev_specs->max_frm_size)
1433         dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
1434     if (!dev_specs->umv_size)
1435         dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
1436 }
1437 
1438 static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
1439 {
1440     u32 reg_num = 0;
1441     int ret;
1442 
1443     ret = hclge_mac_query_reg_num(hdev, &reg_num);
1444     if (ret && ret != -EOPNOTSUPP)
1445         return ret;
1446 
1447     hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
1448     return 0;
1449 }
1450 
1451 static int hclge_query_dev_specs(struct hclge_dev *hdev)
1452 {
1453     struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
1454     int ret;
1455     int i;
1456 
1457     ret = hclge_query_mac_stats_num(hdev);
1458     if (ret)
1459         return ret;
1460 
1461     /* set default specifications as devices lower than version V3 do not
1462      * support querying specifications from firmware.
1463      */
1464     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
1465         hclge_set_default_dev_specs(hdev);
1466         return 0;
1467     }
1468 
1469     for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
1470         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
1471                        true);
1472         desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
1473     }
1474     hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
1475 
1476     ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
1477     if (ret)
1478         return ret;
1479 
1480     hclge_parse_dev_specs(hdev, desc);
1481     hclge_check_dev_specs(hdev);
1482 
1483     return 0;
1484 }
1485 
1486 static int hclge_get_cap(struct hclge_dev *hdev)
1487 {
1488     int ret;
1489 
1490     ret = hclge_query_function_status(hdev);
1491     if (ret) {
1492         dev_err(&hdev->pdev->dev,
1493             "query function status error %d.\n", ret);
1494         return ret;
1495     }
1496 
1497     /* get pf resource */
1498     return hclge_query_pf_resource(hdev);
1499 }
1500 
1501 static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
1502 {
1503 #define HCLGE_MIN_TX_DESC   64
1504 #define HCLGE_MIN_RX_DESC   64
1505 
1506     if (!is_kdump_kernel())
1507         return;
1508 
1509     dev_info(&hdev->pdev->dev,
1510          "Running kdump kernel. Using minimal resources\n");
1511 
1512     /* minimal number of queue pairs equals the number of vports */
1513     hdev->num_tqps = hdev->num_req_vfs + 1;
1514     hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
1515     hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
1516 }
1517 
1518 static void hclge_init_tc_config(struct hclge_dev *hdev)
1519 {
1520     unsigned int i;
1521 
1522     if (hdev->tc_max > HNAE3_MAX_TC ||
1523         hdev->tc_max < 1) {
1524         dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
1525              hdev->tc_max);
1526         hdev->tc_max = 1;
1527     }
1528 
1529     /* Dev does not support DCB */
1530     if (!hnae3_dev_dcb_supported(hdev)) {
1531         hdev->tc_max = 1;
1532         hdev->pfc_max = 0;
1533     } else {
1534         hdev->pfc_max = hdev->tc_max;
1535     }
1536 
1537     hdev->tm_info.num_tc = 1;
1538 
1539     /* Non-contiguous TCs are currently not supported */
1540     for (i = 0; i < hdev->tm_info.num_tc; i++)
1541         hnae3_set_bit(hdev->hw_tc_map, i, 1);
1542 
1543     hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
1544 }
1545 
1546 static int hclge_configure(struct hclge_dev *hdev)
1547 {
1548     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1549     struct hclge_cfg cfg;
1550     int ret;
1551 
1552     ret = hclge_get_cfg(hdev, &cfg);
1553     if (ret)
1554         return ret;
1555 
1556     hdev->base_tqp_pid = 0;
1557     hdev->vf_rss_size_max = cfg.vf_rss_size_max;
1558     hdev->pf_rss_size_max = cfg.pf_rss_size_max;
1559     hdev->rx_buf_len = cfg.rx_buf_len;
1560     ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1561     hdev->hw.mac.media_type = cfg.media_type;
1562     hdev->hw.mac.phy_addr = cfg.phy_addr;
1563     hdev->num_tx_desc = cfg.tqp_desc_num;
1564     hdev->num_rx_desc = cfg.tqp_desc_num;
1565     hdev->tm_info.num_pg = 1;
1566     hdev->tc_max = cfg.tc_num;
1567     hdev->tm_info.hw_pfc_map = 0;
1568     if (cfg.umv_space)
1569         hdev->wanted_umv_size = cfg.umv_space;
1570     else
1571         hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
1572     hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
1573     hdev->gro_en = true;
1574     if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
1575         set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
1576 
1577     if (hnae3_dev_fd_supported(hdev)) {
1578         hdev->fd_en = true;
1579         hdev->fd_active_type = HCLGE_FD_RULE_NONE;
1580     }
1581 
1582     ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
1583     if (ret) {
1584         dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
1585             cfg.default_speed, ret);
1586         return ret;
1587     }
1588 
1589     hclge_parse_link_mode(hdev, cfg.speed_ability);
1590 
1591     hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
1592 
1593     hclge_init_tc_config(hdev);
1594     hclge_init_kdump_kernel_config(hdev);
1595 
1596     return ret;
1597 }
1598 
1599 static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
1600                 u16 tso_mss_max)
1601 {
1602     struct hclge_cfg_tso_status_cmd *req;
1603     struct hclge_desc desc;
1604 
1605     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
1606 
1607     req = (struct hclge_cfg_tso_status_cmd *)desc.data;
1608     req->tso_mss_min = cpu_to_le16(tso_mss_min);
1609     req->tso_mss_max = cpu_to_le16(tso_mss_max);
1610 
1611     return hclge_cmd_send(&hdev->hw, &desc, 1);
1612 }
1613 
1614 static int hclge_config_gro(struct hclge_dev *hdev)
1615 {
1616     struct hclge_cfg_gro_status_cmd *req;
1617     struct hclge_desc desc;
1618     int ret;
1619 
1620     if (!hnae3_dev_gro_supported(hdev))
1621         return 0;
1622 
1623     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
1624     req = (struct hclge_cfg_gro_status_cmd *)desc.data;
1625 
1626     req->gro_en = hdev->gro_en ? 1 : 0;
1627 
1628     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1629     if (ret)
1630         dev_err(&hdev->pdev->dev,
1631             "GRO hardware config cmd failed, ret = %d\n", ret);
1632 
1633     return ret;
1634 }
1635 
1636 static int hclge_alloc_tqps(struct hclge_dev *hdev)
1637 {
1638     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
1639     struct hclge_comm_tqp *tqp;
1640     int i;
1641 
1642     hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
1643                   sizeof(struct hclge_comm_tqp), GFP_KERNEL);
1644     if (!hdev->htqp)
1645         return -ENOMEM;
1646 
1647     tqp = hdev->htqp;
1648 
1649     for (i = 0; i < hdev->num_tqps; i++) {
1650         tqp->dev = &hdev->pdev->dev;
1651         tqp->index = i;
1652 
1653         tqp->q.ae_algo = &ae_algo;
1654         tqp->q.buf_size = hdev->rx_buf_len;
1655         tqp->q.tx_desc_num = hdev->num_tx_desc;
1656         tqp->q.rx_desc_num = hdev->num_rx_desc;
1657 
1658         /* need an extended offset to configure queues >=
1659          * HCLGE_TQP_MAX_SIZE_DEV_V2
1660          */
1661         if (i < HCLGE_TQP_MAX_SIZE_DEV_V2)
1662             tqp->q.io_base = hdev->hw.hw.io_base +
1663                      HCLGE_TQP_REG_OFFSET +
1664                      i * HCLGE_TQP_REG_SIZE;
1665         else
1666             tqp->q.io_base = hdev->hw.hw.io_base +
1667                      HCLGE_TQP_REG_OFFSET +
1668                      HCLGE_TQP_EXT_REG_OFFSET +
1669                      (i - HCLGE_TQP_MAX_SIZE_DEV_V2) *
1670                      HCLGE_TQP_REG_SIZE;
1671 
1672         /* When the device supports tx push and has device memory,
1673          * the queue can use either push mode or doorbell mode on
1674          * the device memory.
1675          */
1676         if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps))
1677             tqp->q.mem_base = hdev->hw.hw.mem_base +
1678                       HCLGE_TQP_MEM_OFFSET(hdev, i);
1679 
1680         tqp++;
1681     }
1682 
1683     return 0;
1684 }
1685 
1686 static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
1687                   u16 tqp_pid, u16 tqp_vid, bool is_pf)
1688 {
1689     struct hclge_tqp_map_cmd *req;
1690     struct hclge_desc desc;
1691     int ret;
1692 
1693     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
1694 
1695     req = (struct hclge_tqp_map_cmd *)desc.data;
1696     req->tqp_id = cpu_to_le16(tqp_pid);
1697     req->tqp_vf = func_id;
1698     req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
1699     if (!is_pf)
1700         req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
1701     req->tqp_vid = cpu_to_le16(tqp_vid);
1702 
1703     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1704     if (ret)
1705         dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
1706 
1707     return ret;
1708 }
1709 
1710 static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
1711 {
1712     struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1713     struct hclge_dev *hdev = vport->back;
1714     int i, alloced;
1715 
1716     for (i = 0, alloced = 0; i < hdev->num_tqps &&
1717          alloced < num_tqps; i++) {
1718         if (!hdev->htqp[i].alloced) {
1719             hdev->htqp[i].q.handle = &vport->nic;
1720             hdev->htqp[i].q.tqp_index = alloced;
1721             hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
1722             hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
1723             kinfo->tqp[alloced] = &hdev->htqp[i].q;
1724             hdev->htqp[i].alloced = true;
1725             alloced++;
1726         }
1727     }
1728     vport->alloc_tqps = alloced;
1729     kinfo->rss_size = min_t(u16, hdev->pf_rss_size_max,
1730                 vport->alloc_tqps / hdev->tm_info.num_tc);
1731 
1732     /* ensure a one-to-one mapping between irq and queue by default */
1733     kinfo->rss_size = min_t(u16, kinfo->rss_size,
1734                 (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
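    /* Illustrative example with assumed values: pf_rss_size_max = 64,
     * 16 allocated tqps, 4 TCs and 33 nic MSI vectors give
     * rss_size = min(64, 16 / 4, (33 - 1) / 4) = 4.
     */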
1735 
1736     return 0;
1737 }
1738 
1739 static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
1740                 u16 num_tx_desc, u16 num_rx_desc)
1741 
1742 {
1743     struct hnae3_handle *nic = &vport->nic;
1744     struct hnae3_knic_private_info *kinfo = &nic->kinfo;
1745     struct hclge_dev *hdev = vport->back;
1746     int ret;
1747 
1748     kinfo->num_tx_desc = num_tx_desc;
1749     kinfo->num_rx_desc = num_rx_desc;
1750 
1751     kinfo->rx_buf_len = hdev->rx_buf_len;
1752     kinfo->tx_spare_buf_size = hdev->tx_spare_buf_size;
1753 
1754     kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
1755                   sizeof(struct hnae3_queue *), GFP_KERNEL);
1756     if (!kinfo->tqp)
1757         return -ENOMEM;
1758 
1759     ret = hclge_assign_tqp(vport, num_tqps);
1760     if (ret)
1761         dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
1762 
1763     return ret;
1764 }
1765 
1766 static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
1767                   struct hclge_vport *vport)
1768 {
1769     struct hnae3_handle *nic = &vport->nic;
1770     struct hnae3_knic_private_info *kinfo;
1771     u16 i;
1772 
1773     kinfo = &nic->kinfo;
1774     for (i = 0; i < vport->alloc_tqps; i++) {
1775         struct hclge_comm_tqp *q =
1776             container_of(kinfo->tqp[i], struct hclge_comm_tqp, q);
1777         bool is_pf;
1778         int ret;
1779 
1780         is_pf = !(vport->vport_id);
1781         ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
1782                          i, is_pf);
1783         if (ret)
1784             return ret;
1785     }
1786 
1787     return 0;
1788 }
1789 
1790 static int hclge_map_tqp(struct hclge_dev *hdev)
1791 {
1792     struct hclge_vport *vport = hdev->vport;
1793     u16 i, num_vport;
1794 
1795     num_vport = hdev->num_req_vfs + 1;
1796     for (i = 0; i < num_vport; i++) {
1797         int ret;
1798 
1799         ret = hclge_map_tqp_to_vport(hdev, vport);
1800         if (ret)
1801             return ret;
1802 
1803         vport++;
1804     }
1805 
1806     return 0;
1807 }
1808 
1809 static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
1810 {
1811     struct hnae3_handle *nic = &vport->nic;
1812     struct hclge_dev *hdev = vport->back;
1813     int ret;
1814 
1815     nic->pdev = hdev->pdev;
1816     nic->ae_algo = &ae_algo;
1817     nic->numa_node_mask = hdev->numa_node_mask;
1818     nic->kinfo.io_base = hdev->hw.hw.io_base;
1819 
1820     ret = hclge_knic_setup(vport, num_tqps,
1821                    hdev->num_tx_desc, hdev->num_rx_desc);
1822     if (ret)
1823         dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
1824 
1825     return ret;
1826 }
1827 
1828 static int hclge_alloc_vport(struct hclge_dev *hdev)
1829 {
1830     struct pci_dev *pdev = hdev->pdev;
1831     struct hclge_vport *vport;
1832     u32 tqp_main_vport;
1833     u32 tqp_per_vport;
1834     int num_vport, i;
1835     int ret;
1836 
1837     /* We need to alloc a vport for the main NIC of the PF */
1838     num_vport = hdev->num_req_vfs + 1;
1839 
1840     if (hdev->num_tqps < num_vport) {
1841         dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
1842             hdev->num_tqps, num_vport);
1843         return -EINVAL;
1844     }
1845 
1846     /* Alloc the same number of TQPs for every vport */
1847     tqp_per_vport = hdev->num_tqps / num_vport;
1848     tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
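    /* For example (values assumed), 10 tqps shared by 3 vports (PF plus
     * 2 VFs) give tqp_per_vport = 10 / 3 = 3, and the main (PF) vport
     * additionally receives the remainder: 3 + 10 % 3 = 4 tqps.
     */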
1849 
1850     vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
1851                  GFP_KERNEL);
1852     if (!vport)
1853         return -ENOMEM;
1854 
1855     hdev->vport = vport;
1856     hdev->num_alloc_vport = num_vport;
1857 
1858     if (IS_ENABLED(CONFIG_PCI_IOV))
1859         hdev->num_alloc_vfs = hdev->num_req_vfs;
1860 
1861     for (i = 0; i < num_vport; i++) {
1862         vport->back = hdev;
1863         vport->vport_id = i;
1864         vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
1865         vport->mps = HCLGE_MAC_DEFAULT_FRAME;
1866         vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
1867         vport->port_base_vlan_cfg.tbl_sta = true;
1868         vport->rxvlan_cfg.rx_vlan_offload_en = true;
1869         vport->req_vlan_fltr_en = true;
1870         INIT_LIST_HEAD(&vport->vlan_list);
1871         INIT_LIST_HEAD(&vport->uc_mac_list);
1872         INIT_LIST_HEAD(&vport->mc_mac_list);
1873         spin_lock_init(&vport->mac_list_lock);
1874 
1875         if (i == 0)
1876             ret = hclge_vport_setup(vport, tqp_main_vport);
1877         else
1878             ret = hclge_vport_setup(vport, tqp_per_vport);
1879         if (ret) {
1880             dev_err(&pdev->dev,
1881                 "vport setup failed for vport %d, %d\n",
1882                 i, ret);
1883             return ret;
1884         }
1885 
1886         vport++;
1887     }
1888 
1889     return 0;
1890 }
1891 
1892 static int  hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
1893                     struct hclge_pkt_buf_alloc *buf_alloc)
1894 {
1895 /* TX buffer size is in units of 128 bytes */
1896 #define HCLGE_BUF_SIZE_UNIT_SHIFT   7
1897 #define HCLGE_BUF_SIZE_UPDATE_EN_MSK    BIT(15)
1898     struct hclge_tx_buff_alloc_cmd *req;
1899     struct hclge_desc desc;
1900     int ret;
1901     u8 i;
1902 
1903     req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
1904 
1905     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
1906     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1907         u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
1908 
1909         req->tx_pkt_buff[i] =
1910             cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
1911                      HCLGE_BUF_SIZE_UPDATE_EN_MSK);
1912     }
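    /* For example, a 32 KB (0x8000 byte) tx buffer is encoded as
     * 0x8000 >> 7 = 0x100 units of 128 bytes, with bit 15 set to
     * request the update (illustrative size, not a hardware default).
     */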
1913 
1914     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1915     if (ret)
1916         dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
1917             ret);
1918 
1919     return ret;
1920 }
1921 
1922 static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
1923                  struct hclge_pkt_buf_alloc *buf_alloc)
1924 {
1925     int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
1926 
1927     if (ret)
1928         dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
1929 
1930     return ret;
1931 }
1932 
1933 static u32 hclge_get_tc_num(struct hclge_dev *hdev)
1934 {
1935     unsigned int i;
1936     u32 cnt = 0;
1937 
1938     for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
1939         if (hdev->hw_tc_map & BIT(i))
1940             cnt++;
1941     return cnt;
1942 }
1943 
1944 /* Get the number of pfc enabled TCs, which have a private buffer */
1945 static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
1946                   struct hclge_pkt_buf_alloc *buf_alloc)
1947 {
1948     struct hclge_priv_buf *priv;
1949     unsigned int i;
1950     int cnt = 0;
1951 
1952     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1953         priv = &buf_alloc->priv_buf[i];
1954         if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
1955             priv->enable)
1956             cnt++;
1957     }
1958 
1959     return cnt;
1960 }
1961 
1962 /* Get the number of pfc disabled TCs, which have a private buffer */
1963 static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
1964                      struct hclge_pkt_buf_alloc *buf_alloc)
1965 {
1966     struct hclge_priv_buf *priv;
1967     unsigned int i;
1968     int cnt = 0;
1969 
1970     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1971         priv = &buf_alloc->priv_buf[i];
1972         if (hdev->hw_tc_map & BIT(i) &&
1973             !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
1974             priv->enable)
1975             cnt++;
1976     }
1977 
1978     return cnt;
1979 }
1980 
1981 static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1982 {
1983     struct hclge_priv_buf *priv;
1984     u32 rx_priv = 0;
1985     int i;
1986 
1987     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1988         priv = &buf_alloc->priv_buf[i];
1989         if (priv->enable)
1990             rx_priv += priv->buf_size;
1991     }
1992     return rx_priv;
1993 }
1994 
1995 static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
1996 {
1997     u32 i, total_tx_size = 0;
1998 
1999     for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
2000         total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
2001 
2002     return total_tx_size;
2003 }
2004 
2005 static bool  hclge_is_rx_buf_ok(struct hclge_dev *hdev,
2006                 struct hclge_pkt_buf_alloc *buf_alloc,
2007                 u32 rx_all)
2008 {
2009     u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
2010     u32 tc_num = hclge_get_tc_num(hdev);
2011     u32 shared_buf, aligned_mps;
2012     u32 rx_priv;
2013     int i;
2014 
2015     aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2016 
2017     if (hnae3_dev_dcb_supported(hdev))
2018         shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
2019                     hdev->dv_buf_size;
2020     else
2021         shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
2022                     + hdev->dv_buf_size;
2023 
2024     shared_buf_tc = tc_num * aligned_mps + aligned_mps;
2025     shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
2026                  HCLGE_BUF_SIZE_UNIT);
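    /* Worked example with assumed values (mps = 1500, dv_buf_size =
     * 8192, 4 TCs, DCB supported): aligned_mps = roundup(1500, 256) =
     * 1536, shared_buf_min = 2 * 1536 + 8192 = 11264, shared_buf_tc =
     * 4 * 1536 + 1536 = 7680, so shared_std = roundup(11264, 256) =
     * 11264.
     */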
2027 
2028     rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
2029     if (rx_all < rx_priv + shared_std)
2030         return false;
2031 
2032     shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
2033     buf_alloc->s_buf.buf_size = shared_buf;
2034     if (hnae3_dev_dcb_supported(hdev)) {
2035         buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
2036         buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
2037             - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
2038                   HCLGE_BUF_SIZE_UNIT);
2039     } else {
2040         buf_alloc->s_buf.self.high = aligned_mps +
2041                         HCLGE_NON_DCB_ADDITIONAL_BUF;
2042         buf_alloc->s_buf.self.low = aligned_mps;
2043     }
2044 
2045     if (hnae3_dev_dcb_supported(hdev)) {
2046         hi_thrd = shared_buf - hdev->dv_buf_size;
2047 
2048         if (tc_num <= NEED_RESERVE_TC_NUM)
2049             hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
2050                     / BUF_MAX_PERCENT;
2051 
2052         if (tc_num)
2053             hi_thrd = hi_thrd / tc_num;
2054 
2055         hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
2056         hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
2057         lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
2058     } else {
2059         hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
2060         lo_thrd = aligned_mps;
2061     }
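    /* Continuing the assumed numbers above with shared_buf = 20480:
     * hi_thrd = (20480 - 8192) / 4 = 3072, which already equals
     * 2 * aligned_mps and is a multiple of 256, and
     * lo_thrd = 3072 - 1536 / 2 = 2304.
     */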
2062 
2063     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2064         buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
2065         buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
2066     }
2067 
2068     return true;
2069 }
2070 
2071 static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
2072                 struct hclge_pkt_buf_alloc *buf_alloc)
2073 {
2074     u32 i, total_size;
2075 
2076     total_size = hdev->pkt_buf_size;
2077 
2078     /* alloc tx buffer for all enabled tc */
2079     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2080         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2081 
2082         if (hdev->hw_tc_map & BIT(i)) {
2083             if (total_size < hdev->tx_buf_size)
2084                 return -ENOMEM;
2085 
2086             priv->tx_buf_size = hdev->tx_buf_size;
2087         } else {
2088             priv->tx_buf_size = 0;
2089         }
2090 
2091         total_size -= priv->tx_buf_size;
2092     }
2093 
2094     return 0;
2095 }
2096 
2097 static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
2098                   struct hclge_pkt_buf_alloc *buf_alloc)
2099 {
2100     u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2101     u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
2102     unsigned int i;
2103 
2104     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2105         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2106 
2107         priv->enable = 0;
2108         priv->wl.low = 0;
2109         priv->wl.high = 0;
2110         priv->buf_size = 0;
2111 
2112         if (!(hdev->hw_tc_map & BIT(i)))
2113             continue;
2114 
2115         priv->enable = 1;
2116 
2117         if (hdev->tm_info.hw_pfc_map & BIT(i)) {
2118             priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
2119             priv->wl.high = roundup(priv->wl.low + aligned_mps,
2120                         HCLGE_BUF_SIZE_UNIT);
2121         } else {
2122             priv->wl.low = 0;
2123             priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
2124                     aligned_mps;
2125         }
2126 
2127         priv->buf_size = priv->wl.high + hdev->dv_buf_size;
2128     }
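    /* With aligned_mps = 1536 (assumed) and max = true, a PFC-enabled
     * TC gets wl.low = 1536 and wl.high = roundup(1536 + 1536, 256) =
     * 3072, while an enabled non-PFC TC gets wl.low = 0 and
     * wl.high = 2 * 1536 = 3072; buf_size adds dv_buf_size on top.
     */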
2129 
2130     return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2131 }
2132 
2133 static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
2134                       struct hclge_pkt_buf_alloc *buf_alloc)
2135 {
2136     u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2137     int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
2138     int i;
2139 
2140     /* clear the private buffer of the last TC first */
2141     for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2142         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2143         unsigned int mask = BIT((unsigned int)i);
2144 
2145         if (hdev->hw_tc_map & mask &&
2146             !(hdev->tm_info.hw_pfc_map & mask)) {
2147             /* Clear the no pfc TC private buffer */
2148             priv->wl.low = 0;
2149             priv->wl.high = 0;
2150             priv->buf_size = 0;
2151             priv->enable = 0;
2152             no_pfc_priv_num--;
2153         }
2154 
2155         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2156             no_pfc_priv_num == 0)
2157             break;
2158     }
2159 
2160     return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2161 }
2162 
2163 static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
2164                     struct hclge_pkt_buf_alloc *buf_alloc)
2165 {
2166     u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2167     int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
2168     int i;
2169 
2170     /* clear the private buffer of the last TC first */
2171     for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
2172         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2173         unsigned int mask = BIT((unsigned int)i);
2174 
2175         if (hdev->hw_tc_map & mask &&
2176             hdev->tm_info.hw_pfc_map & mask) {
2177             /* Reduce the number of pfc TC with private buffer */
2178             priv->wl.low = 0;
2179             priv->enable = 0;
2180             priv->wl.high = 0;
2181             priv->buf_size = 0;
2182             pfc_priv_num--;
2183         }
2184 
2185         if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
2186             pfc_priv_num == 0)
2187             break;
2188     }
2189 
2190     return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
2191 }
2192 
2193 static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
2194                       struct hclge_pkt_buf_alloc *buf_alloc)
2195 {
2196 #define COMPENSATE_BUFFER   0x3C00
2197 #define COMPENSATE_HALF_MPS_NUM 5
2198 #define PRIV_WL_GAP     0x1800
2199 
2200     u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
2201     u32 tc_num = hclge_get_tc_num(hdev);
2202     u32 half_mps = hdev->mps >> 1;
2203     u32 min_rx_priv;
2204     unsigned int i;
2205 
2206     if (tc_num)
2207         rx_priv = rx_priv / tc_num;
2208 
2209     if (tc_num <= NEED_RESERVE_TC_NUM)
2210         rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
2211 
2212     min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
2213             COMPENSATE_HALF_MPS_NUM * half_mps;
2214     min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
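    /* Example with assumed values dv_buf_size = 8192 and mps = 1500
     * (half_mps = 750): min_rx_priv = 8192 + 0x3C00 + 5 * 750 = 27302,
     * rounded up to 27392, the next multiple of 256.
     */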
2215     rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
2216     if (rx_priv < min_rx_priv)
2217         return false;
2218 
2219     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2220         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2221 
2222         priv->enable = 0;
2223         priv->wl.low = 0;
2224         priv->wl.high = 0;
2225         priv->buf_size = 0;
2226 
2227         if (!(hdev->hw_tc_map & BIT(i)))
2228             continue;
2229 
2230         priv->enable = 1;
2231         priv->buf_size = rx_priv;
2232         priv->wl.high = rx_priv - hdev->dv_buf_size;
2233         priv->wl.low = priv->wl.high - PRIV_WL_GAP;
2234     }
2235 
2236     buf_alloc->s_buf.buf_size = 0;
2237 
2238     return true;
2239 }
2240 
2241 /* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
2242  * @hdev: pointer to struct hclge_dev
2243  * @buf_alloc: pointer to buffer calculation data
2244  * @return: 0 on success, a negative error code on failure
2245  */
2246 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
2247                 struct hclge_pkt_buf_alloc *buf_alloc)
2248 {
2249     /* When DCB is not supported, rx private buffer is not allocated. */
2250     if (!hnae3_dev_dcb_supported(hdev)) {
2251         u32 rx_all = hdev->pkt_buf_size;
2252 
2253         rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
2254         if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
2255             return -ENOMEM;
2256 
2257         return 0;
2258     }
2259 
2260     if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
2261         return 0;
2262 
2263     if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
2264         return 0;
2265 
2266     /* try to decrease the buffer size */
2267     if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
2268         return 0;
2269 
2270     if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
2271         return 0;
2272 
2273     if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
2274         return 0;
2275 
2276     return -ENOMEM;
2277 }
2278 
2279 static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
2280                    struct hclge_pkt_buf_alloc *buf_alloc)
2281 {
2282     struct hclge_rx_priv_buff_cmd *req;
2283     struct hclge_desc desc;
2284     int ret;
2285     int i;
2286 
2287     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
2288     req = (struct hclge_rx_priv_buff_cmd *)desc.data;
2289 
2290     /* Alloc private buffer for each TC */
2291     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
2292         struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
2293 
2294         req->buf_num[i] =
2295             cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
2296         req->buf_num[i] |=
2297             cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
2298     }
2299 
2300     req->shared_buf =
2301         cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
2302                 (1 << HCLGE_TC0_PRI_BUF_EN_B));
2303 
2304     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2305     if (ret)
2306         dev_err(&hdev->pdev->dev,
2307             "rx private buffer alloc cmd failed %d\n", ret);
2308 
2309     return ret;
2310 }
2311 
2312 static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
2313                    struct hclge_pkt_buf_alloc *buf_alloc)
2314 {
2315     struct hclge_rx_priv_wl_buf *req;
2316     struct hclge_priv_buf *priv;
2317     struct hclge_desc desc[2];
2318     int i, j;
2319     int ret;
2320 
2321     for (i = 0; i < 2; i++) {
2322         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
2323                        false);
2324         req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
2325 
2326         /* The first descriptor sets the NEXT bit to 1 */
2327         if (i == 0)
2328             desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2329         else
2330             desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2331 
2332         for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2333             u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
2334 
2335             priv = &buf_alloc->priv_buf[idx];
2336             req->tc_wl[j].high =
2337                 cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
2338             req->tc_wl[j].high |=
2339                 cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2340             req->tc_wl[j].low =
2341                 cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
2342             req->tc_wl[j].low |=
2343                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2344         }
2345     }
2346 
2347     /* Send 2 descriptors at one time */
2348     ret = hclge_cmd_send(&hdev->hw, desc, 2);
2349     if (ret)
2350         dev_err(&hdev->pdev->dev,
2351             "rx private waterline config cmd failed %d\n",
2352             ret);
2353     return ret;
2354 }
2355 
2356 static int hclge_common_thrd_config(struct hclge_dev *hdev,
2357                     struct hclge_pkt_buf_alloc *buf_alloc)
2358 {
2359     struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
2360     struct hclge_rx_com_thrd *req;
2361     struct hclge_desc desc[2];
2362     struct hclge_tc_thrd *tc;
2363     int i, j;
2364     int ret;
2365 
2366     for (i = 0; i < 2; i++) {
2367         hclge_cmd_setup_basic_desc(&desc[i],
2368                        HCLGE_OPC_RX_COM_THRD_ALLOC, false);
2369         req = (struct hclge_rx_com_thrd *)&desc[i].data;
2370 
2371         /* The first descriptor sets the NEXT bit to 1 */
2372         if (i == 0)
2373             desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2374         else
2375             desc[i].flag &= ~cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
2376 
2377         for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
2378             tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
2379 
2380             req->com_thrd[j].high =
2381                 cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
2382             req->com_thrd[j].high |=
2383                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2384             req->com_thrd[j].low =
2385                 cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
2386             req->com_thrd[j].low |=
2387                  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2388         }
2389     }
2390 
2391     /* Send 2 descriptors at one time */
2392     ret = hclge_cmd_send(&hdev->hw, desc, 2);
2393     if (ret)
2394         dev_err(&hdev->pdev->dev,
2395             "common threshold config cmd failed %d\n", ret);
2396     return ret;
2397 }
2398 
2399 static int hclge_common_wl_config(struct hclge_dev *hdev,
2400                   struct hclge_pkt_buf_alloc *buf_alloc)
2401 {
2402     struct hclge_shared_buf *buf = &buf_alloc->s_buf;
2403     struct hclge_rx_com_wl *req;
2404     struct hclge_desc desc;
2405     int ret;
2406 
2407     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
2408 
2409     req = (struct hclge_rx_com_wl *)desc.data;
2410     req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
2411     req->com_wl.high |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2412 
2413     req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
2414     req->com_wl.low |=  cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
2415 
2416     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2417     if (ret)
2418         dev_err(&hdev->pdev->dev,
2419             "common waterline config cmd failed %d\n", ret);
2420 
2421     return ret;
2422 }
2423 
2424 int hclge_buffer_alloc(struct hclge_dev *hdev)
2425 {
2426     struct hclge_pkt_buf_alloc *pkt_buf;
2427     int ret;
2428 
2429     pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
2430     if (!pkt_buf)
2431         return -ENOMEM;
2432 
2433     ret = hclge_tx_buffer_calc(hdev, pkt_buf);
2434     if (ret) {
2435         dev_err(&hdev->pdev->dev,
2436             "could not calc tx buffer size for all TCs %d\n", ret);
2437         goto out;
2438     }
2439 
2440     ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
2441     if (ret) {
2442         dev_err(&hdev->pdev->dev,
2443             "could not alloc tx buffers %d\n", ret);
2444         goto out;
2445     }
2446 
2447     ret = hclge_rx_buffer_calc(hdev, pkt_buf);
2448     if (ret) {
2449         dev_err(&hdev->pdev->dev,
2450             "could not calc rx priv buffer size for all TCs %d\n",
2451             ret);
2452         goto out;
2453     }
2454 
2455     ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
2456     if (ret) {
2457         dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
2458             ret);
2459         goto out;
2460     }
2461 
2462     if (hnae3_dev_dcb_supported(hdev)) {
2463         ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
2464         if (ret) {
2465             dev_err(&hdev->pdev->dev,
2466                 "could not configure rx private waterline %d\n",
2467                 ret);
2468             goto out;
2469         }
2470 
2471         ret = hclge_common_thrd_config(hdev, pkt_buf);
2472         if (ret) {
2473             dev_err(&hdev->pdev->dev,
2474                 "could not configure common threshold %d\n",
2475                 ret);
2476             goto out;
2477         }
2478     }
2479 
2480     ret = hclge_common_wl_config(hdev, pkt_buf);
2481     if (ret)
2482         dev_err(&hdev->pdev->dev,
2483             "could not configure common waterline %d\n", ret);
2484 
2485 out:
2486     kfree(pkt_buf);
2487     return ret;
2488 }
2489 
2490 static int hclge_init_roce_base_info(struct hclge_vport *vport)
2491 {
2492     struct hnae3_handle *roce = &vport->roce;
2493     struct hnae3_handle *nic = &vport->nic;
2494     struct hclge_dev *hdev = vport->back;
2495 
2496     roce->rinfo.num_vectors = vport->back->num_roce_msi;
2497 
2498     if (hdev->num_msi < hdev->num_nic_msi + hdev->num_roce_msi)
2499         return -EINVAL;
2500 
2501     roce->rinfo.base_vector = hdev->num_nic_msi;
2502 
2503     roce->rinfo.netdev = nic->kinfo.netdev;
2504     roce->rinfo.roce_io_base = hdev->hw.hw.io_base;
2505     roce->rinfo.roce_mem_base = hdev->hw.hw.mem_base;
2506 
2507     roce->pdev = nic->pdev;
2508     roce->ae_algo = nic->ae_algo;
2509     roce->numa_node_mask = nic->numa_node_mask;
2510 
2511     return 0;
2512 }
2513 
2514 static int hclge_init_msi(struct hclge_dev *hdev)
2515 {
2516     struct pci_dev *pdev = hdev->pdev;
2517     int vectors;
2518     int i;
2519 
2520     vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
2521                     hdev->num_msi,
2522                     PCI_IRQ_MSI | PCI_IRQ_MSIX);
2523     if (vectors < 0) {
2524         dev_err(&pdev->dev,
2525             "failed(%d) to allocate MSI/MSI-X vectors\n",
2526             vectors);
2527         return vectors;
2528     }
2529     if (vectors < hdev->num_msi)
2530         dev_warn(&hdev->pdev->dev,
2531              "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
2532              hdev->num_msi, vectors);
2533 
2534     hdev->num_msi = vectors;
2535     hdev->num_msi_left = vectors;
2536 
2537     hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
2538                        sizeof(u16), GFP_KERNEL);
2539     if (!hdev->vector_status) {
2540         pci_free_irq_vectors(pdev);
2541         return -ENOMEM;
2542     }
2543 
2544     for (i = 0; i < hdev->num_msi; i++)
2545         hdev->vector_status[i] = HCLGE_INVALID_VPORT;
2546 
2547     hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
2548                     sizeof(int), GFP_KERNEL);
2549     if (!hdev->vector_irq) {
2550         pci_free_irq_vectors(pdev);
2551         return -ENOMEM;
2552     }
2553 
2554     return 0;
2555 }
2556 
2557 static u8 hclge_check_speed_dup(u8 duplex, int speed)
2558 {
2559     if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
2560         duplex = HCLGE_MAC_FULL;
2561 
2562     return duplex;
2563 }
2564 
2565 static struct hclge_mac_speed_map hclge_mac_speed_map_to_fw[] = {
2566     {HCLGE_MAC_SPEED_10M, HCLGE_FW_MAC_SPEED_10M},
2567     {HCLGE_MAC_SPEED_100M, HCLGE_FW_MAC_SPEED_100M},
2568     {HCLGE_MAC_SPEED_1G, HCLGE_FW_MAC_SPEED_1G},
2569     {HCLGE_MAC_SPEED_10G, HCLGE_FW_MAC_SPEED_10G},
2570     {HCLGE_MAC_SPEED_25G, HCLGE_FW_MAC_SPEED_25G},
2571     {HCLGE_MAC_SPEED_40G, HCLGE_FW_MAC_SPEED_40G},
2572     {HCLGE_MAC_SPEED_50G, HCLGE_FW_MAC_SPEED_50G},
2573     {HCLGE_MAC_SPEED_100G, HCLGE_FW_MAC_SPEED_100G},
2574     {HCLGE_MAC_SPEED_200G, HCLGE_FW_MAC_SPEED_200G},
2575 };
2576 
2577 static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
2578 {
2579     u16 i;
2580 
2581     for (i = 0; i < ARRAY_SIZE(hclge_mac_speed_map_to_fw); i++) {
2582         if (hclge_mac_speed_map_to_fw[i].speed_drv == speed_drv) {
2583             *speed_fw = hclge_mac_speed_map_to_fw[i].speed_fw;
2584             return 0;
2585         }
2586     }
2587 
2588     return -EINVAL;
2589 }
2590 
2591 static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
2592                       u8 duplex)
2593 {
2594     struct hclge_config_mac_speed_dup_cmd *req;
2595     struct hclge_desc desc;
2596     u32 speed_fw;
2597     int ret;
2598 
2599     req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
2600 
2601     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
2602 
2603     if (duplex)
2604         hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
2605 
2606     ret = hclge_convert_to_fw_speed(speed, &speed_fw);
2607     if (ret) {
2608         dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
2609         return ret;
2610     }
2611 
2612     hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, HCLGE_CFG_SPEED_S,
2613             speed_fw);
2614     hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
2615               1);
2616 
2617     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2618     if (ret) {
2619         dev_err(&hdev->pdev->dev,
2620             "mac speed/duplex config cmd failed %d.\n", ret);
2621         return ret;
2622     }
2623 
2624     return 0;
2625 }
2626 
2627 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
2628 {
2629     struct hclge_mac *mac = &hdev->hw.mac;
2630     int ret;
2631 
2632     duplex = hclge_check_speed_dup(duplex, speed);
2633     if (!mac->support_autoneg && mac->speed == speed &&
2634         mac->duplex == duplex)
2635         return 0;
2636 
2637     ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
2638     if (ret)
2639         return ret;
2640 
2641     hdev->hw.mac.speed = speed;
2642     hdev->hw.mac.duplex = duplex;
2643 
2644     return 0;
2645 }
2646 
2647 static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
2648                      u8 duplex)
2649 {
2650     struct hclge_vport *vport = hclge_get_vport(handle);
2651     struct hclge_dev *hdev = vport->back;
2652 
2653     return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
2654 }
2655 
2656 static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
2657 {
2658     struct hclge_config_auto_neg_cmd *req;
2659     struct hclge_desc desc;
2660     u32 flag = 0;
2661     int ret;
2662 
2663     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
2664 
2665     req = (struct hclge_config_auto_neg_cmd *)desc.data;
2666     if (enable)
2667         hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
2668     req->cfg_an_cmd_flag = cpu_to_le32(flag);
2669 
2670     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2671     if (ret)
2672         dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
2673             ret);
2674 
2675     return ret;
2676 }
2677 
2678 static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
2679 {
2680     struct hclge_vport *vport = hclge_get_vport(handle);
2681     struct hclge_dev *hdev = vport->back;
2682 
2683     if (!hdev->hw.mac.support_autoneg) {
2684         if (enable) {
2685             dev_err(&hdev->pdev->dev,
2686                 "autoneg is not supported by current port\n");
2687             return -EOPNOTSUPP;
2688         } else {
2689             return 0;
2690         }
2691     }
2692 
2693     return hclge_set_autoneg_en(hdev, enable);
2694 }
2695 
2696 static int hclge_get_autoneg(struct hnae3_handle *handle)
2697 {
2698     struct hclge_vport *vport = hclge_get_vport(handle);
2699     struct hclge_dev *hdev = vport->back;
2700     struct phy_device *phydev = hdev->hw.mac.phydev;
2701 
2702     if (phydev)
2703         return phydev->autoneg;
2704 
2705     return hdev->hw.mac.autoneg;
2706 }
2707 
2708 static int hclge_restart_autoneg(struct hnae3_handle *handle)
2709 {
2710     struct hclge_vport *vport = hclge_get_vport(handle);
2711     struct hclge_dev *hdev = vport->back;
2712     int ret;
2713 
2714     dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
2715 
2716     ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
2717     if (ret)
2718         return ret;
2719     return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
2720 }
2721 
2722 static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
2723 {
2724     struct hclge_vport *vport = hclge_get_vport(handle);
2725     struct hclge_dev *hdev = vport->back;
2726 
2727     if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
2728         return hclge_set_autoneg_en(hdev, !halt);
2729 
2730     return 0;
2731 }
2732 
2733 static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
2734 {
2735     struct hclge_config_fec_cmd *req;
2736     struct hclge_desc desc;
2737     int ret;
2738 
2739     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
2740 
2741     req = (struct hclge_config_fec_cmd *)desc.data;
2742     if (fec_mode & BIT(HNAE3_FEC_AUTO))
2743         hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
2744     if (fec_mode & BIT(HNAE3_FEC_RS))
2745         hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2746                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
2747     if (fec_mode & BIT(HNAE3_FEC_BASER))
2748         hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
2749                 HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
2750 
2751     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2752     if (ret)
2753         dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
2754 
2755     return ret;
2756 }
2757 
2758 static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
2759 {
2760     struct hclge_vport *vport = hclge_get_vport(handle);
2761     struct hclge_dev *hdev = vport->back;
2762     struct hclge_mac *mac = &hdev->hw.mac;
2763     int ret;
2764 
2765     if (fec_mode && !(mac->fec_ability & fec_mode)) {
2766         dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
2767         return -EINVAL;
2768     }
2769 
2770     ret = hclge_set_fec_hw(hdev, fec_mode);
2771     if (ret)
2772         return ret;
2773 
2774     mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
2775     return 0;
2776 }
2777 
2778 static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
2779               u8 *fec_mode)
2780 {
2781     struct hclge_vport *vport = hclge_get_vport(handle);
2782     struct hclge_dev *hdev = vport->back;
2783     struct hclge_mac *mac = &hdev->hw.mac;
2784 
2785     if (fec_ability)
2786         *fec_ability = mac->fec_ability;
2787     if (fec_mode)
2788         *fec_mode = mac->fec_mode;
2789 }
2790 
2791 static int hclge_mac_init(struct hclge_dev *hdev)
2792 {
2793     struct hclge_mac *mac = &hdev->hw.mac;
2794     int ret;
2795 
2796     hdev->support_sfp_query = true;
2797     hdev->hw.mac.duplex = HCLGE_MAC_FULL;
2798     ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
2799                      hdev->hw.mac.duplex);
2800     if (ret)
2801         return ret;
2802 
2803     if (hdev->hw.mac.support_autoneg) {
2804         ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
2805         if (ret)
2806             return ret;
2807     }
2808 
2809     mac->link = 0;
2810 
2811     if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
2812         ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
2813         if (ret)
2814             return ret;
2815     }
2816 
2817     ret = hclge_set_mac_mtu(hdev, hdev->mps);
2818     if (ret) {
2819         dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
2820         return ret;
2821     }
2822 
2823     ret = hclge_set_default_loopback(hdev);
2824     if (ret)
2825         return ret;
2826 
2827     ret = hclge_buffer_alloc(hdev);
2828     if (ret)
2829         dev_err(&hdev->pdev->dev,
2830             "allocate buffer fail, ret=%d\n", ret);
2831 
2832     return ret;
2833 }
2834 
2835 static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
2836 {
2837     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2838         !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) {
2839         hdev->last_mbx_scheduled = jiffies;
2840         mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2841     }
2842 }
2843 
2844 static void hclge_reset_task_schedule(struct hclge_dev *hdev)
2845 {
2846     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2847         test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
2848         !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) {
2849         hdev->last_rst_scheduled = jiffies;
2850         mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2851     }
2852 }
2853 
2854 static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
2855 {
2856     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2857         !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
2858         mod_delayed_work(hclge_wq, &hdev->service_task, 0);
2859 }
2860 
2861 void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
2862 {
2863     if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
2864         !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
2865         mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
2866 }
2867 
2868 static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
2869 {
2870     struct hclge_link_status_cmd *req;
2871     struct hclge_desc desc;
2872     int ret;
2873 
2874     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
2875     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2876     if (ret) {
2877         dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
2878             ret);
2879         return ret;
2880     }
2881 
2882     req = (struct hclge_link_status_cmd *)desc.data;
2883     *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
2884         HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
2885 
2886     return 0;
2887 }
2888 
2889 static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
2890 {
2891     struct phy_device *phydev = hdev->hw.mac.phydev;
2892 
2893     *link_status = HCLGE_LINK_STATUS_DOWN;
2894 
2895     if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
2896         return 0;
2897 
2898     if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
2899         return 0;
2900 
2901     return hclge_get_mac_link_status(hdev, link_status);
2902 }
2903 
2904 static void hclge_push_link_status(struct hclge_dev *hdev)
2905 {
2906     struct hclge_vport *vport;
2907     int ret;
2908     u16 i;
2909 
2910     for (i = 0; i < pci_num_vf(hdev->pdev); i++) {
2911         vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
2912 
2913         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) ||
2914             vport->vf_info.link_state != IFLA_VF_LINK_STATE_AUTO)
2915             continue;
2916 
2917         ret = hclge_push_vf_link_status(vport);
2918         if (ret) {
2919             dev_err(&hdev->pdev->dev,
2920                 "failed to push link status to vf%u, ret = %d\n",
2921                 i, ret);
2922         }
2923     }
2924 }
2925 
2926 static void hclge_update_link_status(struct hclge_dev *hdev)
2927 {
2928     struct hnae3_handle *rhandle = &hdev->vport[0].roce;
2929     struct hnae3_handle *handle = &hdev->vport[0].nic;
2930     struct hnae3_client *rclient = hdev->roce_client;
2931     struct hnae3_client *client = hdev->nic_client;
2932     int state;
2933     int ret;
2934 
2935     if (!client)
2936         return;
2937 
2938     if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
2939         return;
2940 
2941     ret = hclge_get_mac_phy_link(hdev, &state);
2942     if (ret) {
2943         clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2944         return;
2945     }
2946 
2947     if (state != hdev->hw.mac.link) {
2948         hdev->hw.mac.link = state;
2949         client->ops->link_status_change(handle, state);
2950         hclge_config_mac_tnl_int(hdev, state);
2951         if (rclient && rclient->ops->link_status_change)
2952             rclient->ops->link_status_change(rhandle, state);
2953 
2954         hclge_push_link_status(hdev);
2955     }
2956 
2957     clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
2958 }
2959 
2960 static void hclge_update_speed_advertising(struct hclge_mac *mac)
2961 {
2962     u32 speed_ability;
2963 
2964     if (hclge_get_speed_bit(mac->speed, &speed_ability))
2965         return;
2966 
2967     switch (mac->module_type) {
2968     case HNAE3_MODULE_TYPE_FIBRE_LR:
2969         hclge_convert_setting_lr(speed_ability, mac->advertising);
2970         break;
2971     case HNAE3_MODULE_TYPE_FIBRE_SR:
2972     case HNAE3_MODULE_TYPE_AOC:
2973         hclge_convert_setting_sr(speed_ability, mac->advertising);
2974         break;
2975     case HNAE3_MODULE_TYPE_CR:
2976         hclge_convert_setting_cr(speed_ability, mac->advertising);
2977         break;
2978     case HNAE3_MODULE_TYPE_KR:
2979         hclge_convert_setting_kr(speed_ability, mac->advertising);
2980         break;
2981     default:
2982         break;
2983     }
2984 }
2985 
2986 static void hclge_update_fec_advertising(struct hclge_mac *mac)
2987 {
2988     if (mac->fec_mode & BIT(HNAE3_FEC_RS))
2989         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
2990                  mac->advertising);
2991     else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
2992         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
2993                  mac->advertising);
2994     else
2995         linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
2996                  mac->advertising);
2997 }
2998 
2999 static void hclge_update_pause_advertising(struct hclge_dev *hdev)
3000 {
3001     struct hclge_mac *mac = &hdev->hw.mac;
3002     bool rx_en, tx_en;
3003 
3004     switch (hdev->fc_mode_last_time) {
3005     case HCLGE_FC_RX_PAUSE:
3006         rx_en = true;
3007         tx_en = false;
3008         break;
3009     case HCLGE_FC_TX_PAUSE:
3010         rx_en = false;
3011         tx_en = true;
3012         break;
3013     case HCLGE_FC_FULL:
3014         rx_en = true;
3015         tx_en = true;
3016         break;
3017     default:
3018         rx_en = false;
3019         tx_en = false;
3020         break;
3021     }
3022 
3023     linkmode_set_pause(mac->advertising, tx_en, rx_en);
3024 }
3025 
3026 static void hclge_update_advertising(struct hclge_dev *hdev)
3027 {
3028     struct hclge_mac *mac = &hdev->hw.mac;
3029 
3030     linkmode_zero(mac->advertising);
3031     hclge_update_speed_advertising(mac);
3032     hclge_update_fec_advertising(mac);
3033     hclge_update_pause_advertising(hdev);
3034 }
3035 
3036 static void hclge_update_port_capability(struct hclge_dev *hdev,
3037                      struct hclge_mac *mac)
3038 {
3039     if (hnae3_dev_fec_supported(hdev))
3040         /* update fec ability by speed */
3041         hclge_convert_setting_fec(mac);
3042 
3043     /* firmware cannot identify the backplane type, the media type
3044      * read from the configuration can help to deal with it
3045      */
3046     if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
3047         mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
3048         mac->module_type = HNAE3_MODULE_TYPE_KR;
3049     else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3050         mac->module_type = HNAE3_MODULE_TYPE_TP;
3051 
3052     if (mac->support_autoneg) {
3053         linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
3054         linkmode_copy(mac->advertising, mac->supported);
3055     } else {
3056         linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
3057                    mac->supported);
3058         hclge_update_advertising(hdev);
3059     }
3060 }
3061 
3062 static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
3063 {
3064     struct hclge_sfp_info_cmd *resp;
3065     struct hclge_desc desc;
3066     int ret;
3067 
3068     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3069     resp = (struct hclge_sfp_info_cmd *)desc.data;
3070     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3071     if (ret == -EOPNOTSUPP) {
3072         dev_warn(&hdev->pdev->dev,
3073              "IMP does not support get SFP speed %d\n", ret);
3074         return ret;
3075     } else if (ret) {
3076         dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
3077         return ret;
3078     }
3079 
3080     *speed = le32_to_cpu(resp->speed);
3081 
3082     return 0;
3083 }
3084 
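/* Query the active speed, module type, FEC mode and autoneg ability of the
 * port from the IMP firmware. A reported speed of 0 is ignored, and old
 * firmware (speed_ability == 0) only provides the speed, so the remaining
 * fields are left untouched in that case.
 */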
3085 static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
3086 {
3087     struct hclge_sfp_info_cmd *resp;
3088     struct hclge_desc desc;
3089     int ret;
3090 
3091     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
3092     resp = (struct hclge_sfp_info_cmd *)desc.data;
3093 
3094     resp->query_type = QUERY_ACTIVE_SPEED;
3095 
3096     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3097     if (ret == -EOPNOTSUPP) {
3098         dev_warn(&hdev->pdev->dev,
3099              "IMP does not support get SFP info %d\n", ret);
3100         return ret;
3101     } else if (ret) {
3102         dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
3103         return ret;
3104     }
3105 
3106     /* In some cases, the mac speed got from IMP may be 0, it shouldn't be
3107      * set to mac->speed.
3108      */
3109     if (!le32_to_cpu(resp->speed))
3110         return 0;
3111 
3112     mac->speed = le32_to_cpu(resp->speed);
3113     /* if resp->speed_ability is 0, it means the firmware is an old
3114      * version, so do not update these params
3115      */
3116     if (resp->speed_ability) {
3117         mac->module_type = le32_to_cpu(resp->module_type);
3118         mac->speed_ability = le32_to_cpu(resp->speed_ability);
3119         mac->autoneg = resp->autoneg;
3120         mac->support_autoneg = resp->autoneg_ability;
3121         mac->speed_type = QUERY_ACTIVE_SPEED;
3122         if (!resp->active_fec)
3123             mac->fec_mode = 0;
3124         else
3125             mac->fec_mode = BIT(resp->active_fec);
3126     } else {
3127         mac->speed_type = QUERY_SFP_SPEED;
3128     }
3129 
3130     return 0;
3131 }
3132 
3133 static int hclge_get_phy_link_ksettings(struct hnae3_handle *handle,
3134                     struct ethtool_link_ksettings *cmd)
3135 {
3136     struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3137     struct hclge_vport *vport = hclge_get_vport(handle);
3138     struct hclge_phy_link_ksetting_0_cmd *req0;
3139     struct hclge_phy_link_ksetting_1_cmd *req1;
3140     u32 supported, advertising, lp_advertising;
3141     struct hclge_dev *hdev = vport->back;
3142     int ret;
3143 
3144     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3145                    true);
3146     desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3147     hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3148                    true);
3149 
3150     ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3151     if (ret) {
3152         dev_err(&hdev->pdev->dev,
3153             "failed to get phy link ksetting, ret = %d.\n", ret);
3154         return ret;
3155     }
3156 
3157     req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3158     cmd->base.autoneg = req0->autoneg;
3159     cmd->base.speed = le32_to_cpu(req0->speed);
3160     cmd->base.duplex = req0->duplex;
3161     cmd->base.port = req0->port;
3162     cmd->base.transceiver = req0->transceiver;
3163     cmd->base.phy_address = req0->phy_address;
3164     cmd->base.eth_tp_mdix = req0->eth_tp_mdix;
3165     cmd->base.eth_tp_mdix_ctrl = req0->eth_tp_mdix_ctrl;
3166     supported = le32_to_cpu(req0->supported);
3167     advertising = le32_to_cpu(req0->advertising);
3168     lp_advertising = le32_to_cpu(req0->lp_advertising);
3169     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
3170                         supported);
3171     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
3172                         advertising);
3173     ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
3174                         lp_advertising);
3175 
3176     req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3177     cmd->base.master_slave_cfg = req1->master_slave_cfg;
3178     cmd->base.master_slave_state = req1->master_slave_state;
3179 
3180     return 0;
3181 }
3182 
3183 static int
3184 hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
3185                  const struct ethtool_link_ksettings *cmd)
3186 {
3187     struct hclge_desc desc[HCLGE_PHY_LINK_SETTING_BD_NUM];
3188     struct hclge_vport *vport = hclge_get_vport(handle);
3189     struct hclge_phy_link_ksetting_0_cmd *req0;
3190     struct hclge_phy_link_ksetting_1_cmd *req1;
3191     struct hclge_dev *hdev = vport->back;
3192     u32 advertising;
3193     int ret;
3194 
3195     if (cmd->base.autoneg == AUTONEG_DISABLE &&
3196         ((cmd->base.speed != SPEED_100 && cmd->base.speed != SPEED_10) ||
3197          (cmd->base.duplex != DUPLEX_HALF &&
3198           cmd->base.duplex != DUPLEX_FULL)))
3199         return -EINVAL;
3200 
3201     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_PHY_LINK_KSETTING,
3202                    false);
3203     desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
3204     hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_PHY_LINK_KSETTING,
3205                    false);
3206 
3207     req0 = (struct hclge_phy_link_ksetting_0_cmd *)desc[0].data;
3208     req0->autoneg = cmd->base.autoneg;
3209     req0->speed = cpu_to_le32(cmd->base.speed);
3210     req0->duplex = cmd->base.duplex;
3211     ethtool_convert_link_mode_to_legacy_u32(&advertising,
3212                         cmd->link_modes.advertising);
3213     req0->advertising = cpu_to_le32(advertising);
3214     req0->eth_tp_mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
3215 
3216     req1 = (struct hclge_phy_link_ksetting_1_cmd *)desc[1].data;
3217     req1->master_slave_cfg = cmd->base.master_slave_cfg;
3218 
3219     ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PHY_LINK_SETTING_BD_NUM);
3220     if (ret) {
3221         dev_err(&hdev->pdev->dev,
3222             "failed to set phy link ksettings, ret = %d.\n", ret);
3223         return ret;
3224     }
3225 
3226     hdev->hw.mac.autoneg = cmd->base.autoneg;
3227     hdev->hw.mac.speed = cmd->base.speed;
3228     hdev->hw.mac.duplex = cmd->base.duplex;
3229     linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
3230 
3231     return 0;
3232 }
3233 
3234 static int hclge_update_tp_port_info(struct hclge_dev *hdev)
3235 {
3236     struct ethtool_link_ksettings cmd;
3237     int ret;
3238 
3239     if (!hnae3_dev_phy_imp_supported(hdev))
3240         return 0;
3241 
3242     ret = hclge_get_phy_link_ksettings(&hdev->vport->nic, &cmd);
3243     if (ret)
3244         return ret;
3245 
3246     hdev->hw.mac.autoneg = cmd.base.autoneg;
3247     hdev->hw.mac.speed = cmd.base.speed;
3248     hdev->hw.mac.duplex = cmd.base.duplex;
3249 
3250     return 0;
3251 }
3252 
3253 static int hclge_tp_port_init(struct hclge_dev *hdev)
3254 {
3255     struct ethtool_link_ksettings cmd;
3256 
3257     if (!hnae3_dev_phy_imp_supported(hdev))
3258         return 0;
3259 
3260     cmd.base.autoneg = hdev->hw.mac.autoneg;
3261     cmd.base.speed = hdev->hw.mac.speed;
3262     cmd.base.duplex = hdev->hw.mac.duplex;
3263     linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
3264 
3265     return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
3266 }
3267 
3268 static int hclge_update_port_info(struct hclge_dev *hdev)
3269 {
3270     struct hclge_mac *mac = &hdev->hw.mac;
3271     int speed;
3272     int ret;
3273 
3274     /* get the port info from SFP cmd if not copper port */
3275     if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
3276         return hclge_update_tp_port_info(hdev);
3277 
3278     /* if IMP does not support getting SFP/qSFP info, return directly */
3279     if (!hdev->support_sfp_query)
3280         return 0;
3281 
3282     if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3283         speed = mac->speed;
3284         ret = hclge_get_sfp_info(hdev, mac);
3285     } else {
3286         speed = HCLGE_MAC_SPEED_UNKNOWN;
3287         ret = hclge_get_sfp_speed(hdev, &speed);
3288     }
3289 
3290     if (ret == -EOPNOTSUPP) {
3291         hdev->support_sfp_query = false;
3292         return ret;
3293     } else if (ret) {
3294         return ret;
3295     }
3296 
3297     if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
3298         if (mac->speed_type == QUERY_ACTIVE_SPEED) {
3299             hclge_update_port_capability(hdev, mac);
3300             if (mac->speed != speed)
3301                 (void)hclge_tm_port_shaper_cfg(hdev);
3302             return 0;
3303         }
3304         return hclge_cfg_mac_speed_dup(hdev, mac->speed,
3305                            HCLGE_MAC_FULL);
3306     } else {
3307         if (speed == HCLGE_MAC_SPEED_UNKNOWN)
3308             return 0; /* do nothing if no SFP */
3309 
3310         /* must config full duplex for SFP */
3311         return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
3312     }
3313 }
3314 
3315 static int hclge_get_status(struct hnae3_handle *handle)
3316 {
3317     struct hclge_vport *vport = hclge_get_vport(handle);
3318     struct hclge_dev *hdev = vport->back;
3319 
3320     hclge_update_link_status(hdev);
3321 
3322     return hdev->hw.mac.link;
3323 }
3324 
3325 static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
3326 {
3327     if (!pci_num_vf(hdev->pdev)) {
3328         dev_err(&hdev->pdev->dev,
3329             "SRIOV is disabled, can not get vport(%d) info.\n", vf);
3330         return NULL;
3331     }
3332 
3333     if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
3334         dev_err(&hdev->pdev->dev,
3335             "vf id(%d) is out of range(0 <= vfid < %d)\n",
3336             vf, pci_num_vf(hdev->pdev));
3337         return NULL;
3338     }
3339 
3340     /* VFs start from 1 in vport */
3341     vf += HCLGE_VF_VPORT_START_NUM;
3342     return &hdev->vport[vf];
3343 }
3344 
3345 static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
3346                    struct ifla_vf_info *ivf)
3347 {
3348     struct hclge_vport *vport = hclge_get_vport(handle);
3349     struct hclge_dev *hdev = vport->back;
3350 
3351     vport = hclge_get_vf_vport(hdev, vf);
3352     if (!vport)
3353         return -EINVAL;
3354 
3355     ivf->vf = vf;
3356     ivf->linkstate = vport->vf_info.link_state;
3357     ivf->spoofchk = vport->vf_info.spoofchk;
3358     ivf->trusted = vport->vf_info.trusted;
3359     ivf->min_tx_rate = 0;
3360     ivf->max_tx_rate = vport->vf_info.max_tx_rate;
3361     ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
3362     ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
3363     ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
3364     ether_addr_copy(ivf->mac, vport->vf_info.mac);
3365 
3366     return 0;
3367 }
3368 
3369 static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
3370                    int link_state)
3371 {
3372     struct hclge_vport *vport = hclge_get_vport(handle);
3373     struct hclge_dev *hdev = vport->back;
3374     int link_state_old;
3375     int ret;
3376 
3377     vport = hclge_get_vf_vport(hdev, vf);
3378     if (!vport)
3379         return -EINVAL;
3380 
3381     link_state_old = vport->vf_info.link_state;
3382     vport->vf_info.link_state = link_state;
3383 
3384     /* return success directly if the VF is not alive, the VF will
3385      * query the link state itself when it starts to work.
3386      */
3387     if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3388         return 0;
3389 
3390     ret = hclge_push_vf_link_status(vport);
3391     if (ret) {
3392         vport->vf_info.link_state = link_state_old;
3393         dev_err(&hdev->pdev->dev,
3394             "failed to push vf%d link status, ret = %d\n", vf, ret);
3395     }
3396 
3397     return ret;
3398 }
3399 
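/* Decode the vector0 interrupt cause, checked in priority order:
 * IMP/global reset, MSI-X/RAS errors, PTP, then mailbox (CMDQ RX).
 */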
3400 static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
3401 {
3402     u32 cmdq_src_reg, msix_src_reg, hw_err_src_reg;
3403 
3404     /* fetch the events from their corresponding regs */
3405     cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
3406     msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
3407     hw_err_src_reg = hclge_read_dev(&hdev->hw,
3408                     HCLGE_RAS_PF_OTHER_INT_STS_REG);
3409 
3410     /* Assumption: if reset and mailbox events happen to be reported
3411      * together, we only process the reset event in this pass and defer
3412      * the processing of the mailbox events. Since we have not cleared the
3413      * RX CMDQ event this time, the hardware will raise another interrupt
3414      * just for the mailbox.
3415      *
3416      * check for vector0 reset event sources
3417      */
3418     if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
3419         dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
3420         set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
3421         set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3422         *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3423         hdev->rst_stats.imp_rst_cnt++;
3424         return HCLGE_VECTOR0_EVENT_RST;
3425     }
3426 
3427     if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
3428         dev_info(&hdev->pdev->dev, "global reset interrupt\n");
3429         set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3430         set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
3431         *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3432         hdev->rst_stats.global_rst_cnt++;
3433         return HCLGE_VECTOR0_EVENT_RST;
3434     }
3435 
3436     /* check for vector0 msix event and hardware error event source */
3437     if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK ||
3438         hw_err_src_reg & HCLGE_RAS_REG_ERR_MASK)
3439         return HCLGE_VECTOR0_EVENT_ERR;
3440 
3441     /* check for vector0 ptp event source */
3442     if (BIT(HCLGE_VECTOR0_REG_PTP_INT_B) & msix_src_reg) {
3443         *clearval = msix_src_reg;
3444         return HCLGE_VECTOR0_EVENT_PTP;
3445     }
3446 
3447     /* check for vector0 mailbox(=CMDQ RX) event source */
3448     if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
3449         cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
3450         *clearval = cmdq_src_reg;
3451         return HCLGE_VECTOR0_EVENT_MBX;
3452     }
3453 
3454     /* print other vector0 event source */
3455     dev_info(&hdev->pdev->dev,
3456          "INT status: CMDQ(%#x) HW errors(%#x) other(%#x)\n",
3457          cmdq_src_reg, hw_err_src_reg, msix_src_reg);
3458 
3459     return HCLGE_VECTOR0_EVENT_OTHER;
3460 }
3461 
3462 static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
3463                     u32 regclr)
3464 {
3465     switch (event_type) {
3466     case HCLGE_VECTOR0_EVENT_PTP:
3467     case HCLGE_VECTOR0_EVENT_RST:
3468         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
3469         break;
3470     case HCLGE_VECTOR0_EVENT_MBX:
3471         hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
3472         break;
3473     default:
3474         break;
3475     }
3476 }
3477 
3478 static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
3479 {
3480     hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
3481                 BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
3482                 BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
3483                 BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
3484     hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
3485 }
3486 
3487 static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
3488 {
3489     writel(enable ? 1 : 0, vector->addr);
3490 }
3491 
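/* Misc (vector0) interrupt handler: the vector is masked while the cause is
 * decoded and the matching task is scheduled; it is re-enabled here unless a
 * reset or error event is being handled, in which case the reset/error
 * handling path re-enables it once done.
 */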
3492 static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
3493 {
3494     struct hclge_dev *hdev = data;
3495     unsigned long flags;
3496     u32 clearval = 0;
3497     u32 event_cause;
3498 
3499     hclge_enable_vector(&hdev->misc_vector, false);
3500     event_cause = hclge_check_event_cause(hdev, &clearval);
3501 
3502     /* vector 0 interrupt is shared with reset and mailbox source events. */
3503     switch (event_cause) {
3504     case HCLGE_VECTOR0_EVENT_ERR:
3505         hclge_errhand_task_schedule(hdev);
3506         break;
3507     case HCLGE_VECTOR0_EVENT_RST:
3508         hclge_reset_task_schedule(hdev);
3509         break;
3510     case HCLGE_VECTOR0_EVENT_PTP:
3511         spin_lock_irqsave(&hdev->ptp->lock, flags);
3512         hclge_ptp_clean_tx_hwts(hdev);
3513         spin_unlock_irqrestore(&hdev->ptp->lock, flags);
3514         break;
3515     case HCLGE_VECTOR0_EVENT_MBX:
3516         /* If we are here then,
3517          * 1. either we are not handling any mbx task and we are not
3518          *    scheduled either,
3519          *                        OR
3520          * 2. we could be handling an mbx task but nothing more is
3521          *    scheduled.
3522          * In both cases, we should schedule the mbx task as there are
3523          * more mbx messages reported by this interrupt.
3524          */
3525         hclge_mbx_task_schedule(hdev);
3526         break;
3527     default:
3528         dev_warn(&hdev->pdev->dev,
3529              "received unknown or unhandled event of vector0\n");
3530         break;
3531     }
3532 
3533     hclge_clear_event_cause(hdev, event_cause, clearval);
3534 
3535     /* Enable interrupt if it is not caused by reset event or error event */
3536     if (event_cause == HCLGE_VECTOR0_EVENT_PTP ||
3537         event_cause == HCLGE_VECTOR0_EVENT_MBX ||
3538         event_cause == HCLGE_VECTOR0_EVENT_OTHER)
3539         hclge_enable_vector(&hdev->misc_vector, true);
3540 
3541     return IRQ_HANDLED;
3542 }
3543 
3544 static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
3545 {
3546     if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
3547         dev_warn(&hdev->pdev->dev,
3548              "vector(vector_id %d) has been freed.\n", vector_id);
3549         return;
3550     }
3551 
3552     hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
3553     hdev->num_msi_left += 1;
3554     hdev->num_msi_used -= 1;
3555 }
3556 
3557 static void hclge_get_misc_vector(struct hclge_dev *hdev)
3558 {
3559     struct hclge_misc_vector *vector = &hdev->misc_vector;
3560 
3561     vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
3562 
3563     vector->addr = hdev->hw.hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
3564     hdev->vector_status[0] = 0;
3565 
3566     hdev->num_msi_left -= 1;
3567     hdev->num_msi_used += 1;
3568 }
3569 
3570 static int hclge_misc_irq_init(struct hclge_dev *hdev)
3571 {
3572     int ret;
3573 
3574     hclge_get_misc_vector(hdev);
3575 
3576     /* this would be explicitly freed in the end */
3577     snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
3578          HCLGE_NAME, pci_name(hdev->pdev));
3579     ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
3580               0, hdev->misc_vector.name, hdev);
3581     if (ret) {
3582         hclge_free_vector(hdev, 0);
3583         dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
3584             hdev->misc_vector.vector_irq);
3585     }
3586 
3587     return ret;
3588 }
3589 
3590 static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
3591 {
3592     free_irq(hdev->misc_vector.vector_irq, hdev);
3593     hclge_free_vector(hdev, 0);
3594 }
3595 
3596 int hclge_notify_client(struct hclge_dev *hdev,
3597             enum hnae3_reset_notify_type type)
3598 {
3599     struct hnae3_handle *handle = &hdev->vport[0].nic;
3600     struct hnae3_client *client = hdev->nic_client;
3601     int ret;
3602 
3603     if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
3604         return 0;
3605 
3606     if (!client->ops->reset_notify)
3607         return -EOPNOTSUPP;
3608 
3609     ret = client->ops->reset_notify(handle, type);
3610     if (ret)
3611         dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
3612             type, ret);
3613 
3614     return ret;
3615 }
3616 
3617 static int hclge_notify_roce_client(struct hclge_dev *hdev,
3618                     enum hnae3_reset_notify_type type)
3619 {
3620     struct hnae3_handle *handle = &hdev->vport[0].roce;
3621     struct hnae3_client *client = hdev->roce_client;
3622     int ret;
3623 
3624     if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
3625         return 0;
3626 
3627     if (!client->ops->reset_notify)
3628         return -EOPNOTSUPP;
3629 
3630     ret = client->ops->reset_notify(handle, type);
3631     if (ret)
3632         dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
3633             type, ret);
3634 
3635     return ret;
3636 }
3637 
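/* Poll the relevant reset status register every 100 ms, up to 350 times
 * (roughly 35 seconds), and return -EBUSY if the reset bit never clears.
 */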
3638 static int hclge_reset_wait(struct hclge_dev *hdev)
3639 {
3640 #define HCLGE_RESET_WAIT_MS 100
3641 #define HCLGE_RESET_WAIT_CNT    350
3642 
3643     u32 val, reg, reg_bit;
3644     u32 cnt = 0;
3645 
3646     switch (hdev->reset_type) {
3647     case HNAE3_IMP_RESET:
3648         reg = HCLGE_GLOBAL_RESET_REG;
3649         reg_bit = HCLGE_IMP_RESET_BIT;
3650         break;
3651     case HNAE3_GLOBAL_RESET:
3652         reg = HCLGE_GLOBAL_RESET_REG;
3653         reg_bit = HCLGE_GLOBAL_RESET_BIT;
3654         break;
3655     case HNAE3_FUNC_RESET:
3656         reg = HCLGE_FUN_RST_ING;
3657         reg_bit = HCLGE_FUN_RST_ING_B;
3658         break;
3659     default:
3660         dev_err(&hdev->pdev->dev,
3661             "Wait for unsupported reset type: %d\n",
3662             hdev->reset_type);
3663         return -EINVAL;
3664     }
3665 
3666     val = hclge_read_dev(&hdev->hw, reg);
3667     while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
3668         msleep(HCLGE_RESET_WAIT_MS);
3669         val = hclge_read_dev(&hdev->hw, reg);
3670         cnt++;
3671     }
3672 
3673     if (cnt >= HCLGE_RESET_WAIT_CNT) {
3674         dev_warn(&hdev->pdev->dev,
3675              "Wait for reset timeout: %d\n", hdev->reset_type);
3676         return -EBUSY;
3677     }
3678 
3679     return 0;
3680 }
3681 
3682 static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
3683 {
3684     struct hclge_vf_rst_cmd *req;
3685     struct hclge_desc desc;
3686 
3687     req = (struct hclge_vf_rst_cmd *)desc.data;
3688     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
3689     req->dest_vfid = func_id;
3690 
3691     if (reset)
3692         req->vf_rst = 0x1;
3693 
3694     return hclge_cmd_send(&hdev->hw, &desc, 1);
3695 }
3696 
3697 static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
3698 {
3699     int i;
3700 
3701     for (i = HCLGE_VF_VPORT_START_NUM; i < hdev->num_alloc_vport; i++) {
3702         struct hclge_vport *vport = &hdev->vport[i];
3703         int ret;
3704 
3705         /* Send cmd to set/clear VF's FUNC_RST_ING */
3706         ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
3707         if (ret) {
3708             dev_err(&hdev->pdev->dev,
3709                 "set vf(%u) rst failed %d!\n",
3710                 vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3711                 ret);
3712             return ret;
3713         }
3714 
3715         if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
3716             continue;
3717 
3718         /* Inform VF to process the reset.
3719          * hclge_inform_reset_assert_to_vf may fail if VF
3720          * driver is not loaded.
3721          */
3722         ret = hclge_inform_reset_assert_to_vf(vport);
3723         if (ret)
3724             dev_warn(&hdev->pdev->dev,
3725                  "inform reset to vf(%u) failed %d!\n",
3726                  vport->vport_id - HCLGE_VF_VPORT_START_NUM,
3727                  ret);
3728     }
3729 
3730     return 0;
3731 }
3732 
3733 static void hclge_mailbox_service_task(struct hclge_dev *hdev)
3734 {
3735     if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
3736         test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state) ||
3737         test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
3738         return;
3739 
3740     if (time_is_before_jiffies(hdev->last_mbx_scheduled +
3741                    HCLGE_MBX_SCHED_TIMEOUT))
3742         dev_warn(&hdev->pdev->dev,
3743              "mbx service task is scheduled after %ums on cpu%u!\n",
3744              jiffies_to_msecs(jiffies - hdev->last_mbx_scheduled),
3745              smp_processor_id());
3746 
3747     hclge_mbx_handler(hdev);
3748 
3749     clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
3750 }
3751 
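/* Before asserting a PF or FLR reset, poll the firmware until every VF
 * reports that it has stopped IO; mailbox requests are still serviced
 * between polls so VFs can bring their netdevs down. Old firmware without
 * this query simply gets a fixed 100 ms grace period.
 */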
3752 static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
3753 {
3754     struct hclge_pf_rst_sync_cmd *req;
3755     struct hclge_desc desc;
3756     int cnt = 0;
3757     int ret;
3758 
3759     req = (struct hclge_pf_rst_sync_cmd *)desc.data;
3760     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
3761 
3762     do {
3763         /* the VF needs to down its netdev via mbx during PF or FLR reset */
3764         hclge_mailbox_service_task(hdev);
3765 
3766         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3767         /* for compatibility with old firmware, wait
3768          * 100 ms for the VF to stop IO
3769          */
3770         if (ret == -EOPNOTSUPP) {
3771             msleep(HCLGE_RESET_SYNC_TIME);
3772             return;
3773         } else if (ret) {
3774             dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
3775                  ret);
3776             return;
3777         } else if (req->all_vf_ready) {
3778             return;
3779         }
3780         msleep(HCLGE_PF_RESET_SYNC_TIME);
3781         hclge_comm_cmd_reuse_desc(&desc, true);
3782     } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
3783 
3784     dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
3785 }
3786 
3787 void hclge_report_hw_error(struct hclge_dev *hdev,
3788                enum hnae3_hw_error_type type)
3789 {
3790     struct hnae3_client *client = hdev->nic_client;
3791 
3792     if (!client || !client->ops->process_hw_error ||
3793         !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
3794         return;
3795 
3796     client->ops->process_hw_error(&hdev->vport[0].nic, type);
3797 }
3798 
3799 static void hclge_handle_imp_error(struct hclge_dev *hdev)
3800 {
3801     u32 reg_val;
3802 
3803     reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3804     if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
3805         hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
3806         reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
3807         hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3808     }
3809 
3810     if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
3811         hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
3812         reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
3813         hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
3814     }
3815 }
3816 
3817 int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
3818 {
3819     struct hclge_desc desc;
3820     struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
3821     int ret;
3822 
3823     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
3824     hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
3825     req->fun_reset_vfid = func_id;
3826 
3827     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3828     if (ret)
3829         dev_err(&hdev->pdev->dev,
3830             "send function reset cmd fail, status =%d\n", ret);
3831 
3832     return ret;
3833 }
3834 
3835 static void hclge_do_reset(struct hclge_dev *hdev)
3836 {
3837     struct hnae3_handle *handle = &hdev->vport[0].nic;
3838     struct pci_dev *pdev = hdev->pdev;
3839     u32 val;
3840 
3841     if (hclge_get_hw_reset_stat(handle)) {
3842         dev_info(&pdev->dev, "hardware reset not finish\n");
3843         dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
3844              hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
3845              hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
3846         return;
3847     }
3848 
3849     switch (hdev->reset_type) {
3850     case HNAE3_IMP_RESET:
3851         dev_info(&pdev->dev, "IMP reset requested\n");
3852         val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3853         hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1);
3854         hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val);
3855         break;
3856     case HNAE3_GLOBAL_RESET:
3857         dev_info(&pdev->dev, "global reset requested\n");
3858         val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
3859         hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
3860         hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
3861         break;
3862     case HNAE3_FUNC_RESET:
3863         dev_info(&pdev->dev, "PF reset requested\n");
3864         /* schedule again to check later */
3865         set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
3866         hclge_reset_task_schedule(hdev);
3867         break;
3868     default:
3869         dev_warn(&pdev->dev,
3870              "unsupported reset type: %d\n", hdev->reset_type);
3871         break;
3872     }
3873 }
3874 
3875 static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
3876                            unsigned long *addr)
3877 {
3878     enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
3879     struct hclge_dev *hdev = ae_dev->priv;
3880 
3881     /* return the highest priority reset level amongst all */
3882     if (test_bit(HNAE3_IMP_RESET, addr)) {
3883         rst_level = HNAE3_IMP_RESET;
3884         clear_bit(HNAE3_IMP_RESET, addr);
3885         clear_bit(HNAE3_GLOBAL_RESET, addr);
3886         clear_bit(HNAE3_FUNC_RESET, addr);
3887     } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
3888         rst_level = HNAE3_GLOBAL_RESET;
3889         clear_bit(HNAE3_GLOBAL_RESET, addr);
3890         clear_bit(HNAE3_FUNC_RESET, addr);
3891     } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
3892         rst_level = HNAE3_FUNC_RESET;
3893         clear_bit(HNAE3_FUNC_RESET, addr);
3894     } else if (test_bit(HNAE3_FLR_RESET, addr)) {
3895         rst_level = HNAE3_FLR_RESET;
3896         clear_bit(HNAE3_FLR_RESET, addr);
3897     }
3898 
3899     if (hdev->reset_type != HNAE3_NONE_RESET &&
3900         rst_level < hdev->reset_type)
3901         return HNAE3_NONE_RESET;
3902 
3903     return rst_level;
3904 }
3905 
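/* Once an IMP or global reset has been handled, re-enable the misc vector;
 * on revision 0x20 hardware the pending reset interrupt source is also
 * cleared here, since it only becomes clearable after the hardware reset
 * has completed.
 */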
3906 static void hclge_clear_reset_cause(struct hclge_dev *hdev)
3907 {
3908     u32 clearval = 0;
3909 
3910     switch (hdev->reset_type) {
3911     case HNAE3_IMP_RESET:
3912         clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
3913         break;
3914     case HNAE3_GLOBAL_RESET:
3915         clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
3916         break;
3917     default:
3918         break;
3919     }
3920 
3921     if (!clearval)
3922         return;
3923 
3924     /* For revision 0x20, the reset interrupt source
3925      * can only be cleared after the hardware reset is done
3926      */
3927     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
3928         hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
3929                 clearval);
3930 
3931     hclge_enable_vector(&hdev->misc_vector, true);
3932 }
3933 
3934 static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
3935 {
3936     u32 reg_val;
3937 
3938     reg_val = hclge_read_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG);
3939     if (enable)
3940         reg_val |= HCLGE_COMM_NIC_SW_RST_RDY;
3941     else
3942         reg_val &= ~HCLGE_COMM_NIC_SW_RST_RDY;
3943 
3944     hclge_write_dev(&hdev->hw, HCLGE_COMM_NIC_CSQ_DEPTH_REG, reg_val);
3945 }
3946 
3947 static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
3948 {
3949     int ret;
3950 
3951     ret = hclge_set_all_vf_rst(hdev, true);
3952     if (ret)
3953         return ret;
3954 
3955     hclge_func_reset_sync_vf(hdev);
3956 
3957     return 0;
3958 }
3959 
3960 static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
3961 {
3962     u32 reg_val;
3963     int ret = 0;
3964 
3965     switch (hdev->reset_type) {
3966     case HNAE3_FUNC_RESET:
3967         ret = hclge_func_reset_notify_vf(hdev);
3968         if (ret)
3969             return ret;
3970 
3971         ret = hclge_func_reset_cmd(hdev, 0);
3972         if (ret) {
3973             dev_err(&hdev->pdev->dev,
3974                 "asserting function reset fail %d!\n", ret);
3975             return ret;
3976         }
3977 
3978         /* After performing PF reset, it is not necessary to do the
3979          * mailbox handling or send any command to firmware, because
3980          * any mailbox handling or command to firmware is only valid
3981          * after hclge_comm_cmd_init is called.
3982          */
3983         set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
3984         hdev->rst_stats.pf_rst_cnt++;
3985         break;
3986     case HNAE3_FLR_RESET:
3987         ret = hclge_func_reset_notify_vf(hdev);
3988         if (ret)
3989             return ret;
3990         break;
3991     case HNAE3_IMP_RESET:
3992         hclge_handle_imp_error(hdev);
3993         reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
3994         hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
3995                 BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
3996         break;
3997     default:
3998         break;
3999     }
4000 
4001     /* inform hardware that preparatory work is done */
4002     msleep(HCLGE_RESET_SYNC_TIME);
4003     hclge_reset_handshake(hdev, true);
4004     dev_info(&hdev->pdev->dev, "prepare wait ok\n");
4005 
4006     return ret;
4007 }
4008 
4009 static void hclge_show_rst_info(struct hclge_dev *hdev)
4010 {
4011     char *buf;
4012 
4013     buf = kzalloc(HCLGE_DBG_RESET_INFO_LEN, GFP_KERNEL);
4014     if (!buf)
4015         return;
4016 
4017     hclge_dbg_dump_rst_info(hdev, buf, HCLGE_DBG_RESET_INFO_LEN);
4018 
4019     dev_info(&hdev->pdev->dev, "dump reset info:\n%s", buf);
4020 
4021     kfree(buf);
4022 }
4023 
4024 static bool hclge_reset_err_handle(struct hclge_dev *hdev)
4025 {
4026 #define MAX_RESET_FAIL_CNT 5
4027 
4028     if (hdev->reset_pending) {
4029         dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
4030              hdev->reset_pending);
4031         return true;
4032     } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
4033            HCLGE_RESET_INT_M) {
4034         dev_info(&hdev->pdev->dev,
4035              "reset failed because new reset interrupt\n");
4036         hclge_clear_reset_cause(hdev);
4037         return false;
4038     } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
4039         hdev->rst_stats.reset_fail_cnt++;
4040         set_bit(hdev->reset_type, &hdev->reset_pending);
4041         dev_info(&hdev->pdev->dev,
4042              "re-schedule reset task(%u)\n",
4043              hdev->rst_stats.reset_fail_cnt);
4044         return true;
4045     }
4046 
4047     hclge_clear_reset_cause(hdev);
4048 
4049     /* recover the handshake status when reset fails */
4050     hclge_reset_handshake(hdev, true);
4051 
4052     dev_err(&hdev->pdev->dev, "Reset fail!\n");
4053 
4054     hclge_show_rst_info(hdev);
4055 
4056     set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4057 
4058     return false;
4059 }
4060 
4061 static void hclge_update_reset_level(struct hclge_dev *hdev)
4062 {
4063     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4064     enum hnae3_reset_type reset_level;
4065 
4066     /* a reset request will not be set during reset, so clear
4067      * the pending reset request to avoid an unnecessary reset
4068      * caused by the same reason.
4069      */
4070     hclge_get_reset_level(ae_dev, &hdev->reset_request);
4071 
4072     /* if default_reset_request has a higher level reset request,
4073      * it should be handled as soon as possible, since some errors
4074      * need this kind of reset to be fixed.
4075      */
4076     reset_level = hclge_get_reset_level(ae_dev,
4077                         &hdev->default_reset_request);
4078     if (reset_level != HNAE3_NONE_RESET)
4079         set_bit(reset_level, &hdev->reset_request);
4080 }
4081 
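/* Tell the firmware that the PF has finished its reset handling; old
 * firmware that does not know this command is tolerated with a warning.
 */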
4082 static int hclge_set_rst_done(struct hclge_dev *hdev)
4083 {
4084     struct hclge_pf_rst_done_cmd *req;
4085     struct hclge_desc desc;
4086     int ret;
4087 
4088     req = (struct hclge_pf_rst_done_cmd *)desc.data;
4089     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
4090     req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
4091 
4092     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4093     /* To be compatible with the old firmware, which does not support
4094      * command HCLGE_OPC_PF_RST_DONE, just print a warning and
4095      * return success
4096      */
4097     if (ret == -EOPNOTSUPP) {
4098         dev_warn(&hdev->pdev->dev,
4099              "current firmware does not support command(0x%x)!\n",
4100              HCLGE_OPC_PF_RST_DONE);
4101         return 0;
4102     } else if (ret) {
4103         dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
4104             ret);
4105     }
4106 
4107     return ret;
4108 }
4109 
4110 static int hclge_reset_prepare_up(struct hclge_dev *hdev)
4111 {
4112     int ret = 0;
4113 
4114     switch (hdev->reset_type) {
4115     case HNAE3_FUNC_RESET:
4116     case HNAE3_FLR_RESET:
4117         ret = hclge_set_all_vf_rst(hdev, false);
4118         break;
4119     case HNAE3_GLOBAL_RESET:
4120     case HNAE3_IMP_RESET:
4121         ret = hclge_set_rst_done(hdev);
4122         break;
4123     default:
4124         break;
4125     }
4126 
4127     /* clear up the handshake status after re-initialization is done */
4128     hclge_reset_handshake(hdev, false);
4129 
4130     return ret;
4131 }
4132 
4133 static int hclge_reset_stack(struct hclge_dev *hdev)
4134 {
4135     int ret;
4136 
4137     ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
4138     if (ret)
4139         return ret;
4140 
4141     ret = hclge_reset_ae_dev(hdev->ae_dev);
4142     if (ret)
4143         return ret;
4144 
4145     return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
4146 }
4147 
4148 static int hclge_reset_prepare(struct hclge_dev *hdev)
4149 {
4150     int ret;
4151 
4152     hdev->rst_stats.reset_cnt++;
4153     /* perform reset of the stack & ae device for a client */
4154     ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
4155     if (ret)
4156         return ret;
4157 
4158     rtnl_lock();
4159     ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
4160     rtnl_unlock();
4161     if (ret)
4162         return ret;
4163 
4164     return hclge_reset_prepare_wait(hdev);
4165 }
4166 
4167 static int hclge_reset_rebuild(struct hclge_dev *hdev)
4168 {
4169     int ret;
4170 
4171     hdev->rst_stats.hw_reset_done_cnt++;
4172 
4173     ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
4174     if (ret)
4175         return ret;
4176 
4177     rtnl_lock();
4178     ret = hclge_reset_stack(hdev);
4179     rtnl_unlock();
4180     if (ret)
4181         return ret;
4182 
4183     hclge_clear_reset_cause(hdev);
4184 
4185     ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
4186     /* ignore the RoCE notify error if the reset has already failed
4187      * HCLGE_RESET_MAX_FAIL_CNT - 1 times
4188      */
4189     if (ret &&
4190         hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
4191         return ret;
4192 
4193     ret = hclge_reset_prepare_up(hdev);
4194     if (ret)
4195         return ret;
4196 
4197     rtnl_lock();
4198     ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
4199     rtnl_unlock();
4200     if (ret)
4201         return ret;
4202 
4203     ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
4204     if (ret)
4205         return ret;
4206 
4207     hdev->last_reset_time = jiffies;
4208     hdev->rst_stats.reset_fail_cnt = 0;
4209     hdev->rst_stats.reset_done_cnt++;
4210     clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
4211 
4212     hclge_update_reset_level(hdev);
4213 
4214     return 0;
4215 }
4216 
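/* Top-level reset flow: prepare (notify the clients down and assert the
 * reset), wait for the hardware to finish, then rebuild the stack and
 * clients. Any failure goes through hclge_reset_err_handle(), which may
 * re-schedule the reset task to retry.
 */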
4217 static void hclge_reset(struct hclge_dev *hdev)
4218 {
4219     if (hclge_reset_prepare(hdev))
4220         goto err_reset;
4221 
4222     if (hclge_reset_wait(hdev))
4223         goto err_reset;
4224 
4225     if (hclge_reset_rebuild(hdev))
4226         goto err_reset;
4227 
4228     return;
4229 
4230 err_reset:
4231     if (hclge_reset_err_handle(hdev))
4232         hclge_reset_task_schedule(hdev);
4233 }
4234 
4235 static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
4236 {
4237     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
4238     struct hclge_dev *hdev = ae_dev->priv;
4239 
4240     /* We might end up getting called broadly because of the 2 cases below:
4241      * 1. A recoverable error was conveyed through APEI and the only way to
4242      *    bring back normalcy is to reset.
4243      * 2. A new reset request from the stack due to a timeout.
4244      *
4245      * Check if this is a new reset request and we are not here just because
4246      * the last reset attempt did not succeed and the watchdog hit us again.
4247      * We know this if the last reset request did not occur very recently
4248      * (watchdog timer = 5 * HZ, so check after a sufficiently large time,
4249      * say 4 * 5 * HZ). In case of a new request we reset the "reset level"
4250      * to PF reset. And if it is a repeat of the most recent reset request,
4251      * we want to make sure the request is throttled; therefore, we will
4252      * not allow it again before 3 * HZ has elapsed.
4253      */
4254 
4255     if (time_before(jiffies, (hdev->last_reset_time +
4256                   HCLGE_RESET_INTERVAL))) {
4257         mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
4258         return;
4259     }
4260 
4261     if (hdev->default_reset_request) {
4262         hdev->reset_level =
4263             hclge_get_reset_level(ae_dev,
4264                           &hdev->default_reset_request);
4265     } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
4266         hdev->reset_level = HNAE3_FUNC_RESET;
4267     }
4268 
4269     dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
4270          hdev->reset_level);
4271 
4272     /* request reset & schedule reset task */
4273     set_bit(hdev->reset_level, &hdev->reset_request);
4274     hclge_reset_task_schedule(hdev);
4275 
4276     if (hdev->reset_level < HNAE3_GLOBAL_RESET)
4277         hdev->reset_level++;
4278 }
4279 
4280 static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
4281                     enum hnae3_reset_type rst_type)
4282 {
4283     struct hclge_dev *hdev = ae_dev->priv;
4284 
4285     set_bit(rst_type, &hdev->default_reset_request);
4286 }
4287 
4288 static void hclge_reset_timer(struct timer_list *t)
4289 {
4290     struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
4291 
4292     /* if default_reset_request has no value, it means that this reset
4293      * request has already been handled, so just return here
4294      */
4295     if (!hdev->default_reset_request)
4296         return;
4297 
4298     dev_info(&hdev->pdev->dev,
4299          "triggering reset in reset timer\n");
4300     hclge_reset_event(hdev->pdev, NULL);
4301 }
4302 
4303 static void hclge_reset_subtask(struct hclge_dev *hdev)
4304 {
4305     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4306 
4307     /* check if there is any ongoing reset in the hardware. This status can
4308      * be checked from reset_pending. If there is, then we need to wait for
4309      * the hardware to complete the reset.
4310      *    a. If we are able to figure out in reasonable time that the
4311      *       hardware has fully reset, then we can proceed with the driver
4312      *       and client reset.
4313      *    b. else, we can come back later to check this status, so
4314      *       re-schedule now.
4315      */
4316     hdev->last_reset_time = jiffies;
4317     hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
4318     if (hdev->reset_type != HNAE3_NONE_RESET)
4319         hclge_reset(hdev);
4320 
4321     /* check if we got any *new* reset requests to be honored */
4322     hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
4323     if (hdev->reset_type != HNAE3_NONE_RESET)
4324         hclge_do_reset(hdev);
4325 
4326     hdev->reset_type = HNAE3_NONE_RESET;
4327 }
4328 
4329 static void hclge_handle_err_reset_request(struct hclge_dev *hdev)
4330 {
4331     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4332     enum hnae3_reset_type reset_type;
4333 
4334     if (ae_dev->hw_err_reset_req) {
4335         reset_type = hclge_get_reset_level(ae_dev,
4336                            &ae_dev->hw_err_reset_req);
4337         hclge_set_def_reset_request(ae_dev, reset_type);
4338     }
4339 
4340     if (hdev->default_reset_request && ae_dev->ops->reset_event)
4341         ae_dev->ops->reset_event(hdev->pdev, NULL);
4342 
4343     /* enable interrupt after error handling complete */
4344     hclge_enable_vector(&hdev->misc_vector, true);
4345 }
4346 
4347 static void hclge_handle_err_recovery(struct hclge_dev *hdev)
4348 {
4349     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4350 
4351     ae_dev->hw_err_reset_req = 0;
4352 
4353     if (hclge_find_error_source(hdev)) {
4354         hclge_handle_error_info_log(ae_dev);
4355         hclge_handle_mac_tnl(hdev);
4356     }
4357 
4358     hclge_handle_err_reset_request(hdev);
4359 }
4360 
4361 static void hclge_misc_err_recovery(struct hclge_dev *hdev)
4362 {
4363     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
4364     struct device *dev = &hdev->pdev->dev;
4365     u32 msix_sts_reg;
4366 
4367     msix_sts_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
4368     if (msix_sts_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
4369         if (hclge_handle_hw_msix_error
4370                 (hdev, &hdev->default_reset_request))
4371             dev_info(dev, "received msix interrupt 0x%x\n",
4372                  msix_sts_reg);
4373     }
4374 
4375     hclge_handle_hw_ras_error(ae_dev);
4376 
4377     hclge_handle_err_reset_request(hdev);
4378 }
4379 
4380 static void hclge_errhand_service_task(struct hclge_dev *hdev)
4381 {
4382     if (!test_and_clear_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
4383         return;
4384 
4385     if (hnae3_dev_ras_imp_supported(hdev))
4386         hclge_handle_err_recovery(hdev);
4387     else
4388         hclge_misc_err_recovery(hdev);
4389 }
4390 
4391 static void hclge_reset_service_task(struct hclge_dev *hdev)
4392 {
4393     if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
4394         return;
4395 
4396     if (time_is_before_jiffies(hdev->last_rst_scheduled +
4397                    HCLGE_RESET_SCHED_TIMEOUT))
4398         dev_warn(&hdev->pdev->dev,
4399              "reset service task is scheduled after %ums on cpu%u!\n",
4400              jiffies_to_msecs(jiffies - hdev->last_rst_scheduled),
4401              smp_processor_id());
4402 
4403     down(&hdev->reset_sem);
4404     set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4405 
4406     hclge_reset_subtask(hdev);
4407 
4408     clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
4409     up(&hdev->reset_sem);
4410 }
4411 
4412 static void hclge_update_vport_alive(struct hclge_dev *hdev)
4413 {
4414     int i;
4415 
4416     /* start from vport 1 since the PF is always alive */
4417     for (i = 1; i < hdev->num_alloc_vport; i++) {
4418         struct hclge_vport *vport = &hdev->vport[i];
4419 
4420         if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
4421             clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
4422 
4423         /* If vf is not alive, set to default value */
4424         if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
4425             vport->mps = HCLGE_MAC_DEFAULT_FRAME;
4426     }
4427 }
4428 
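/* Periodic service work, nominally run once per second: the link state and
 * the MAC/promisc/FD tables are synced every pass, while stats updates and
 * ARFS expiry only run every few passes; the task re-schedules itself via
 * hclge_task_schedule().
 */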
4429 static void hclge_periodic_service_task(struct hclge_dev *hdev)
4430 {
4431     unsigned long delta = round_jiffies_relative(HZ);
4432 
4433     if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
4434         return;
4435 
4436     /* Always handle the link updating to make sure link state is
4437      * updated when it is triggered by mbx.
4438      */
4439     hclge_update_link_status(hdev);
4440     hclge_sync_mac_table(hdev);
4441     hclge_sync_promisc_mode(hdev);
4442     hclge_sync_fd_table(hdev);
4443 
4444     if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
4445         delta = jiffies - hdev->last_serv_processed;
4446 
4447         if (delta < round_jiffies_relative(HZ)) {
4448             delta = round_jiffies_relative(HZ) - delta;
4449             goto out;
4450         }
4451     }
4452 
4453     hdev->serv_processed_cnt++;
4454     hclge_update_vport_alive(hdev);
4455 
4456     if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
4457         hdev->last_serv_processed = jiffies;
4458         goto out;
4459     }
4460 
4461     if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
4462         hclge_update_stats_for_all(hdev);
4463 
4464     hclge_update_port_info(hdev);
4465     hclge_sync_vlan_filter(hdev);
4466 
4467     if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
4468         hclge_rfs_filter_expire(hdev);
4469 
4470     hdev->last_serv_processed = jiffies;
4471 
4472 out:
4473     hclge_task_schedule(hdev, delta);
4474 }
4475 
4476 static void hclge_ptp_service_task(struct hclge_dev *hdev)
4477 {
4478     unsigned long flags;
4479 
4480     if (!test_bit(HCLGE_STATE_PTP_EN, &hdev->state) ||
4481         !test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state) ||
4482         !time_is_before_jiffies(hdev->ptp->tx_start + HZ))
4483         return;
4484 
4485     /* to prevent racing with the irq handler */
4486     spin_lock_irqsave(&hdev->ptp->lock, flags);
4487 
4488     /* check HCLGE_STATE_PTP_TX_HANDLING here again, since the irq
4489      * handler may handle it just before spin_lock_irqsave().
4490      */
4491     if (test_bit(HCLGE_STATE_PTP_TX_HANDLING, &hdev->state))
4492         hclge_ptp_clean_tx_hwts(hdev);
4493 
4494     spin_unlock_irqrestore(&hdev->ptp->lock, flags);
4495 }
4496 
4497 static void hclge_service_task(struct work_struct *work)
4498 {
4499     struct hclge_dev *hdev =
4500         container_of(work, struct hclge_dev, service_task.work);
4501 
4502     hclge_errhand_service_task(hdev);
4503     hclge_reset_service_task(hdev);
4504     hclge_ptp_service_task(hdev);
4505     hclge_mailbox_service_task(hdev);
4506     hclge_periodic_service_task(hdev);
4507 
4508     /* Handle error recovery, reset and mbx again in case the periodic
4509      * task delays the handling by calling hclge_task_schedule() in
4510      * hclge_periodic_service_task().
4511      */
4512     hclge_errhand_service_task(hdev);
4513     hclge_reset_service_task(hdev);
4514     hclge_mailbox_service_task(hdev);
4515 }
4516 
4517 struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
4518 {
4519     /* VF handle has no client */
4520     if (!handle->client)
4521         return container_of(handle, struct hclge_vport, nic);
4522     else if (handle->client->type == HNAE3_CLIENT_ROCE)
4523         return container_of(handle, struct hclge_vport, roce);
4524     else
4525         return container_of(handle, struct hclge_vport, nic);
4526 }
4527 
4528 static void hclge_get_vector_info(struct hclge_dev *hdev, u16 idx,
4529                   struct hnae3_vector_info *vector_info)
4530 {
4531 #define HCLGE_PF_MAX_VECTOR_NUM_DEV_V2  64
4532 
4533     vector_info->vector = pci_irq_vector(hdev->pdev, idx);
4534 
4535     /* need an extended offset to config vectors >= 64 */
4536     if (idx - 1 < HCLGE_PF_MAX_VECTOR_NUM_DEV_V2)
4537         vector_info->io_addr = hdev->hw.hw.io_base +
4538                 HCLGE_VECTOR_REG_BASE +
4539                 (idx - 1) * HCLGE_VECTOR_REG_OFFSET;
4540     else
4541         vector_info->io_addr = hdev->hw.hw.io_base +
4542                 HCLGE_VECTOR_EXT_REG_BASE +
4543                 (idx - 1) / HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4544                 HCLGE_VECTOR_REG_OFFSET_H +
4545                 (idx - 1) % HCLGE_PF_MAX_VECTOR_NUM_DEV_V2 *
4546                 HCLGE_VECTOR_REG_OFFSET;
4547 
4548     hdev->vector_status[idx] = hdev->vport[0].vport_id;
4549     hdev->vector_irq[idx] = vector_info->vector;
4550 }
4551 
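/* Allocate up to vector_num free MSI-X vectors for the NIC client, bounded
 * by the vectors left; index 0 is reserved for the misc vector, so the scan
 * starts from index 1. Returns the number of vectors actually allocated.
 */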
4552 static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
4553                 struct hnae3_vector_info *vector_info)
4554 {
4555     struct hclge_vport *vport = hclge_get_vport(handle);
4556     struct hnae3_vector_info *vector = vector_info;
4557     struct hclge_dev *hdev = vport->back;
4558     int alloc = 0;
4559     u16 i = 0;
4560     u16 j;
4561 
4562     vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
4563     vector_num = min(hdev->num_msi_left, vector_num);
4564 
4565     for (j = 0; j < vector_num; j++) {
4566         while (++i < hdev->num_nic_msi) {
4567             if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
4568                 hclge_get_vector_info(hdev, i, vector);
4569                 vector++;
4570                 alloc++;
4571 
4572                 break;
4573             }
4574         }
4575     }
4576     hdev->num_msi_left -= alloc;
4577     hdev->num_msi_used += alloc;
4578 
4579     return alloc;
4580 }
4581 
4582 static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
4583 {
4584     int i;
4585 
4586     for (i = 0; i < hdev->num_msi; i++)
4587         if (vector == hdev->vector_irq[i])
4588             return i;
4589 
4590     return -EINVAL;
4591 }
4592 
4593 static int hclge_put_vector(struct hnae3_handle *handle, int vector)
4594 {
4595     struct hclge_vport *vport = hclge_get_vport(handle);
4596     struct hclge_dev *hdev = vport->back;
4597     int vector_id;
4598 
4599     vector_id = hclge_get_vector_index(hdev, vector);
4600     if (vector_id < 0) {
4601         dev_err(&hdev->pdev->dev,
4602             "Get vector index fail. vector = %d\n", vector);
4603         return vector_id;
4604     }
4605 
4606     hclge_free_vector(hdev, vector_id);
4607 
4608     return 0;
4609 }
4610 
4611 static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
4612              u8 *key, u8 *hfunc)
4613 {
4614     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4615     struct hclge_vport *vport = hclge_get_vport(handle);
4616     struct hclge_comm_rss_cfg *rss_cfg = &vport->back->rss_cfg;
4617 
4618     hclge_comm_get_rss_hash_info(rss_cfg, key, hfunc);
4619 
4620     hclge_comm_get_rss_indir_tbl(rss_cfg, indir,
4621                      ae_dev->dev_specs.rss_ind_tbl_size);
4622 
4623     return 0;
4624 }
4625 
4626 static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
4627              const u8 *key, const u8 hfunc)
4628 {
4629     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
4630     struct hclge_vport *vport = hclge_get_vport(handle);
4631     struct hclge_dev *hdev = vport->back;
4632     struct hclge_comm_rss_cfg *rss_cfg = &hdev->rss_cfg;
4633     int ret, i;
4634 
4635     ret = hclge_comm_set_rss_hash_key(rss_cfg, &hdev->hw.hw, key, hfunc);
4636     if (ret) {
4637         dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
4638         return ret;
4639     }
4640 
4641     /* Update the shadow RSS table with user specified qids */
4642     for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
4643         rss_cfg->rss_indirection_tbl[i] = indir[i];
4644 
4645     /* Update the hardware */
4646     return hclge_comm_set_rss_indir_table(ae_dev, &hdev->hw.hw,
4647                           rss_cfg->rss_indirection_tbl);
4648 }
4649 
4650 static int hclge_set_rss_tuple(struct hnae3_handle *handle,
4651                    struct ethtool_rxnfc *nfc)
4652 {
4653     struct hclge_vport *vport = hclge_get_vport(handle);
4654     struct hclge_dev *hdev = vport->back;
4655     int ret;
4656 
4657     ret = hclge_comm_set_rss_tuple(hdev->ae_dev, &hdev->hw.hw,
4658                        &hdev->rss_cfg, nfc);
4659     if (ret) {
4660         dev_err(&hdev->pdev->dev,
4661             "failed to set rss tuple, ret = %d.\n", ret);
4662         return ret;
4663     }
4664 
4665     hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets);
4666     return 0;
4667 }
4668 
4669 static int hclge_get_rss_tuple(struct hnae3_handle *handle,
4670                    struct ethtool_rxnfc *nfc)
4671 {
4672     struct hclge_vport *vport = hclge_get_vport(handle);
4673     u8 tuple_sets;
4674     int ret;
4675 
4676     nfc->data = 0;
4677 
4678     ret = hclge_comm_get_rss_tuple(&vport->back->rss_cfg, nfc->flow_type,
4679                        &tuple_sets);
4680     if (ret || !tuple_sets)
4681         return ret;
4682 
4683     nfc->data = hclge_comm_convert_rss_tuple(tuple_sets);
4684 
4685     return 0;
4686 }
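/* Editorial note, not part of the driver: hclge_get_rss_tuple() and
 * hclge_set_rss_tuple() above back the ethtool RSS hash-option interface.
 * A hedged user-space example (the interface name is illustrative only):
 *
 *   ethtool -n eth0 rx-flow-hash tcp4        reads the current tuple set
 *   ethtool -N eth0 rx-flow-hash tcp4 sdfn   hashes TCP/IPv4 on src ip,
 *                                            dst ip, src port and dst port
 */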
4687 
4688 static int hclge_get_tc_size(struct hnae3_handle *handle)
4689 {
4690     struct hclge_vport *vport = hclge_get_vport(handle);
4691     struct hclge_dev *hdev = vport->back;
4692 
4693     return hdev->pf_rss_size_max;
4694 }
4695 
4696 static int hclge_init_rss_tc_mode(struct hclge_dev *hdev)
4697 {
4698     struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
4699     struct hclge_vport *vport = hdev->vport;
4700     u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
4701     u16 tc_valid[HCLGE_MAX_TC_NUM] = {0};
4702     u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
4703     struct hnae3_tc_info *tc_info;
4704     u16 roundup_size;
4705     u16 rss_size;
4706     int i;
4707 
4708     tc_info = &vport->nic.kinfo.tc_info;
4709     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
4710         rss_size = tc_info->tqp_count[i];
4711         tc_valid[i] = 0;
4712 
4713         if (!(hdev->hw_tc_map & BIT(i)))
4714             continue;
4715 
4716         /* The tc_size written to hardware is the log2 of rss_size
4717          * rounded up to a power of two; the actual queue size is
4718          * limited by the indirection table.
4719          */
4720         if (rss_size > ae_dev->dev_specs.rss_ind_tbl_size ||
4721             rss_size == 0) {
4722             dev_err(&hdev->pdev->dev,
4723                 "Configure rss tc size failed, invalid TC_SIZE = %u\n",
4724                 rss_size);
4725             return -EINVAL;
4726         }
4727 
4728         roundup_size = roundup_pow_of_two(rss_size);
4729         roundup_size = ilog2(roundup_size);
4730 
4731         tc_valid[i] = 1;
4732         tc_size[i] = roundup_size;
4733         tc_offset[i] = tc_info->tqp_offset[i];
4734     }
4735 
4736     return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
4737                       tc_size);
4738 }
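/* Editorial note, not part of the driver: a worked example of the tc_size
 * computation in hclge_init_rss_tc_mode() above. For a TC with
 * rss_size = 12:
 *
 *   roundup_pow_of_two(12) = 16,  ilog2(16) = 4,  so tc_size = 4
 *
 * i.e. hardware sees the TC as spanning 2^4 = 16 queue slots, while the
 * actual number of active queues (12) is bounded by the indirection table.
 */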
4739 
4740 int hclge_rss_init_hw(struct hclge_dev *hdev)
4741 {
4742     u16 *rss_indir = hdev->rss_cfg.rss_indirection_tbl;
4743     u8 *key = hdev->rss_cfg.rss_hash_key;
4744     u8 hfunc = hdev->rss_cfg.rss_algo;
4745     int ret;
4746 
4747     ret = hclge_comm_set_rss_indir_table(hdev->ae_dev, &hdev->hw.hw,
4748                          rss_indir);
4749     if (ret)
4750         return ret;
4751 
4752     ret = hclge_comm_set_rss_algo_key(&hdev->hw.hw, hfunc, key);
4753     if (ret)
4754         return ret;
4755 
4756     ret = hclge_comm_set_rss_input_tuple(&hdev->vport[0].nic,
4757                          &hdev->hw.hw, true,
4758                          &hdev->rss_cfg);
4759     if (ret)
4760         return ret;
4761 
4762     return hclge_init_rss_tc_mode(hdev);
4763 }
4764 
4765 int hclge_bind_ring_with_vector(struct hclge_vport *vport,
4766                 int vector_id, bool en,
4767                 struct hnae3_ring_chain_node *ring_chain)
4768 {
4769     struct hclge_dev *hdev = vport->back;
4770     struct hnae3_ring_chain_node *node;
4771     struct hclge_desc desc;
4772     struct hclge_ctrl_vector_chain_cmd *req =
4773         (struct hclge_ctrl_vector_chain_cmd *)desc.data;
4774     enum hclge_comm_cmd_status status;
4775     enum hclge_opcode_type op;
4776     u16 tqp_type_and_id;
4777     int i;
4778 
4779     op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
4780     hclge_cmd_setup_basic_desc(&desc, op, false);
4781     req->int_vector_id_l = hnae3_get_field(vector_id,
4782                            HCLGE_VECTOR_ID_L_M,
4783                            HCLGE_VECTOR_ID_L_S);
4784     req->int_vector_id_h = hnae3_get_field(vector_id,
4785                            HCLGE_VECTOR_ID_H_M,
4786                            HCLGE_VECTOR_ID_H_S);
4787 
4788     i = 0;
4789     for (node = ring_chain; node; node = node->next) {
4790         tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
4791         hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
4792                 HCLGE_INT_TYPE_S,
4793                 hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
4794         hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
4795                 HCLGE_TQP_ID_S, node->tqp_index);
4796         hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
4797                 HCLGE_INT_GL_IDX_S,
4798                 hnae3_get_field(node->int_gl_idx,
4799                         HNAE3_RING_GL_IDX_M,
4800                         HNAE3_RING_GL_IDX_S));
4801         req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
4802         if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
4803             req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
4804             req->vfid = vport->vport_id;
4805 
4806             status = hclge_cmd_send(&hdev->hw, &desc, 1);
4807             if (status) {
4808                 dev_err(&hdev->pdev->dev,
4809                     "Map TQP fail, status is %d.\n",
4810                     status);
4811                 return -EIO;
4812             }
4813             i = 0;
4814 
4815             hclge_cmd_setup_basic_desc(&desc,
4816                            op,
4817                            false);
4818             req->int_vector_id_l =
4819                 hnae3_get_field(vector_id,
4820                         HCLGE_VECTOR_ID_L_M,
4821                         HCLGE_VECTOR_ID_L_S);
4822             req->int_vector_id_h =
4823                 hnae3_get_field(vector_id,
4824                         HCLGE_VECTOR_ID_H_M,
4825                         HCLGE_VECTOR_ID_H_S);
4826         }
4827     }
4828 
4829     if (i > 0) {
4830         req->int_cause_num = i;
4831         req->vfid = vport->vport_id;
4832         status = hclge_cmd_send(&hdev->hw, &desc, 1);
4833         if (status) {
4834             dev_err(&hdev->pdev->dev,
4835                 "Map TQP fail, status is %d.\n", status);
4836             return -EIO;
4837         }
4838     }
4839 
4840     return 0;
4841 }
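/* Editorial note, not part of the driver: hclge_bind_ring_with_vector()
 * above batches ring-to-vector mappings into descriptors holding
 * HCLGE_VECTOR_ELEMENTS_PER_CMD entries each. As a hedged example, with
 * that constant equal to N and a chain of 2 * N + 3 rings, two full
 * descriptors are sent from inside the loop and the trailing "if (i > 0)"
 * block sends a third descriptor with int_cause_num = 3.
 */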
4842 
4843 static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
4844                     struct hnae3_ring_chain_node *ring_chain)
4845 {
4846     struct hclge_vport *vport = hclge_get_vport(handle);
4847     struct hclge_dev *hdev = vport->back;
4848     int vector_id;
4849 
4850     vector_id = hclge_get_vector_index(hdev, vector);
4851     if (vector_id < 0) {
4852         dev_err(&hdev->pdev->dev,
4853             "failed to get vector index. vector=%d\n", vector);
4854         return vector_id;
4855     }
4856 
4857     return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
4858 }
4859 
4860 static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
4861                        struct hnae3_ring_chain_node *ring_chain)
4862 {
4863     struct hclge_vport *vport = hclge_get_vport(handle);
4864     struct hclge_dev *hdev = vport->back;
4865     int vector_id, ret;
4866 
4867     if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
4868         return 0;
4869 
4870     vector_id = hclge_get_vector_index(hdev, vector);
4871     if (vector_id < 0) {
4872         dev_err(&handle->pdev->dev,
4873             "Get vector index fail. ret =%d\n", vector_id);
4874         return vector_id;
4875     }
4876 
4877     ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
4878     if (ret)
4879         dev_err(&handle->pdev->dev,
4880             "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
4881             vector_id, ret);
4882 
4883     return ret;
4884 }
4885 
4886 static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev, u8 vf_id,
4887                       bool en_uc, bool en_mc, bool en_bc)
4888 {
4889     struct hclge_vport *vport = &hdev->vport[vf_id];
4890     struct hnae3_handle *handle = &vport->nic;
4891     struct hclge_promisc_cfg_cmd *req;
4892     struct hclge_desc desc;
4893     bool uc_tx_en = en_uc;
4894     u8 promisc_cfg = 0;
4895     int ret;
4896 
4897     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
4898 
4899     req = (struct hclge_promisc_cfg_cmd *)desc.data;
4900     req->vf_id = vf_id;
4901 
4902     if (test_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->priv_flags))
4903         uc_tx_en = false;
4904 
4905     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_RX_EN, en_uc ? 1 : 0);
4906     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_RX_EN, en_mc ? 1 : 0);
4907     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_RX_EN, en_bc ? 1 : 0);
4908     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_UC_TX_EN, uc_tx_en ? 1 : 0);
4909     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_MC_TX_EN, en_mc ? 1 : 0);
4910     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_BC_TX_EN, en_bc ? 1 : 0);
4911     req->extend_promisc = promisc_cfg;
4912 
4913     /* to be compatible with DEVICE_VERSION_V1/2 */
4914     promisc_cfg = 0;
4915     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_UC, en_uc ? 1 : 0);
4916     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_MC, en_mc ? 1 : 0);
4917     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_EN_BC, en_bc ? 1 : 0);
4918     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_TX_EN, 1);
4919     hnae3_set_bit(promisc_cfg, HCLGE_PROMISC_RX_EN, 1);
4920     req->promisc = promisc_cfg;
4921 
4922     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
4923     if (ret)
4924         dev_err(&hdev->pdev->dev,
4925             "failed to set vport %u promisc mode, ret = %d.\n",
4926             vf_id, ret);
4927 
4928     return ret;
4929 }
4930 
4931 int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
4932                  bool en_mc_pmc, bool en_bc_pmc)
4933 {
4934     return hclge_cmd_set_promisc_mode(vport->back, vport->vport_id,
4935                       en_uc_pmc, en_mc_pmc, en_bc_pmc);
4936 }
4937 
4938 static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
4939                   bool en_mc_pmc)
4940 {
4941     struct hclge_vport *vport = hclge_get_vport(handle);
4942     struct hclge_dev *hdev = vport->back;
4943     bool en_bc_pmc = true;
4944 
4945     /* For devices whose version is below V2, the vlan filter is always
4946      * bypassed when broadcast promisc is enabled. So broadcast promisc
4947      * should be disabled until the user enables promisc mode.
4948      */
4949     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
4950         en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
4951 
4952     return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
4953                         en_bc_pmc);
4954 }
4955 
4956 static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
4957 {
4958     struct hclge_vport *vport = hclge_get_vport(handle);
4959 
4960     set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
4961 }
4962 
4963 static void hclge_sync_fd_state(struct hclge_dev *hdev)
4964 {
4965     if (hlist_empty(&hdev->fd_rule_list))
4966         hdev->fd_active_type = HCLGE_FD_RULE_NONE;
4967 }
4968 
4969 static void hclge_fd_inc_rule_cnt(struct hclge_dev *hdev, u16 location)
4970 {
4971     if (!test_bit(location, hdev->fd_bmap)) {
4972         set_bit(location, hdev->fd_bmap);
4973         hdev->hclge_fd_rule_num++;
4974     }
4975 }
4976 
4977 static void hclge_fd_dec_rule_cnt(struct hclge_dev *hdev, u16 location)
4978 {
4979     if (test_bit(location, hdev->fd_bmap)) {
4980         clear_bit(location, hdev->fd_bmap);
4981         hdev->hclge_fd_rule_num--;
4982     }
4983 }
4984 
4985 static void hclge_fd_free_node(struct hclge_dev *hdev,
4986                    struct hclge_fd_rule *rule)
4987 {
4988     hlist_del(&rule->rule_node);
4989     kfree(rule);
4990     hclge_sync_fd_state(hdev);
4991 }
4992 
4993 static void hclge_update_fd_rule_node(struct hclge_dev *hdev,
4994                       struct hclge_fd_rule *old_rule,
4995                       struct hclge_fd_rule *new_rule,
4996                       enum HCLGE_FD_NODE_STATE state)
4997 {
4998     switch (state) {
4999     case HCLGE_FD_TO_ADD:
5000     case HCLGE_FD_ACTIVE:
5001         /* 1) if the new state is TO_ADD, just replace the old rule
5002          * with the same location, no matter its state, because the
5003          * new rule will be configured to the hardware.
5004          * 2) if the new state is ACTIVE, it means the new rule
5005          * has been configured to the hardware, so just replace
5006          * the old rule node with the same location.
5007          * 3) in both cases no new node is added to the list, so
5008          * there is no need to update the rule number and fd_bmap.
5009          */
5010         new_rule->rule_node.next = old_rule->rule_node.next;
5011         new_rule->rule_node.pprev = old_rule->rule_node.pprev;
5012         memcpy(old_rule, new_rule, sizeof(*old_rule));
5013         kfree(new_rule);
5014         break;
5015     case HCLGE_FD_DELETED:
5016         hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5017         hclge_fd_free_node(hdev, old_rule);
5018         break;
5019     case HCLGE_FD_TO_DEL:
5020         /* if the new request is TO_DEL and the old rule exists:
5021          * 1) if the state of the old rule is TO_DEL, do nothing,
5022          * because the rule is deleted by location and the other rule
5023          * contents are irrelevant.
5024          * 2) if the state of the old rule is ACTIVE, change its state
5025          * to TO_DEL, so the rule will be deleted when the periodic
5026          * task is scheduled.
5027          * 3) if the state of the old rule is TO_ADD, the rule hasn't
5028          * been added to hardware yet, so just delete the rule node
5029          * from fd_rule_list directly.
5030          */
5031         if (old_rule->state == HCLGE_FD_TO_ADD) {
5032             hclge_fd_dec_rule_cnt(hdev, old_rule->location);
5033             hclge_fd_free_node(hdev, old_rule);
5034             return;
5035         }
5036         old_rule->state = HCLGE_FD_TO_DEL;
5037         break;
5038     }
5039 }
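/* Editorial note, not part of the driver: a summary of how
 * hclge_update_fd_rule_node() above handles an existing node, keyed by the
 * requested state:
 *
 *   TO_ADD / ACTIVE  replace the old node in place; rule count and fd_bmap
 *                    stay unchanged
 *   DELETED          remove the node and decrement the rule count
 *   TO_DEL           remove the node immediately if it was never written to
 *                    hardware (old state TO_ADD), otherwise mark it TO_DEL
 *                    for the periodic task to delete later
 */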
5040 
5041 static struct hclge_fd_rule *hclge_find_fd_rule(struct hlist_head *hlist,
5042                         u16 location,
5043                         struct hclge_fd_rule **parent)
5044 {
5045     struct hclge_fd_rule *rule;
5046     struct hlist_node *node;
5047 
5048     hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
5049         if (rule->location == location)
5050             return rule;
5051         else if (rule->location > location)
5052             return NULL;
5053         /* record the parent node, used to keep the nodes in
5054          * fd_rule_list in ascending order.
5055          */
5056         *parent = rule;
5057     }
5058 
5059     return NULL;
5060 }
5061 
5062 /* insert fd rule node in ascending order according to rule->location */
5063 static void hclge_fd_insert_rule_node(struct hlist_head *hlist,
5064                       struct hclge_fd_rule *rule,
5065                       struct hclge_fd_rule *parent)
5066 {
5067     INIT_HLIST_NODE(&rule->rule_node);
5068 
5069     if (parent)
5070         hlist_add_behind(&rule->rule_node, &parent->rule_node);
5071     else
5072         hlist_add_head(&rule->rule_node, hlist);
5073 }
5074 
5075 static int hclge_fd_set_user_def_cmd(struct hclge_dev *hdev,
5076                      struct hclge_fd_user_def_cfg *cfg)
5077 {
5078     struct hclge_fd_user_def_cfg_cmd *req;
5079     struct hclge_desc desc;
5080     u16 data = 0;
5081     int ret;
5082 
5083     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_USER_DEF_OP, false);
5084 
5085     req = (struct hclge_fd_user_def_cfg_cmd *)desc.data;
5086 
5087     hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[0].ref_cnt > 0);
5088     hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5089             HCLGE_FD_USER_DEF_OFT_S, cfg[0].offset);
5090     req->ol2_cfg = cpu_to_le16(data);
5091 
5092     data = 0;
5093     hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[1].ref_cnt > 0);
5094     hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5095             HCLGE_FD_USER_DEF_OFT_S, cfg[1].offset);
5096     req->ol3_cfg = cpu_to_le16(data);
5097 
5098     data = 0;
5099     hnae3_set_bit(data, HCLGE_FD_USER_DEF_EN_B, cfg[2].ref_cnt > 0);
5100     hnae3_set_field(data, HCLGE_FD_USER_DEF_OFT_M,
5101             HCLGE_FD_USER_DEF_OFT_S, cfg[2].offset);
5102     req->ol4_cfg = cpu_to_le16(data);
5103 
5104     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5105     if (ret)
5106         dev_err(&hdev->pdev->dev,
5107             "failed to set fd user def data, ret= %d\n", ret);
5108     return ret;
5109 }
5110 
5111 static void hclge_sync_fd_user_def_cfg(struct hclge_dev *hdev, bool locked)
5112 {
5113     int ret;
5114 
5115     if (!test_and_clear_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state))
5116         return;
5117 
5118     if (!locked)
5119         spin_lock_bh(&hdev->fd_rule_lock);
5120 
5121     ret = hclge_fd_set_user_def_cmd(hdev, hdev->fd_cfg.user_def_cfg);
5122     if (ret)
5123         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5124 
5125     if (!locked)
5126         spin_unlock_bh(&hdev->fd_rule_lock);
5127 }
5128 
5129 static int hclge_fd_check_user_def_refcnt(struct hclge_dev *hdev,
5130                       struct hclge_fd_rule *rule)
5131 {
5132     struct hlist_head *hlist = &hdev->fd_rule_list;
5133     struct hclge_fd_rule *fd_rule, *parent = NULL;
5134     struct hclge_fd_user_def_info *info, *old_info;
5135     struct hclge_fd_user_def_cfg *cfg;
5136 
5137     if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5138         rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5139         return 0;
5140 
5141     /* valid layers start from 1, so subtract 1 to index the cfg array */
5142     cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5143     info = &rule->ep.user_def;
5144 
5145     if (!cfg->ref_cnt || cfg->offset == info->offset)
5146         return 0;
5147 
5148     if (cfg->ref_cnt > 1)
5149         goto error;
5150 
5151     fd_rule = hclge_find_fd_rule(hlist, rule->location, &parent);
5152     if (fd_rule) {
5153         old_info = &fd_rule->ep.user_def;
5154         if (info->layer == old_info->layer)
5155             return 0;
5156     }
5157 
5158 error:
5159     dev_err(&hdev->pdev->dev,
5160         "No available offset for layer%d fd rule, each layer only support one user def offset.\n",
5161         info->layer + 1);
5162     return -ENOSPC;
5163 }
5164 
5165 static void hclge_fd_inc_user_def_refcnt(struct hclge_dev *hdev,
5166                      struct hclge_fd_rule *rule)
5167 {
5168     struct hclge_fd_user_def_cfg *cfg;
5169 
5170     if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5171         rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5172         return;
5173 
5174     cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5175     if (!cfg->ref_cnt) {
5176         cfg->offset = rule->ep.user_def.offset;
5177         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5178     }
5179     cfg->ref_cnt++;
5180 }
5181 
5182 static void hclge_fd_dec_user_def_refcnt(struct hclge_dev *hdev,
5183                      struct hclge_fd_rule *rule)
5184 {
5185     struct hclge_fd_user_def_cfg *cfg;
5186 
5187     if (!rule || rule->rule_type != HCLGE_FD_EP_ACTIVE ||
5188         rule->ep.user_def.layer == HCLGE_FD_USER_DEF_NONE)
5189         return;
5190 
5191     cfg = &hdev->fd_cfg.user_def_cfg[rule->ep.user_def.layer - 1];
5192     if (!cfg->ref_cnt)
5193         return;
5194 
5195     cfg->ref_cnt--;
5196     if (!cfg->ref_cnt) {
5197         cfg->offset = 0;
5198         set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
5199     }
5200 }
5201 
5202 static void hclge_update_fd_list(struct hclge_dev *hdev,
5203                  enum HCLGE_FD_NODE_STATE state, u16 location,
5204                  struct hclge_fd_rule *new_rule)
5205 {
5206     struct hlist_head *hlist = &hdev->fd_rule_list;
5207     struct hclge_fd_rule *fd_rule, *parent = NULL;
5208 
5209     fd_rule = hclge_find_fd_rule(hlist, location, &parent);
5210     if (fd_rule) {
5211         hclge_fd_dec_user_def_refcnt(hdev, fd_rule);
5212         if (state == HCLGE_FD_ACTIVE)
5213             hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5214         hclge_sync_fd_user_def_cfg(hdev, true);
5215 
5216         hclge_update_fd_rule_node(hdev, fd_rule, new_rule, state);
5217         return;
5218     }
5219 
5220     /* it's unlikely to fail here, because we have checked that the
5221      * rule exists before.
5222      */
5223     if (unlikely(state == HCLGE_FD_TO_DEL || state == HCLGE_FD_DELETED)) {
5224         dev_warn(&hdev->pdev->dev,
5225              "failed to delete fd rule %u, it's inexistent\n",
5226              location);
5227         return;
5228     }
5229 
5230     hclge_fd_inc_user_def_refcnt(hdev, new_rule);
5231     hclge_sync_fd_user_def_cfg(hdev, true);
5232 
5233     hclge_fd_insert_rule_node(hlist, new_rule, parent);
5234     hclge_fd_inc_rule_cnt(hdev, new_rule->location);
5235 
5236     if (state == HCLGE_FD_TO_ADD) {
5237         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
5238         hclge_task_schedule(hdev, 0);
5239     }
5240 }
5241 
5242 static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
5243 {
5244     struct hclge_get_fd_mode_cmd *req;
5245     struct hclge_desc desc;
5246     int ret;
5247 
5248     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
5249 
5250     req = (struct hclge_get_fd_mode_cmd *)desc.data;
5251 
5252     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5253     if (ret) {
5254         dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
5255         return ret;
5256     }
5257 
5258     *fd_mode = req->mode;
5259 
5260     return ret;
5261 }
5262 
5263 static int hclge_get_fd_allocation(struct hclge_dev *hdev,
5264                    u32 *stage1_entry_num,
5265                    u32 *stage2_entry_num,
5266                    u16 *stage1_counter_num,
5267                    u16 *stage2_counter_num)
5268 {
5269     struct hclge_get_fd_allocation_cmd *req;
5270     struct hclge_desc desc;
5271     int ret;
5272 
5273     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
5274 
5275     req = (struct hclge_get_fd_allocation_cmd *)desc.data;
5276 
5277     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5278     if (ret) {
5279         dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
5280             ret);
5281         return ret;
5282     }
5283 
5284     *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
5285     *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
5286     *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
5287     *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
5288 
5289     return ret;
5290 }
5291 
5292 static int hclge_set_fd_key_config(struct hclge_dev *hdev,
5293                    enum HCLGE_FD_STAGE stage_num)
5294 {
5295     struct hclge_set_fd_key_config_cmd *req;
5296     struct hclge_fd_key_cfg *stage;
5297     struct hclge_desc desc;
5298     int ret;
5299 
5300     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
5301 
5302     req = (struct hclge_set_fd_key_config_cmd *)desc.data;
5303     stage = &hdev->fd_cfg.key_cfg[stage_num];
5304     req->stage = stage_num;
5305     req->key_select = stage->key_sel;
5306     req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
5307     req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
5308     req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
5309     req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
5310     req->tuple_mask = cpu_to_le32(~stage->tuple_active);
5311     req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
5312 
5313     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5314     if (ret)
5315         dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
5316 
5317     return ret;
5318 }
5319 
5320 static void hclge_fd_disable_user_def(struct hclge_dev *hdev)
5321 {
5322     struct hclge_fd_user_def_cfg *cfg = hdev->fd_cfg.user_def_cfg;
5323 
5324     spin_lock_bh(&hdev->fd_rule_lock);
5325     memset(cfg, 0, sizeof(hdev->fd_cfg.user_def_cfg));
5326     spin_unlock_bh(&hdev->fd_rule_lock);
5327 
5328     hclge_fd_set_user_def_cmd(hdev, cfg);
5329 }
5330 
5331 static int hclge_init_fd_config(struct hclge_dev *hdev)
5332 {
5333 #define LOW_2_WORDS     0x03
5334     struct hclge_fd_key_cfg *key_cfg;
5335     int ret;
5336 
5337     if (!hnae3_dev_fd_supported(hdev))
5338         return 0;
5339 
5340     ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
5341     if (ret)
5342         return ret;
5343 
5344     switch (hdev->fd_cfg.fd_mode) {
5345     case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
5346         hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
5347         break;
5348     case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
5349         hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
5350         break;
5351     default:
5352         dev_err(&hdev->pdev->dev,
5353             "Unsupported flow director mode %u\n",
5354             hdev->fd_cfg.fd_mode);
5355         return -EOPNOTSUPP;
5356     }
5357 
5358     key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
5359     key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
5360     key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
5361     key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
5362     key_cfg->outer_sipv6_word_en = 0;
5363     key_cfg->outer_dipv6_word_en = 0;
5364 
5365     key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
5366                 BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
5367                 BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5368                 BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5369 
5370     /* If the max 400-bit key is used, we can support tuples for the ether type */
5371     if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5372         key_cfg->tuple_active |=
5373                 BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
5374         if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
5375             key_cfg->tuple_active |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5376     }
5377 
5378     /* roce_type is used to filter roce frames
5379      * dst_vport is used to specify the rule
5380      */
5381     key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
5382 
5383     ret = hclge_get_fd_allocation(hdev,
5384                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
5385                       &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
5386                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
5387                       &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
5388     if (ret)
5389         return ret;
5390 
5391     return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
5392 }
5393 
5394 static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
5395                 int loc, u8 *key, bool is_add)
5396 {
5397     struct hclge_fd_tcam_config_1_cmd *req1;
5398     struct hclge_fd_tcam_config_2_cmd *req2;
5399     struct hclge_fd_tcam_config_3_cmd *req3;
5400     struct hclge_desc desc[3];
5401     int ret;
5402 
5403     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
5404     desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5405     hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
5406     desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
5407     hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
5408 
5409     req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
5410     req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
5411     req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
5412 
5413     req1->stage = stage;
5414     req1->xy_sel = sel_x ? 1 : 0;
5415     hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
5416     req1->index = cpu_to_le32(loc);
5417     req1->entry_vld = sel_x ? is_add : 0;
5418 
5419     if (key) {
5420         memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
5421         memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
5422                sizeof(req2->tcam_data));
5423         memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
5424                sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
5425     }
5426 
5427     ret = hclge_cmd_send(&hdev->hw, desc, 3);
5428     if (ret)
5429         dev_err(&hdev->pdev->dev,
5430             "config tcam key fail, ret=%d\n",
5431             ret);
5432 
5433     return ret;
5434 }
5435 
5436 static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
5437                   struct hclge_fd_ad_data *action)
5438 {
5439     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
5440     struct hclge_fd_ad_config_cmd *req;
5441     struct hclge_desc desc;
5442     u64 ad_data = 0;
5443     int ret;
5444 
5445     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
5446 
5447     req = (struct hclge_fd_ad_config_cmd *)desc.data;
5448     req->index = cpu_to_le32(loc);
5449     req->stage = stage;
5450 
5451     hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
5452               action->write_rule_id_to_bd);
5453     hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
5454             action->rule_id);
5455     if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) {
5456         hnae3_set_bit(ad_data, HCLGE_FD_AD_TC_OVRD_B,
5457                   action->override_tc);
5458         hnae3_set_field(ad_data, HCLGE_FD_AD_TC_SIZE_M,
5459                 HCLGE_FD_AD_TC_SIZE_S, (u32)action->tc_size);
5460     }
5461     ad_data <<= 32;
5462     hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
5463     hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
5464               action->forward_to_direct_queue);
5465     hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
5466             action->queue_id);
5467     hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
5468     hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
5469             HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
5470     hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
5471     hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
5472             action->counter_id);
5473 
5474     req->ad_data = cpu_to_le64(ad_data);
5475     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
5476     if (ret)
5477         dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
5478 
5479     return ret;
5480 }
5481 
5482 static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
5483                    struct hclge_fd_rule *rule)
5484 {
5485     int offset, moffset, ip_offset;
5486     enum HCLGE_FD_KEY_OPT key_opt;
5487     u16 tmp_x_s, tmp_y_s;
5488     u32 tmp_x_l, tmp_y_l;
5489     u8 *p = (u8 *)rule;
5490     int i;
5491 
5492     if (rule->unused_tuple & BIT(tuple_bit))
5493         return true;
5494 
5495     key_opt = tuple_key_info[tuple_bit].key_opt;
5496     offset = tuple_key_info[tuple_bit].offset;
5497     moffset = tuple_key_info[tuple_bit].moffset;
5498 
5499     switch (key_opt) {
5500     case KEY_OPT_U8:
5501         calc_x(*key_x, p[offset], p[moffset]);
5502         calc_y(*key_y, p[offset], p[moffset]);
5503 
5504         return true;
5505     case KEY_OPT_LE16:
5506         calc_x(tmp_x_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5507         calc_y(tmp_y_s, *(u16 *)(&p[offset]), *(u16 *)(&p[moffset]));
5508         *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
5509         *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
5510 
5511         return true;
5512     case KEY_OPT_LE32:
5513         calc_x(tmp_x_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5514         calc_y(tmp_y_l, *(u32 *)(&p[offset]), *(u32 *)(&p[moffset]));
5515         *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5516         *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5517 
5518         return true;
5519     case KEY_OPT_MAC:
5520         for (i = 0; i < ETH_ALEN; i++) {
5521             calc_x(key_x[ETH_ALEN - 1 - i], p[offset + i],
5522                    p[moffset + i]);
5523             calc_y(key_y[ETH_ALEN - 1 - i], p[offset + i],
5524                    p[moffset + i]);
5525         }
5526 
5527         return true;
5528     case KEY_OPT_IP:
5529         ip_offset = IPV4_INDEX * sizeof(u32);
5530         calc_x(tmp_x_l, *(u32 *)(&p[offset + ip_offset]),
5531                *(u32 *)(&p[moffset + ip_offset]));
5532         calc_y(tmp_y_l, *(u32 *)(&p[offset + ip_offset]),
5533                *(u32 *)(&p[moffset + ip_offset]));
5534         *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
5535         *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
5536 
5537         return true;
5538     default:
5539         return false;
5540     }
5541 }
5542 
5543 static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
5544                  u8 vf_id, u8 network_port_id)
5545 {
5546     u32 port_number = 0;
5547 
5548     if (port_type == HOST_PORT) {
5549         hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
5550                 pf_id);
5551         hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
5552                 vf_id);
5553         hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
5554     } else {
5555         hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
5556                 HCLGE_NETWORK_PORT_ID_S, network_port_id);
5557         hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
5558     }
5559 
5560     return port_number;
5561 }
5562 
5563 static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
5564                        __le32 *key_x, __le32 *key_y,
5565                        struct hclge_fd_rule *rule)
5566 {
5567     u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
5568     u8 cur_pos = 0, tuple_size, shift_bits;
5569     unsigned int i;
5570 
5571     for (i = 0; i < MAX_META_DATA; i++) {
5572         tuple_size = meta_data_key_info[i].key_length;
5573         tuple_bit = key_cfg->meta_data_active & BIT(i);
5574 
5575         switch (tuple_bit) {
5576         case BIT(ROCE_TYPE):
5577             hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
5578             cur_pos += tuple_size;
5579             break;
5580         case BIT(DST_VPORT):
5581             port_number = hclge_get_port_number(HOST_PORT, 0,
5582                                 rule->vf_id, 0);
5583             hnae3_set_field(meta_data,
5584                     GENMASK(cur_pos + tuple_size, cur_pos),
5585                     cur_pos, port_number);
5586             cur_pos += tuple_size;
5587             break;
5588         default:
5589             break;
5590         }
5591     }
5592 
5593     calc_x(tmp_x, meta_data, 0xFFFFFFFF);
5594     calc_y(tmp_y, meta_data, 0xFFFFFFFF);
5595     shift_bits = sizeof(meta_data) * 8 - cur_pos;
5596 
5597     *key_x = cpu_to_le32(tmp_x << shift_bits);
5598     *key_y = cpu_to_le32(tmp_y << shift_bits);
5599 }
5600 
5601 /* A complete key consists of the meta data key and the tuple key.
5602  * The meta data key is stored in the MSB region, the tuple key is stored
5603  * in the LSB region, and unused bits are filled with 0.
5604  */
5605 static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
5606                 struct hclge_fd_rule *rule)
5607 {
5608     struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
5609     u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
5610     u8 *cur_key_x, *cur_key_y;
5611     u8 meta_data_region;
5612     u8 tuple_size;
5613     int ret;
5614     u32 i;
5615 
5616     memset(key_x, 0, sizeof(key_x));
5617     memset(key_y, 0, sizeof(key_y));
5618     cur_key_x = key_x;
5619     cur_key_y = key_y;
5620 
5621     for (i = 0; i < MAX_TUPLE; i++) {
5622         bool tuple_valid;
5623 
5624         tuple_size = tuple_key_info[i].key_length / 8;
5625         if (!(key_cfg->tuple_active & BIT(i)))
5626             continue;
5627 
5628         tuple_valid = hclge_fd_convert_tuple(i, cur_key_x,
5629                              cur_key_y, rule);
5630         if (tuple_valid) {
5631             cur_key_x += tuple_size;
5632             cur_key_y += tuple_size;
5633         }
5634     }
5635 
5636     meta_data_region = hdev->fd_cfg.max_key_length / 8 -
5637             MAX_META_DATA_LENGTH / 8;
5638 
5639     hclge_fd_convert_meta_data(key_cfg,
5640                    (__le32 *)(key_x + meta_data_region),
5641                    (__le32 *)(key_y + meta_data_region),
5642                    rule);
5643 
5644     ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
5645                    true);
5646     if (ret) {
5647         dev_err(&hdev->pdev->dev,
5648             "fd key_y config fail, loc=%u, ret=%d\n",
5649             rule->location, ret);
5650         return ret;
5651     }
5652 
5653     ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
5654                    true);
5655     if (ret)
5656         dev_err(&hdev->pdev->dev,
5657             "fd key_x config fail, loc=%u, ret=%d\n",
5658             rule->location, ret);
5659     return ret;
5660 }
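/* Editorial note, not part of the driver: a hedged sketch of the key buffer
 * laid out by hclge_config_key() above (byte counts depend on fd_mode):
 *
 *   offset 0                      meta_data_region        max_key_length / 8
 *   +-----------------------------+------------------------+
 *   |   tuple key (LSB region)    |  meta data key (MSB)   |
 *   +-----------------------------+------------------------+
 *
 * Tuples are packed from the start of the buffer in tuple_key_info order;
 * the meta data key occupies the last MAX_META_DATA_LENGTH / 8 bytes.
 */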
5661 
5662 static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
5663                    struct hclge_fd_rule *rule)
5664 {
5665     struct hclge_vport *vport = hdev->vport;
5666     struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
5667     struct hclge_fd_ad_data ad_data;
5668 
5669     memset(&ad_data, 0, sizeof(struct hclge_fd_ad_data));
5670     ad_data.ad_id = rule->location;
5671 
5672     if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
5673         ad_data.drop_packet = true;
5674     } else if (rule->action == HCLGE_FD_ACTION_SELECT_TC) {
5675         ad_data.override_tc = true;
5676         ad_data.queue_id =
5677             kinfo->tc_info.tqp_offset[rule->cls_flower.tc];
5678         ad_data.tc_size =
5679             ilog2(kinfo->tc_info.tqp_count[rule->cls_flower.tc]);
5680     } else {
5681         ad_data.forward_to_direct_queue = true;
5682         ad_data.queue_id = rule->queue_id;
5683     }
5684 
5685     if (hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1]) {
5686         ad_data.use_counter = true;
5687         ad_data.counter_id = rule->vf_id %
5688                      hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1];
5689     } else {
5690         ad_data.use_counter = false;
5691         ad_data.counter_id = 0;
5692     }
5693 
5694     ad_data.use_next_stage = false;
5695     ad_data.next_input_key = 0;
5696 
5697     ad_data.write_rule_id_to_bd = true;
5698     ad_data.rule_id = rule->location;
5699 
5700     return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
5701 }
5702 
5703 static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
5704                        u32 *unused_tuple)
5705 {
5706     if (!spec || !unused_tuple)
5707         return -EINVAL;
5708 
5709     *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5710 
5711     if (!spec->ip4src)
5712         *unused_tuple |= BIT(INNER_SRC_IP);
5713 
5714     if (!spec->ip4dst)
5715         *unused_tuple |= BIT(INNER_DST_IP);
5716 
5717     if (!spec->psrc)
5718         *unused_tuple |= BIT(INNER_SRC_PORT);
5719 
5720     if (!spec->pdst)
5721         *unused_tuple |= BIT(INNER_DST_PORT);
5722 
5723     if (!spec->tos)
5724         *unused_tuple |= BIT(INNER_IP_TOS);
5725 
5726     return 0;
5727 }
5728 
5729 static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
5730                     u32 *unused_tuple)
5731 {
5732     if (!spec || !unused_tuple)
5733         return -EINVAL;
5734 
5735     *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5736         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5737 
5738     if (!spec->ip4src)
5739         *unused_tuple |= BIT(INNER_SRC_IP);
5740 
5741     if (!spec->ip4dst)
5742         *unused_tuple |= BIT(INNER_DST_IP);
5743 
5744     if (!spec->tos)
5745         *unused_tuple |= BIT(INNER_IP_TOS);
5746 
5747     if (!spec->proto)
5748         *unused_tuple |= BIT(INNER_IP_PROTO);
5749 
5750     if (spec->l4_4_bytes)
5751         return -EOPNOTSUPP;
5752 
5753     if (spec->ip_ver != ETH_RX_NFC_IP4)
5754         return -EOPNOTSUPP;
5755 
5756     return 0;
5757 }
5758 
5759 static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
5760                        u32 *unused_tuple)
5761 {
5762     if (!spec || !unused_tuple)
5763         return -EINVAL;
5764 
5765     *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
5766 
5767     /* check whether src/dst ip address is used */
5768     if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5769         *unused_tuple |= BIT(INNER_SRC_IP);
5770 
5771     if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5772         *unused_tuple |= BIT(INNER_DST_IP);
5773 
5774     if (!spec->psrc)
5775         *unused_tuple |= BIT(INNER_SRC_PORT);
5776 
5777     if (!spec->pdst)
5778         *unused_tuple |= BIT(INNER_DST_PORT);
5779 
5780     if (!spec->tclass)
5781         *unused_tuple |= BIT(INNER_IP_TOS);
5782 
5783     return 0;
5784 }
5785 
5786 static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
5787                     u32 *unused_tuple)
5788 {
5789     if (!spec || !unused_tuple)
5790         return -EINVAL;
5791 
5792     *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
5793             BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
5794 
5795     /* check whether src/dst ip address is used */
5796     if (ipv6_addr_any((struct in6_addr *)spec->ip6src))
5797         *unused_tuple |= BIT(INNER_SRC_IP);
5798 
5799     if (ipv6_addr_any((struct in6_addr *)spec->ip6dst))
5800         *unused_tuple |= BIT(INNER_DST_IP);
5801 
5802     if (!spec->l4_proto)
5803         *unused_tuple |= BIT(INNER_IP_PROTO);
5804 
5805     if (!spec->tclass)
5806         *unused_tuple |= BIT(INNER_IP_TOS);
5807 
5808     if (spec->l4_4_bytes)
5809         return -EOPNOTSUPP;
5810 
5811     return 0;
5812 }
5813 
5814 static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
5815 {
5816     if (!spec || !unused_tuple)
5817         return -EINVAL;
5818 
5819     *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
5820         BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
5821         BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
5822 
5823     if (is_zero_ether_addr(spec->h_source))
5824         *unused_tuple |= BIT(INNER_SRC_MAC);
5825 
5826     if (is_zero_ether_addr(spec->h_dest))
5827         *unused_tuple |= BIT(INNER_DST_MAC);
5828 
5829     if (!spec->h_proto)
5830         *unused_tuple |= BIT(INNER_ETH_TYPE);
5831 
5832     return 0;
5833 }
5834 
5835 static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
5836                     struct ethtool_rx_flow_spec *fs,
5837                     u32 *unused_tuple)
5838 {
5839     if (fs->flow_type & FLOW_EXT) {
5840         if (fs->h_ext.vlan_etype) {
5841             dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
5842             return -EOPNOTSUPP;
5843         }
5844 
5845         if (!fs->h_ext.vlan_tci)
5846             *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5847 
5848         if (fs->m_ext.vlan_tci &&
5849             be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
5850             dev_err(&hdev->pdev->dev,
5851                 "failed to config vlan_tci, invalid vlan_tci: %u, max is %d.\n",
5852                 ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
5853             return -EINVAL;
5854         }
5855     } else {
5856         *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
5857     }
5858 
5859     if (fs->flow_type & FLOW_MAC_EXT) {
5860         if (hdev->fd_cfg.fd_mode !=
5861             HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
5862             dev_err(&hdev->pdev->dev,
5863                 "FLOW_MAC_EXT is not supported in current fd mode!\n");
5864             return -EOPNOTSUPP;
5865         }
5866 
5867         if (is_zero_ether_addr(fs->h_ext.h_dest))
5868             *unused_tuple |= BIT(INNER_DST_MAC);
5869         else
5870             *unused_tuple &= ~BIT(INNER_DST_MAC);
5871     }
5872 
5873     return 0;
5874 }
5875 
5876 static int hclge_fd_get_user_def_layer(u32 flow_type, u32 *unused_tuple,
5877                        struct hclge_fd_user_def_info *info)
5878 {
5879     switch (flow_type) {
5880     case ETHER_FLOW:
5881         info->layer = HCLGE_FD_USER_DEF_L2;
5882         *unused_tuple &= ~BIT(INNER_L2_RSV);
5883         break;
5884     case IP_USER_FLOW:
5885     case IPV6_USER_FLOW:
5886         info->layer = HCLGE_FD_USER_DEF_L3;
5887         *unused_tuple &= ~BIT(INNER_L3_RSV);
5888         break;
5889     case TCP_V4_FLOW:
5890     case UDP_V4_FLOW:
5891     case TCP_V6_FLOW:
5892     case UDP_V6_FLOW:
5893         info->layer = HCLGE_FD_USER_DEF_L4;
5894         *unused_tuple &= ~BIT(INNER_L4_RSV);
5895         break;
5896     default:
5897         return -EOPNOTSUPP;
5898     }
5899 
5900     return 0;
5901 }
5902 
5903 static bool hclge_fd_is_user_def_all_masked(struct ethtool_rx_flow_spec *fs)
5904 {
5905     return be32_to_cpu(fs->m_ext.data[1] | fs->m_ext.data[0]) == 0;
5906 }
5907 
5908 static int hclge_fd_parse_user_def_field(struct hclge_dev *hdev,
5909                      struct ethtool_rx_flow_spec *fs,
5910                      u32 *unused_tuple,
5911                      struct hclge_fd_user_def_info *info)
5912 {
5913     u32 tuple_active = hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1].tuple_active;
5914     u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5915     u16 data, offset, data_mask, offset_mask;
5916     int ret;
5917 
5918     info->layer = HCLGE_FD_USER_DEF_NONE;
5919     *unused_tuple |= HCLGE_FD_TUPLE_USER_DEF_TUPLES;
5920 
5921     if (!(fs->flow_type & FLOW_EXT) || hclge_fd_is_user_def_all_masked(fs))
5922         return 0;
5923 
5924     /* The user-def data from ethtool is a 64-bit value; bits 0~15 are
5925      * used for data, and bits 32~47 are used for offset.
5926      */
5927     data = be32_to_cpu(fs->h_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
5928     data_mask = be32_to_cpu(fs->m_ext.data[1]) & HCLGE_FD_USER_DEF_DATA;
5929     offset = be32_to_cpu(fs->h_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
5930     offset_mask = be32_to_cpu(fs->m_ext.data[0]) & HCLGE_FD_USER_DEF_OFFSET;
5931 
5932     if (!(tuple_active & HCLGE_FD_TUPLE_USER_DEF_TUPLES)) {
5933         dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
5934         return -EOPNOTSUPP;
5935     }
5936 
5937     if (offset > HCLGE_FD_MAX_USER_DEF_OFFSET) {
5938         dev_err(&hdev->pdev->dev,
5939             "user-def offset[%u] should be no more than %u\n",
5940             offset, HCLGE_FD_MAX_USER_DEF_OFFSET);
5941         return -EINVAL;
5942     }
5943 
5944     if (offset_mask != HCLGE_FD_USER_DEF_OFFSET_UNMASK) {
5945         dev_err(&hdev->pdev->dev, "user-def offset can't be masked\n");
5946         return -EINVAL;
5947     }
5948 
5949     ret = hclge_fd_get_user_def_layer(flow_type, unused_tuple, info);
5950     if (ret) {
5951         dev_err(&hdev->pdev->dev,
5952             "unsupported flow type for user-def bytes, ret = %d\n",
5953             ret);
5954         return ret;
5955     }
5956 
5957     info->data = data;
5958     info->data_mask = data_mask;
5959     info->offset = offset;
5960 
5961     return 0;
5962 }
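/* Editorial note, not part of the driver: a hedged example of the user-def
 * encoding parsed above. ethtool stores the 64-bit "user-def" value with the
 * upper 32 bits in h_ext.data[0] and the lower 32 bits in h_ext.data[1], so
 * a rule such as (interface and match fields illustrative only)
 *
 *   ethtool -N eth0 flow-type udp4 ... user-def 0x0000000400001234 action 5
 *
 * is parsed as offset = 4 (bits 32~47) and data = 0x1234 (bits 0~15); the
 * offset bits of the user-def mask must be left unmasked.
 */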
5963 
5964 static int hclge_fd_check_spec(struct hclge_dev *hdev,
5965                    struct ethtool_rx_flow_spec *fs,
5966                    u32 *unused_tuple,
5967                    struct hclge_fd_user_def_info *info)
5968 {
5969     u32 flow_type;
5970     int ret;
5971 
5972     if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
5973         dev_err(&hdev->pdev->dev,
5974             "failed to config fd rules, invalid rule location: %u, max is %u\n.",
5975             fs->location,
5976             hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
5977         return -EINVAL;
5978     }
5979 
5980     ret = hclge_fd_parse_user_def_field(hdev, fs, unused_tuple, info);
5981     if (ret)
5982         return ret;
5983 
5984     flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
5985     switch (flow_type) {
5986     case SCTP_V4_FLOW:
5987     case TCP_V4_FLOW:
5988     case UDP_V4_FLOW:
5989         ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
5990                           unused_tuple);
5991         break;
5992     case IP_USER_FLOW:
5993         ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
5994                            unused_tuple);
5995         break;
5996     case SCTP_V6_FLOW:
5997     case TCP_V6_FLOW:
5998     case UDP_V6_FLOW:
5999         ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
6000                           unused_tuple);
6001         break;
6002     case IPV6_USER_FLOW:
6003         ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
6004                            unused_tuple);
6005         break;
6006     case ETHER_FLOW:
6007         if (hdev->fd_cfg.fd_mode !=
6008             HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
6009             dev_err(&hdev->pdev->dev,
6010                 "ETHER_FLOW is not supported in current fd mode!\n");
6011             return -EOPNOTSUPP;
6012         }
6013 
6014         ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
6015                          unused_tuple);
6016         break;
6017     default:
6018         dev_err(&hdev->pdev->dev,
6019             "unsupported protocol type, protocol type = %#x\n",
6020             flow_type);
6021         return -EOPNOTSUPP;
6022     }
6023 
6024     if (ret) {
6025         dev_err(&hdev->pdev->dev,
6026             "failed to check flow union tuple, ret = %d\n",
6027             ret);
6028         return ret;
6029     }
6030 
6031     return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
6032 }
6033 
6034 static void hclge_fd_get_tcpip4_tuple(struct hclge_dev *hdev,
6035                       struct ethtool_rx_flow_spec *fs,
6036                       struct hclge_fd_rule *rule, u8 ip_proto)
6037 {
6038     rule->tuples.src_ip[IPV4_INDEX] =
6039             be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
6040     rule->tuples_mask.src_ip[IPV4_INDEX] =
6041             be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
6042 
6043     rule->tuples.dst_ip[IPV4_INDEX] =
6044             be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
6045     rule->tuples_mask.dst_ip[IPV4_INDEX] =
6046             be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
6047 
6048     rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
6049     rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
6050 
6051     rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
6052     rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
6053 
6054     rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
6055     rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
6056 
6057     rule->tuples.ether_proto = ETH_P_IP;
6058     rule->tuples_mask.ether_proto = 0xFFFF;
6059 
6060     rule->tuples.ip_proto = ip_proto;
6061     rule->tuples_mask.ip_proto = 0xFF;
6062 }
6063 
6064 static void hclge_fd_get_ip4_tuple(struct hclge_dev *hdev,
6065                    struct ethtool_rx_flow_spec *fs,
6066                    struct hclge_fd_rule *rule)
6067 {
6068     rule->tuples.src_ip[IPV4_INDEX] =
6069             be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
6070     rule->tuples_mask.src_ip[IPV4_INDEX] =
6071             be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
6072 
6073     rule->tuples.dst_ip[IPV4_INDEX] =
6074             be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
6075     rule->tuples_mask.dst_ip[IPV4_INDEX] =
6076             be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
6077 
6078     rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
6079     rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
6080 
6081     rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
6082     rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
6083 
6084     rule->tuples.ether_proto = ETH_P_IP;
6085     rule->tuples_mask.ether_proto = 0xFFFF;
6086 }
6087 
6088 static void hclge_fd_get_tcpip6_tuple(struct hclge_dev *hdev,
6089                       struct ethtool_rx_flow_spec *fs,
6090                       struct hclge_fd_rule *rule, u8 ip_proto)
6091 {
6092     be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.tcp_ip6_spec.ip6src,
6093               IPV6_SIZE);
6094     be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.tcp_ip6_spec.ip6src,
6095               IPV6_SIZE);
6096 
6097     be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.tcp_ip6_spec.ip6dst,
6098               IPV6_SIZE);
6099     be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.tcp_ip6_spec.ip6dst,
6100               IPV6_SIZE);
6101 
6102     rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
6103     rule->tuples_mask.src_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
6104 
6105     rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
6106     rule->tuples_mask.dst_port = be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
6107 
6108     rule->tuples.ether_proto = ETH_P_IPV6;
6109     rule->tuples_mask.ether_proto = 0xFFFF;
6110 
6111     rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6112     rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6113 
6114     rule->tuples.ip_proto = ip_proto;
6115     rule->tuples_mask.ip_proto = 0xFF;
6116 }
6117 
6118 static void hclge_fd_get_ip6_tuple(struct hclge_dev *hdev,
6119                    struct ethtool_rx_flow_spec *fs,
6120                    struct hclge_fd_rule *rule)
6121 {
6122     be32_to_cpu_array(rule->tuples.src_ip, fs->h_u.usr_ip6_spec.ip6src,
6123               IPV6_SIZE);
6124     be32_to_cpu_array(rule->tuples_mask.src_ip, fs->m_u.usr_ip6_spec.ip6src,
6125               IPV6_SIZE);
6126 
6127     be32_to_cpu_array(rule->tuples.dst_ip, fs->h_u.usr_ip6_spec.ip6dst,
6128               IPV6_SIZE);
6129     be32_to_cpu_array(rule->tuples_mask.dst_ip, fs->m_u.usr_ip6_spec.ip6dst,
6130               IPV6_SIZE);
6131 
6132     rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
6133     rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
6134 
6135     rule->tuples.ip_tos = fs->h_u.tcp_ip6_spec.tclass;
6136     rule->tuples_mask.ip_tos = fs->m_u.tcp_ip6_spec.tclass;
6137 
6138     rule->tuples.ether_proto = ETH_P_IPV6;
6139     rule->tuples_mask.ether_proto = 0xFFFF;
6140 }
6141 
6142 static void hclge_fd_get_ether_tuple(struct hclge_dev *hdev,
6143                      struct ethtool_rx_flow_spec *fs,
6144                      struct hclge_fd_rule *rule)
6145 {
6146     ether_addr_copy(rule->tuples.src_mac, fs->h_u.ether_spec.h_source);
6147     ether_addr_copy(rule->tuples_mask.src_mac, fs->m_u.ether_spec.h_source);
6148 
6149     ether_addr_copy(rule->tuples.dst_mac, fs->h_u.ether_spec.h_dest);
6150     ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_u.ether_spec.h_dest);
6151 
6152     rule->tuples.ether_proto = be16_to_cpu(fs->h_u.ether_spec.h_proto);
6153     rule->tuples_mask.ether_proto = be16_to_cpu(fs->m_u.ether_spec.h_proto);
6154 }
6155 
6156 static void hclge_fd_get_user_def_tuple(struct hclge_fd_user_def_info *info,
6157                     struct hclge_fd_rule *rule)
6158 {
6159     switch (info->layer) {
6160     case HCLGE_FD_USER_DEF_L2:
6161         rule->tuples.l2_user_def = info->data;
6162         rule->tuples_mask.l2_user_def = info->data_mask;
6163         break;
6164     case HCLGE_FD_USER_DEF_L3:
6165         rule->tuples.l3_user_def = info->data;
6166         rule->tuples_mask.l3_user_def = info->data_mask;
6167         break;
6168     case HCLGE_FD_USER_DEF_L4:
6169         rule->tuples.l4_user_def = (u32)info->data << 16;
6170         rule->tuples_mask.l4_user_def = (u32)info->data_mask << 16;
6171         break;
6172     default:
6173         break;
6174     }
6175 
6176     rule->ep.user_def = *info;
6177 }
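
/* Illustrative note, not part of the driver: the user-def tuple filled in
 * above originates from ethtool's FLOW_EXT 'user-def' field (fs->h_ext.data).
 * A hypothetical command (device name and value made up) could be:
 *
 *   ethtool -N eth0 flow-type tcp4 dst-port 80 user-def 0x1234 action 1
 *
 * Splitting the 64-bit user-def value into layer, offset and data is assumed
 * to happen during spec checking (hclge_fd_check_spec()), so this helper only
 * consumes the already decoded hclge_fd_user_def_info.
 */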
6178 
6179 static int hclge_fd_get_tuple(struct hclge_dev *hdev,
6180                   struct ethtool_rx_flow_spec *fs,
6181                   struct hclge_fd_rule *rule,
6182                   struct hclge_fd_user_def_info *info)
6183 {
6184     u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
6185 
6186     switch (flow_type) {
6187     case SCTP_V4_FLOW:
6188         hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_SCTP);
6189         break;
6190     case TCP_V4_FLOW:
6191         hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_TCP);
6192         break;
6193     case UDP_V4_FLOW:
6194         hclge_fd_get_tcpip4_tuple(hdev, fs, rule, IPPROTO_UDP);
6195         break;
6196     case IP_USER_FLOW:
6197         hclge_fd_get_ip4_tuple(hdev, fs, rule);
6198         break;
6199     case SCTP_V6_FLOW:
6200         hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_SCTP);
6201         break;
6202     case TCP_V6_FLOW:
6203         hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_TCP);
6204         break;
6205     case UDP_V6_FLOW:
6206         hclge_fd_get_tcpip6_tuple(hdev, fs, rule, IPPROTO_UDP);
6207         break;
6208     case IPV6_USER_FLOW:
6209         hclge_fd_get_ip6_tuple(hdev, fs, rule);
6210         break;
6211     case ETHER_FLOW:
6212         hclge_fd_get_ether_tuple(hdev, fs, rule);
6213         break;
6214     default:
6215         return -EOPNOTSUPP;
6216     }
6217 
6218     if (fs->flow_type & FLOW_EXT) {
6219         rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
6220         rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
6221         hclge_fd_get_user_def_tuple(info, rule);
6222     }
6223 
6224     if (fs->flow_type & FLOW_MAC_EXT) {
6225         ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
6226         ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
6227     }
6228 
6229     return 0;
6230 }
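
/* Illustrative example, not part of the driver (device name and values are
 * made up): an ethtool ntuple rule such as
 *
 *   ethtool -N eth0 flow-type tcp4 src-ip 192.168.1.1 dst-port 443 \
 *           vlan 100 action 2 loc 5
 *
 * arrives here with flow_type TCP_V4_FLOW plus FLOW_EXT, so
 * hclge_fd_get_tcpip4_tuple() fills the IPv4 address/port/tos tuples and the
 * FLOW_EXT branch above copies the VLAN tag and the user-def tuple.
 */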
6231 
6232 static int hclge_fd_config_rule(struct hclge_dev *hdev,
6233                 struct hclge_fd_rule *rule)
6234 {
6235     int ret;
6236 
6237     ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
6238     if (ret)
6239         return ret;
6240 
6241     return hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
6242 }
6243 
6244 static int hclge_add_fd_entry_common(struct hclge_dev *hdev,
6245                      struct hclge_fd_rule *rule)
6246 {
6247     int ret;
6248 
6249     spin_lock_bh(&hdev->fd_rule_lock);
6250 
6251     if (hdev->fd_active_type != rule->rule_type &&
6252         (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6253          hdev->fd_active_type == HCLGE_FD_EP_ACTIVE)) {
6254         dev_err(&hdev->pdev->dev,
6255             "mode conflict(new type %d, active type %d), please delete existent rules first\n",
6256             rule->rule_type, hdev->fd_active_type);
6257         spin_unlock_bh(&hdev->fd_rule_lock);
6258         return -EINVAL;
6259     }
6260 
6261     ret = hclge_fd_check_user_def_refcnt(hdev, rule);
6262     if (ret)
6263         goto out;
6264 
6265     ret = hclge_clear_arfs_rules(hdev);
6266     if (ret)
6267         goto out;
6268 
6269     ret = hclge_fd_config_rule(hdev, rule);
6270     if (ret)
6271         goto out;
6272 
6273     rule->state = HCLGE_FD_ACTIVE;
6274     hdev->fd_active_type = rule->rule_type;
6275     hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6276 
6277 out:
6278     spin_unlock_bh(&hdev->fd_rule_lock);
6279     return ret;
6280 }
6281 
6282 static bool hclge_is_cls_flower_active(struct hnae3_handle *handle)
6283 {
6284     struct hclge_vport *vport = hclge_get_vport(handle);
6285     struct hclge_dev *hdev = vport->back;
6286 
6287     return hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE;
6288 }
6289 
6290 static int hclge_fd_parse_ring_cookie(struct hclge_dev *hdev, u64 ring_cookie,
6291                       u16 *vport_id, u8 *action, u16 *queue_id)
6292 {
6293     struct hclge_vport *vport = hdev->vport;
6294 
6295     if (ring_cookie == RX_CLS_FLOW_DISC) {
6296         *action = HCLGE_FD_ACTION_DROP_PACKET;
6297     } else {
6298         u32 ring = ethtool_get_flow_spec_ring(ring_cookie);
6299         u8 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
6300         u16 tqps;
6301 
6302         /* To stay consistent with the user's configuration, subtract 1 when
6303          * printing 'vf': the vf id from ethtool is incremented by 1 for VFs.
6304          */
6305         if (vf > hdev->num_req_vfs) {
6306             dev_err(&hdev->pdev->dev,
6307                 "Error: vf id (%u) should be less than %u\n",
6308                 vf - 1U, hdev->num_req_vfs);
6309             return -EINVAL;
6310         }
6311 
6312         *vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
6313         tqps = hdev->vport[vf].nic.kinfo.num_tqps;
6314 
6315         if (ring >= tqps) {
6316             dev_err(&hdev->pdev->dev,
6317                 "Error: queue id (%u) > max tqp num (%u)\n",
6318                 ring, tqps - 1U);
6319             return -EINVAL;
6320         }
6321 
6322         *action = HCLGE_FD_ACTION_SELECT_QUEUE;
6323         *queue_id = ring;
6324     }
6325 
6326     return 0;
6327 }
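
/* Sketch, for illustration only, of how the ring_cookie decoded above is laid
 * out by the ethtool core: the queue index sits in the low 32 bits and the
 * VF id (offset by one, so 0 means the PF itself) starts at
 * ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF:
 *
 *   u64 cookie = ((u64)vf_plus_one << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF) |
 *                queue_index;
 *
 * and the special value RX_CLS_FLOW_DISC requests that matching packets be
 * dropped instead of steered.
 */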
6328 
6329 static int hclge_add_fd_entry(struct hnae3_handle *handle,
6330                   struct ethtool_rxnfc *cmd)
6331 {
6332     struct hclge_vport *vport = hclge_get_vport(handle);
6333     struct hclge_dev *hdev = vport->back;
6334     struct hclge_fd_user_def_info info;
6335     u16 dst_vport_id = 0, q_index = 0;
6336     struct ethtool_rx_flow_spec *fs;
6337     struct hclge_fd_rule *rule;
6338     u32 unused = 0;
6339     u8 action;
6340     int ret;
6341 
6342     if (!hnae3_dev_fd_supported(hdev)) {
6343         dev_err(&hdev->pdev->dev,
6344             "flow table director is not supported\n");
6345         return -EOPNOTSUPP;
6346     }
6347 
6348     if (!hdev->fd_en) {
6349         dev_err(&hdev->pdev->dev,
6350             "please enable flow director first\n");
6351         return -EOPNOTSUPP;
6352     }
6353 
6354     fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6355 
6356     ret = hclge_fd_check_spec(hdev, fs, &unused, &info);
6357     if (ret)
6358         return ret;
6359 
6360     ret = hclge_fd_parse_ring_cookie(hdev, fs->ring_cookie, &dst_vport_id,
6361                      &action, &q_index);
6362     if (ret)
6363         return ret;
6364 
6365     rule = kzalloc(sizeof(*rule), GFP_KERNEL);
6366     if (!rule)
6367         return -ENOMEM;
6368 
6369     ret = hclge_fd_get_tuple(hdev, fs, rule, &info);
6370     if (ret) {
6371         kfree(rule);
6372         return ret;
6373     }
6374 
6375     rule->flow_type = fs->flow_type;
6376     rule->location = fs->location;
6377     rule->unused_tuple = unused;
6378     rule->vf_id = dst_vport_id;
6379     rule->queue_id = q_index;
6380     rule->action = action;
6381     rule->rule_type = HCLGE_FD_EP_ACTIVE;
6382 
6383     ret = hclge_add_fd_entry_common(hdev, rule);
6384     if (ret)
6385         kfree(rule);
6386 
6387     return ret;
6388 }
6389 
6390 static int hclge_del_fd_entry(struct hnae3_handle *handle,
6391                   struct ethtool_rxnfc *cmd)
6392 {
6393     struct hclge_vport *vport = hclge_get_vport(handle);
6394     struct hclge_dev *hdev = vport->back;
6395     struct ethtool_rx_flow_spec *fs;
6396     int ret;
6397 
6398     if (!hnae3_dev_fd_supported(hdev))
6399         return -EOPNOTSUPP;
6400 
6401     fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6402 
6403     if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6404         return -EINVAL;
6405 
6406     spin_lock_bh(&hdev->fd_rule_lock);
6407     if (hdev->fd_active_type == HCLGE_FD_TC_FLOWER_ACTIVE ||
6408         !test_bit(fs->location, hdev->fd_bmap)) {
6409         dev_err(&hdev->pdev->dev,
6410             "failed to delete rule %u, it does not exist\n", fs->location);
6411         spin_unlock_bh(&hdev->fd_rule_lock);
6412         return -ENOENT;
6413     }
6414 
6415     ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
6416                    NULL, false);
6417     if (ret)
6418         goto out;
6419 
6420     hclge_update_fd_list(hdev, HCLGE_FD_DELETED, fs->location, NULL);
6421 
6422 out:
6423     spin_unlock_bh(&hdev->fd_rule_lock);
6424     return ret;
6425 }
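
/* Illustrative usage (device name hypothetical): a previously installed rule
 * is removed from user space with
 *
 *   ethtool -N eth0 delete 5
 *
 * which reaches this function with fs->location == 5.
 */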
6426 
6427 static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
6428                      bool clear_list)
6429 {
6430     struct hclge_fd_rule *rule;
6431     struct hlist_node *node;
6432     u16 location;
6433 
6434     if (!hnae3_dev_fd_supported(hdev))
6435         return;
6436 
6437     spin_lock_bh(&hdev->fd_rule_lock);
6438 
6439     for_each_set_bit(location, hdev->fd_bmap,
6440              hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
6441         hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
6442                      NULL, false);
6443 
6444     if (clear_list) {
6445         hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
6446                       rule_node) {
6447             hlist_del(&rule->rule_node);
6448             kfree(rule);
6449         }
6450         hdev->fd_active_type = HCLGE_FD_RULE_NONE;
6451         hdev->hclge_fd_rule_num = 0;
6452         bitmap_zero(hdev->fd_bmap,
6453                 hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
6454     }
6455 
6456     spin_unlock_bh(&hdev->fd_rule_lock);
6457 }
6458 
6459 static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
6460 {
6461     hclge_clear_fd_rules_in_list(hdev, true);
6462     hclge_fd_disable_user_def(hdev);
6463 }
6464 
6465 static int hclge_restore_fd_entries(struct hnae3_handle *handle)
6466 {
6467     struct hclge_vport *vport = hclge_get_vport(handle);
6468     struct hclge_dev *hdev = vport->back;
6469     struct hclge_fd_rule *rule;
6470     struct hlist_node *node;
6471 
6472     /* Return ok here, because reset error handling will check this
6473      * return value. If error is returned here, the reset process will
6474      * fail.
6475      */
6476     if (!hnae3_dev_fd_supported(hdev))
6477         return 0;
6478 
6479     /* if fd is disabled, do not restore the rules during reset */
6480     if (!hdev->fd_en)
6481         return 0;
6482 
6483     spin_lock_bh(&hdev->fd_rule_lock);
6484     hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6485         if (rule->state == HCLGE_FD_ACTIVE)
6486             rule->state = HCLGE_FD_TO_ADD;
6487     }
6488     spin_unlock_bh(&hdev->fd_rule_lock);
6489     set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6490 
6491     return 0;
6492 }
6493 
6494 static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
6495                  struct ethtool_rxnfc *cmd)
6496 {
6497     struct hclge_vport *vport = hclge_get_vport(handle);
6498     struct hclge_dev *hdev = vport->back;
6499 
6500     if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
6501         return -EOPNOTSUPP;
6502 
6503     cmd->rule_cnt = hdev->hclge_fd_rule_num;
6504     cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6505 
6506     return 0;
6507 }
6508 
6509 static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
6510                      struct ethtool_tcpip4_spec *spec,
6511                      struct ethtool_tcpip4_spec *spec_mask)
6512 {
6513     spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6514     spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6515             0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6516 
6517     spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6518     spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6519             0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6520 
6521     spec->psrc = cpu_to_be16(rule->tuples.src_port);
6522     spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6523             0 : cpu_to_be16(rule->tuples_mask.src_port);
6524 
6525     spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6526     spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6527             0 : cpu_to_be16(rule->tuples_mask.dst_port);
6528 
6529     spec->tos = rule->tuples.ip_tos;
6530     spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6531             0 : rule->tuples_mask.ip_tos;
6532 }
6533 
6534 static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
6535                   struct ethtool_usrip4_spec *spec,
6536                   struct ethtool_usrip4_spec *spec_mask)
6537 {
6538     spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
6539     spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
6540             0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
6541 
6542     spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
6543     spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
6544             0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
6545 
6546     spec->tos = rule->tuples.ip_tos;
6547     spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6548             0 : rule->tuples_mask.ip_tos;
6549 
6550     spec->proto = rule->tuples.ip_proto;
6551     spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6552             0 : rule->tuples_mask.ip_proto;
6553 
6554     spec->ip_ver = ETH_RX_NFC_IP4;
6555 }
6556 
6557 static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
6558                      struct ethtool_tcpip6_spec *spec,
6559                      struct ethtool_tcpip6_spec *spec_mask)
6560 {
6561     cpu_to_be32_array(spec->ip6src,
6562               rule->tuples.src_ip, IPV6_SIZE);
6563     cpu_to_be32_array(spec->ip6dst,
6564               rule->tuples.dst_ip, IPV6_SIZE);
6565     if (rule->unused_tuple & BIT(INNER_SRC_IP))
6566         memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6567     else
6568         cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
6569                   IPV6_SIZE);
6570 
6571     if (rule->unused_tuple & BIT(INNER_DST_IP))
6572         memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6573     else
6574         cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
6575                   IPV6_SIZE);
6576 
6577     spec->tclass = rule->tuples.ip_tos;
6578     spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6579             0 : rule->tuples_mask.ip_tos;
6580 
6581     spec->psrc = cpu_to_be16(rule->tuples.src_port);
6582     spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
6583             0 : cpu_to_be16(rule->tuples_mask.src_port);
6584 
6585     spec->pdst = cpu_to_be16(rule->tuples.dst_port);
6586     spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
6587             0 : cpu_to_be16(rule->tuples_mask.dst_port);
6588 }
6589 
6590 static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
6591                   struct ethtool_usrip6_spec *spec,
6592                   struct ethtool_usrip6_spec *spec_mask)
6593 {
6594     cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
6595     cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
6596     if (rule->unused_tuple & BIT(INNER_SRC_IP))
6597         memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
6598     else
6599         cpu_to_be32_array(spec_mask->ip6src,
6600                   rule->tuples_mask.src_ip, IPV6_SIZE);
6601 
6602     if (rule->unused_tuple & BIT(INNER_DST_IP))
6603         memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
6604     else
6605         cpu_to_be32_array(spec_mask->ip6dst,
6606                   rule->tuples_mask.dst_ip, IPV6_SIZE);
6607 
6608     spec->tclass = rule->tuples.ip_tos;
6609     spec_mask->tclass = rule->unused_tuple & BIT(INNER_IP_TOS) ?
6610             0 : rule->tuples_mask.ip_tos;
6611 
6612     spec->l4_proto = rule->tuples.ip_proto;
6613     spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
6614             0 : rule->tuples_mask.ip_proto;
6615 }
6616 
6617 static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
6618                     struct ethhdr *spec,
6619                     struct ethhdr *spec_mask)
6620 {
6621     ether_addr_copy(spec->h_source, rule->tuples.src_mac);
6622     ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
6623 
6624     if (rule->unused_tuple & BIT(INNER_SRC_MAC))
6625         eth_zero_addr(spec_mask->h_source);
6626     else
6627         ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
6628 
6629     if (rule->unused_tuple & BIT(INNER_DST_MAC))
6630         eth_zero_addr(spec_mask->h_dest);
6631     else
6632         ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
6633 
6634     spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
6635     spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
6636             0 : cpu_to_be16(rule->tuples_mask.ether_proto);
6637 }
6638 
6639 static void hclge_fd_get_user_def_info(struct ethtool_rx_flow_spec *fs,
6640                        struct hclge_fd_rule *rule)
6641 {
6642     if ((rule->unused_tuple & HCLGE_FD_TUPLE_USER_DEF_TUPLES) ==
6643         HCLGE_FD_TUPLE_USER_DEF_TUPLES) {
6644         fs->h_ext.data[0] = 0;
6645         fs->h_ext.data[1] = 0;
6646         fs->m_ext.data[0] = 0;
6647         fs->m_ext.data[1] = 0;
6648     } else {
6649         fs->h_ext.data[0] = cpu_to_be32(rule->ep.user_def.offset);
6650         fs->h_ext.data[1] = cpu_to_be32(rule->ep.user_def.data);
6651         fs->m_ext.data[0] =
6652                 cpu_to_be32(HCLGE_FD_USER_DEF_OFFSET_UNMASK);
6653         fs->m_ext.data[1] = cpu_to_be32(rule->ep.user_def.data_mask);
6654     }
6655 }
6656 
6657 static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
6658                   struct hclge_fd_rule *rule)
6659 {
6660     if (fs->flow_type & FLOW_EXT) {
6661         fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
6662         fs->m_ext.vlan_tci =
6663                 rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
6664                 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
6665 
6666         hclge_fd_get_user_def_info(fs, rule);
6667     }
6668 
6669     if (fs->flow_type & FLOW_MAC_EXT) {
6670         ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
6671         if (rule->unused_tuple & BIT(INNER_DST_MAC))
6672             eth_zero_addr(fs->m_u.ether_spec.h_dest);
6673         else
6674             ether_addr_copy(fs->m_u.ether_spec.h_dest,
6675                     rule->tuples_mask.dst_mac);
6676     }
6677 }
6678 
6679 static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
6680                            u16 location)
6681 {
6682     struct hclge_fd_rule *rule = NULL;
6683     struct hlist_node *node2;
6684 
6685     hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
6686         if (rule->location == location)
6687             return rule;
6688         else if (rule->location > location)
6689             return NULL;
6690     }
6691 
6692     return NULL;
6693 }
6694 
6695 static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
6696                      struct hclge_fd_rule *rule)
6697 {
6698     if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
6699         fs->ring_cookie = RX_CLS_FLOW_DISC;
6700     } else {
6701         u64 vf_id;
6702 
6703         fs->ring_cookie = rule->queue_id;
6704         vf_id = rule->vf_id;
6705         vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
6706         fs->ring_cookie |= vf_id;
6707     }
6708 }
6709 
6710 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
6711                   struct ethtool_rxnfc *cmd)
6712 {
6713     struct hclge_vport *vport = hclge_get_vport(handle);
6714     struct hclge_fd_rule *rule = NULL;
6715     struct hclge_dev *hdev = vport->back;
6716     struct ethtool_rx_flow_spec *fs;
6717 
6718     if (!hnae3_dev_fd_supported(hdev))
6719         return -EOPNOTSUPP;
6720 
6721     fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
6722 
6723     spin_lock_bh(&hdev->fd_rule_lock);
6724 
6725     rule = hclge_get_fd_rule(hdev, fs->location);
6726     if (!rule) {
6727         spin_unlock_bh(&hdev->fd_rule_lock);
6728         return -ENOENT;
6729     }
6730 
6731     fs->flow_type = rule->flow_type;
6732     switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
6733     case SCTP_V4_FLOW:
6734     case TCP_V4_FLOW:
6735     case UDP_V4_FLOW:
6736         hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
6737                      &fs->m_u.tcp_ip4_spec);
6738         break;
6739     case IP_USER_FLOW:
6740         hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
6741                       &fs->m_u.usr_ip4_spec);
6742         break;
6743     case SCTP_V6_FLOW:
6744     case TCP_V6_FLOW:
6745     case UDP_V6_FLOW:
6746         hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
6747                      &fs->m_u.tcp_ip6_spec);
6748         break;
6749     case IPV6_USER_FLOW:
6750         hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
6751                       &fs->m_u.usr_ip6_spec);
6752         break;
6753     /* The flow type of the fd rule has been checked before it was added
6754      * to the rule list. As all other flow types are handled above, the
6755      * default case must be ETHER_FLOW.
6756      */
6757     default:
6758         hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
6759                     &fs->m_u.ether_spec);
6760         break;
6761     }
6762 
6763     hclge_fd_get_ext_info(fs, rule);
6764 
6765     hclge_fd_get_ring_cookie(fs, rule);
6766 
6767     spin_unlock_bh(&hdev->fd_rule_lock);
6768 
6769     return 0;
6770 }
6771 
6772 static int hclge_get_all_rules(struct hnae3_handle *handle,
6773                    struct ethtool_rxnfc *cmd, u32 *rule_locs)
6774 {
6775     struct hclge_vport *vport = hclge_get_vport(handle);
6776     struct hclge_dev *hdev = vport->back;
6777     struct hclge_fd_rule *rule;
6778     struct hlist_node *node2;
6779     int cnt = 0;
6780 
6781     if (!hnae3_dev_fd_supported(hdev))
6782         return -EOPNOTSUPP;
6783 
6784     cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
6785 
6786     spin_lock_bh(&hdev->fd_rule_lock);
6787     hlist_for_each_entry_safe(rule, node2,
6788                   &hdev->fd_rule_list, rule_node) {
6789         if (cnt == cmd->rule_cnt) {
6790             spin_unlock_bh(&hdev->fd_rule_lock);
6791             return -EMSGSIZE;
6792         }
6793 
6794         if (rule->state == HCLGE_FD_TO_DEL)
6795             continue;
6796 
6797         rule_locs[cnt] = rule->location;
6798         cnt++;
6799     }
6800 
6801     spin_unlock_bh(&hdev->fd_rule_lock);
6802 
6803     cmd->rule_cnt = cnt;
6804 
6805     return 0;
6806 }
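
/* Illustrative read-back path (device name hypothetical): the functions above
 * back the standard ethtool queries, presumably dispatched from the driver's
 * get_rxnfc handler:
 *
 *   ethtool -n eth0          # rule count plus all locations
 *   ethtool -n eth0 rule 5   # dump one rule via hclge_get_fd_rule_info()
 */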
6807 
6808 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
6809                      struct hclge_fd_rule_tuples *tuples)
6810 {
6811 #define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
6812 #define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
6813 
6814     tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
6815     tuples->ip_proto = fkeys->basic.ip_proto;
6816     tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
6817 
6818     if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
6819         tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
6820         tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
6821     } else {
6822         int i;
6823 
6824         for (i = 0; i < IPV6_SIZE; i++) {
6825             tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
6826             tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
6827         }
6828     }
6829 }
6830 
6831 /* traverse all rules, check whether an existing rule has the same tuples */
6832 static struct hclge_fd_rule *
6833 hclge_fd_search_flow_keys(struct hclge_dev *hdev,
6834               const struct hclge_fd_rule_tuples *tuples)
6835 {
6836     struct hclge_fd_rule *rule = NULL;
6837     struct hlist_node *node;
6838 
6839     hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6840         if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
6841             return rule;
6842     }
6843 
6844     return NULL;
6845 }
6846 
6847 static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
6848                      struct hclge_fd_rule *rule)
6849 {
6850     rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
6851                  BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
6852                  BIT(INNER_SRC_PORT);
6853     rule->action = 0;
6854     rule->vf_id = 0;
6855     rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
6856     rule->state = HCLGE_FD_TO_ADD;
6857     if (tuples->ether_proto == ETH_P_IP) {
6858         if (tuples->ip_proto == IPPROTO_TCP)
6859             rule->flow_type = TCP_V4_FLOW;
6860         else
6861             rule->flow_type = UDP_V4_FLOW;
6862     } else {
6863         if (tuples->ip_proto == IPPROTO_TCP)
6864             rule->flow_type = TCP_V6_FLOW;
6865         else
6866             rule->flow_type = UDP_V6_FLOW;
6867     }
6868     memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
6869     memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
6870 }
6871 
6872 static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
6873                       u16 flow_id, struct flow_keys *fkeys)
6874 {
6875     struct hclge_vport *vport = hclge_get_vport(handle);
6876     struct hclge_fd_rule_tuples new_tuples = {};
6877     struct hclge_dev *hdev = vport->back;
6878     struct hclge_fd_rule *rule;
6879     u16 bit_id;
6880 
6881     if (!hnae3_dev_fd_supported(hdev))
6882         return -EOPNOTSUPP;
6883 
6884     /* when an fd rule added by the user already exists,
6885      * arfs should not work
6886      */
6887     spin_lock_bh(&hdev->fd_rule_lock);
6888     if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE &&
6889         hdev->fd_active_type != HCLGE_FD_RULE_NONE) {
6890         spin_unlock_bh(&hdev->fd_rule_lock);
6891         return -EOPNOTSUPP;
6892     }
6893 
6894     hclge_fd_get_flow_tuples(fkeys, &new_tuples);
6895 
6896     /* check whether a flow director filter already exists for this flow:
6897      * if not, create a new filter for it;
6898      * if a filter exists with a different queue id, modify the filter;
6899      * if a filter exists with the same queue id, do nothing
6900      */
6901     rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
6902     if (!rule) {
6903         bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
6904         if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
6905             spin_unlock_bh(&hdev->fd_rule_lock);
6906             return -ENOSPC;
6907         }
6908 
6909         rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
6910         if (!rule) {
6911             spin_unlock_bh(&hdev->fd_rule_lock);
6912             return -ENOMEM;
6913         }
6914 
6915         rule->location = bit_id;
6916         rule->arfs.flow_id = flow_id;
6917         rule->queue_id = queue_id;
6918         hclge_fd_build_arfs_rule(&new_tuples, rule);
6919         hclge_update_fd_list(hdev, rule->state, rule->location, rule);
6920         hdev->fd_active_type = HCLGE_FD_ARFS_ACTIVE;
6921     } else if (rule->queue_id != queue_id) {
6922         rule->queue_id = queue_id;
6923         rule->state = HCLGE_FD_TO_ADD;
6924         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6925         hclge_task_schedule(hdev, 0);
6926     }
6927     spin_unlock_bh(&hdev->fd_rule_lock);
6928     return rule->location;
6929 }
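
/* Illustrative aRFS setup (paths are the standard kernel ones, sizes made
 * up): this entry point is normally only exercised with CONFIG_RFS_ACCEL
 * enabled and the generic RPS flow tables sized, e.g.
 *
 *   echo 32768 > /proc/sys/net/core/rps_sock_flow_entries
 *   echo 4096  > /sys/class/net/eth0/queues/rx-0/rps_flow_cnt
 *
 * after which the stack's ndo_rx_flow_steer hook is expected to land here
 * through the hns3 client code.
 */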
6930 
6931 static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
6932 {
6933 #ifdef CONFIG_RFS_ACCEL
6934     struct hnae3_handle *handle = &hdev->vport[0].nic;
6935     struct hclge_fd_rule *rule;
6936     struct hlist_node *node;
6937 
6938     spin_lock_bh(&hdev->fd_rule_lock);
6939     if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
6940         spin_unlock_bh(&hdev->fd_rule_lock);
6941         return;
6942     }
6943     hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6944         if (rule->state != HCLGE_FD_ACTIVE)
6945             continue;
6946         if (rps_may_expire_flow(handle->netdev, rule->queue_id,
6947                     rule->arfs.flow_id, rule->location)) {
6948             rule->state = HCLGE_FD_TO_DEL;
6949             set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
6950         }
6951     }
6952     spin_unlock_bh(&hdev->fd_rule_lock);
6953 #endif
6954 }
6955 
6956 /* must be called with fd_rule_lock held */
6957 static int hclge_clear_arfs_rules(struct hclge_dev *hdev)
6958 {
6959 #ifdef CONFIG_RFS_ACCEL
6960     struct hclge_fd_rule *rule;
6961     struct hlist_node *node;
6962     int ret;
6963 
6964     if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE)
6965         return 0;
6966 
6967     hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
6968         switch (rule->state) {
6969         case HCLGE_FD_TO_DEL:
6970         case HCLGE_FD_ACTIVE:
6971             ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
6972                            rule->location, NULL, false);
6973             if (ret)
6974                 return ret;
6975             fallthrough;
6976         case HCLGE_FD_TO_ADD:
6977             hclge_fd_dec_rule_cnt(hdev, rule->location);
6978             hlist_del(&rule->rule_node);
6979             kfree(rule);
6980             break;
6981         default:
6982             break;
6983         }
6984     }
6985     hclge_sync_fd_state(hdev);
6986 
6987 #endif
6988     return 0;
6989 }
6990 
6991 static void hclge_get_cls_key_basic(const struct flow_rule *flow,
6992                     struct hclge_fd_rule *rule)
6993 {
6994     if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_BASIC)) {
6995         struct flow_match_basic match;
6996         u16 ethtype_key, ethtype_mask;
6997 
6998         flow_rule_match_basic(flow, &match);
6999         ethtype_key = ntohs(match.key->n_proto);
7000         ethtype_mask = ntohs(match.mask->n_proto);
7001 
7002         if (ethtype_key == ETH_P_ALL) {
7003             ethtype_key = 0;
7004             ethtype_mask = 0;
7005         }
7006         rule->tuples.ether_proto = ethtype_key;
7007         rule->tuples_mask.ether_proto = ethtype_mask;
7008         rule->tuples.ip_proto = match.key->ip_proto;
7009         rule->tuples_mask.ip_proto = match.mask->ip_proto;
7010     } else {
7011         rule->unused_tuple |= BIT(INNER_IP_PROTO);
7012         rule->unused_tuple |= BIT(INNER_ETH_TYPE);
7013     }
7014 }
7015 
7016 static void hclge_get_cls_key_mac(const struct flow_rule *flow,
7017                   struct hclge_fd_rule *rule)
7018 {
7019     if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
7020         struct flow_match_eth_addrs match;
7021 
7022         flow_rule_match_eth_addrs(flow, &match);
7023         ether_addr_copy(rule->tuples.dst_mac, match.key->dst);
7024         ether_addr_copy(rule->tuples_mask.dst_mac, match.mask->dst);
7025         ether_addr_copy(rule->tuples.src_mac, match.key->src);
7026         ether_addr_copy(rule->tuples_mask.src_mac, match.mask->src);
7027     } else {
7028         rule->unused_tuple |= BIT(INNER_DST_MAC);
7029         rule->unused_tuple |= BIT(INNER_SRC_MAC);
7030     }
7031 }
7032 
7033 static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
7034                    struct hclge_fd_rule *rule)
7035 {
7036     if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_VLAN)) {
7037         struct flow_match_vlan match;
7038 
7039         flow_rule_match_vlan(flow, &match);
7040         rule->tuples.vlan_tag1 = match.key->vlan_id |
7041                 (match.key->vlan_priority << VLAN_PRIO_SHIFT);
7042         rule->tuples_mask.vlan_tag1 = match.mask->vlan_id |
7043                 (match.mask->vlan_priority << VLAN_PRIO_SHIFT);
7044     } else {
7045         rule->unused_tuple |= BIT(INNER_VLAN_TAG_FST);
7046     }
7047 }
7048 
7049 static void hclge_get_cls_key_ip(const struct flow_rule *flow,
7050                  struct hclge_fd_rule *rule)
7051 {
7052     u16 addr_type = 0;
7053 
7054     if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_CONTROL)) {
7055         struct flow_match_control match;
7056 
7057         flow_rule_match_control(flow, &match);
7058         addr_type = match.key->addr_type;
7059     }
7060 
7061     if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
7062         struct flow_match_ipv4_addrs match;
7063 
7064         flow_rule_match_ipv4_addrs(flow, &match);
7065         rule->tuples.src_ip[IPV4_INDEX] = be32_to_cpu(match.key->src);
7066         rule->tuples_mask.src_ip[IPV4_INDEX] =
7067                         be32_to_cpu(match.mask->src);
7068         rule->tuples.dst_ip[IPV4_INDEX] = be32_to_cpu(match.key->dst);
7069         rule->tuples_mask.dst_ip[IPV4_INDEX] =
7070                         be32_to_cpu(match.mask->dst);
7071     } else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
7072         struct flow_match_ipv6_addrs match;
7073 
7074         flow_rule_match_ipv6_addrs(flow, &match);
7075         be32_to_cpu_array(rule->tuples.src_ip, match.key->src.s6_addr32,
7076                   IPV6_SIZE);
7077         be32_to_cpu_array(rule->tuples_mask.src_ip,
7078                   match.mask->src.s6_addr32, IPV6_SIZE);
7079         be32_to_cpu_array(rule->tuples.dst_ip, match.key->dst.s6_addr32,
7080                   IPV6_SIZE);
7081         be32_to_cpu_array(rule->tuples_mask.dst_ip,
7082                   match.mask->dst.s6_addr32, IPV6_SIZE);
7083     } else {
7084         rule->unused_tuple |= BIT(INNER_SRC_IP);
7085         rule->unused_tuple |= BIT(INNER_DST_IP);
7086     }
7087 }
7088 
7089 static void hclge_get_cls_key_port(const struct flow_rule *flow,
7090                    struct hclge_fd_rule *rule)
7091 {
7092     if (flow_rule_match_key(flow, FLOW_DISSECTOR_KEY_PORTS)) {
7093         struct flow_match_ports match;
7094 
7095         flow_rule_match_ports(flow, &match);
7096 
7097         rule->tuples.src_port = be16_to_cpu(match.key->src);
7098         rule->tuples_mask.src_port = be16_to_cpu(match.mask->src);
7099         rule->tuples.dst_port = be16_to_cpu(match.key->dst);
7100         rule->tuples_mask.dst_port = be16_to_cpu(match.mask->dst);
7101     } else {
7102         rule->unused_tuple |= BIT(INNER_SRC_PORT);
7103         rule->unused_tuple |= BIT(INNER_DST_PORT);
7104     }
7105 }
7106 
7107 static int hclge_parse_cls_flower(struct hclge_dev *hdev,
7108                   struct flow_cls_offload *cls_flower,
7109                   struct hclge_fd_rule *rule)
7110 {
7111     struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
7112     struct flow_dissector *dissector = flow->match.dissector;
7113 
7114     if (dissector->used_keys &
7115         ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
7116           BIT(FLOW_DISSECTOR_KEY_BASIC) |
7117           BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
7118           BIT(FLOW_DISSECTOR_KEY_VLAN) |
7119           BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
7120           BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
7121           BIT(FLOW_DISSECTOR_KEY_PORTS))) {
7122         dev_err(&hdev->pdev->dev, "unsupported key set: %#x\n",
7123             dissector->used_keys);
7124         return -EOPNOTSUPP;
7125     }
7126 
7127     hclge_get_cls_key_basic(flow, rule);
7128     hclge_get_cls_key_mac(flow, rule);
7129     hclge_get_cls_key_vlan(flow, rule);
7130     hclge_get_cls_key_ip(flow, rule);
7131     hclge_get_cls_key_port(flow, rule);
7132 
7133     return 0;
7134 }
7135 
7136 static int hclge_check_cls_flower(struct hclge_dev *hdev,
7137                   struct flow_cls_offload *cls_flower, int tc)
7138 {
7139     u32 prio = cls_flower->common.prio;
7140 
7141     if (tc < 0 || tc > hdev->tc_max) {
7142         dev_err(&hdev->pdev->dev, "invalid traffic class\n");
7143         return -EINVAL;
7144     }
7145 
7146     if (prio == 0 ||
7147         prio > hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
7148         dev_err(&hdev->pdev->dev,
7149             "prio %u should be in range [1, %u]\n",
7150             prio, hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
7151         return -EINVAL;
7152     }
7153 
7154     if (test_bit(prio - 1, hdev->fd_bmap)) {
7155         dev_err(&hdev->pdev->dev, "prio %u is already used\n", prio);
7156         return -EINVAL;
7157     }
7158     return 0;
7159 }
7160 
7161 static int hclge_add_cls_flower(struct hnae3_handle *handle,
7162                 struct flow_cls_offload *cls_flower,
7163                 int tc)
7164 {
7165     struct hclge_vport *vport = hclge_get_vport(handle);
7166     struct hclge_dev *hdev = vport->back;
7167     struct hclge_fd_rule *rule;
7168     int ret;
7169 
7170     ret = hclge_check_cls_flower(hdev, cls_flower, tc);
7171     if (ret) {
7172         dev_err(&hdev->pdev->dev,
7173             "failed to check cls flower params, ret = %d\n", ret);
7174         return ret;
7175     }
7176 
7177     rule = kzalloc(sizeof(*rule), GFP_KERNEL);
7178     if (!rule)
7179         return -ENOMEM;
7180 
7181     ret = hclge_parse_cls_flower(hdev, cls_flower, rule);
7182     if (ret) {
7183         kfree(rule);
7184         return ret;
7185     }
7186 
7187     rule->action = HCLGE_FD_ACTION_SELECT_TC;
7188     rule->cls_flower.tc = tc;
7189     rule->location = cls_flower->common.prio - 1;
7190     rule->vf_id = 0;
7191     rule->cls_flower.cookie = cls_flower->cookie;
7192     rule->rule_type = HCLGE_FD_TC_FLOWER_ACTIVE;
7193 
7194     ret = hclge_add_fd_entry_common(hdev, rule);
7195     if (ret)
7196         kfree(rule);
7197 
7198     return ret;
7199 }
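
/* Illustrative tc-flower offload (interface and values hypothetical): a
 * filter such as
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress protocol ip prio 1 flower \
 *          dst_ip 192.168.0.10 ip_proto tcp dst_port 80 skip_sw hw_tc 2
 *
 * is expected to arrive with cls_flower->common.prio == 1 (so location 0)
 * and tc == 2, and is rejected by hclge_check_cls_flower() if that prio is
 * already occupied in fd_bmap.
 */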
7200 
7201 static struct hclge_fd_rule *hclge_find_cls_flower(struct hclge_dev *hdev,
7202                            unsigned long cookie)
7203 {
7204     struct hclge_fd_rule *rule;
7205     struct hlist_node *node;
7206 
7207     hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
7208         if (rule->cls_flower.cookie == cookie)
7209             return rule;
7210     }
7211 
7212     return NULL;
7213 }
7214 
7215 static int hclge_del_cls_flower(struct hnae3_handle *handle,
7216                 struct flow_cls_offload *cls_flower)
7217 {
7218     struct hclge_vport *vport = hclge_get_vport(handle);
7219     struct hclge_dev *hdev = vport->back;
7220     struct hclge_fd_rule *rule;
7221     int ret;
7222 
7223     spin_lock_bh(&hdev->fd_rule_lock);
7224 
7225     rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
7226     if (!rule) {
7227         spin_unlock_bh(&hdev->fd_rule_lock);
7228         return -EINVAL;
7229     }
7230 
7231     ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
7232                    NULL, false);
7233     if (ret) {
7234         spin_unlock_bh(&hdev->fd_rule_lock);
7235         return ret;
7236     }
7237 
7238     hclge_update_fd_list(hdev, HCLGE_FD_DELETED, rule->location, NULL);
7239     spin_unlock_bh(&hdev->fd_rule_lock);
7240 
7241     return 0;
7242 }
7243 
7244 static void hclge_sync_fd_list(struct hclge_dev *hdev, struct hlist_head *hlist)
7245 {
7246     struct hclge_fd_rule *rule;
7247     struct hlist_node *node;
7248     int ret = 0;
7249 
7250     if (!test_and_clear_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state))
7251         return;
7252 
7253     spin_lock_bh(&hdev->fd_rule_lock);
7254 
7255     hlist_for_each_entry_safe(rule, node, hlist, rule_node) {
7256         switch (rule->state) {
7257         case HCLGE_FD_TO_ADD:
7258             ret = hclge_fd_config_rule(hdev, rule);
7259             if (ret)
7260                 goto out;
7261             rule->state = HCLGE_FD_ACTIVE;
7262             break;
7263         case HCLGE_FD_TO_DEL:
7264             ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
7265                            rule->location, NULL, false);
7266             if (ret)
7267                 goto out;
7268             hclge_fd_dec_rule_cnt(hdev, rule->location);
7269             hclge_fd_free_node(hdev, rule);
7270             break;
7271         default:
7272             break;
7273         }
7274     }
7275 
7276 out:
7277     if (ret)
7278         set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
7279 
7280     spin_unlock_bh(&hdev->fd_rule_lock);
7281 }
7282 
7283 static void hclge_sync_fd_table(struct hclge_dev *hdev)
7284 {
7285     if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
7286         bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
7287 
7288         hclge_clear_fd_rules_in_list(hdev, clear_list);
7289     }
7290 
7291     hclge_sync_fd_user_def_cfg(hdev, false);
7292 
7293     hclge_sync_fd_list(hdev, &hdev->fd_rule_list);
7294 }
7295 
7296 static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
7297 {
7298     struct hclge_vport *vport = hclge_get_vport(handle);
7299     struct hclge_dev *hdev = vport->back;
7300 
7301     return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
7302            hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
7303 }
7304 
7305 static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
7306 {
7307     struct hclge_vport *vport = hclge_get_vport(handle);
7308     struct hclge_dev *hdev = vport->back;
7309 
7310     return test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
7311 }
7312 
7313 static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
7314 {
7315     struct hclge_vport *vport = hclge_get_vport(handle);
7316     struct hclge_dev *hdev = vport->back;
7317 
7318     return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
7319 }
7320 
7321 static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
7322 {
7323     struct hclge_vport *vport = hclge_get_vport(handle);
7324     struct hclge_dev *hdev = vport->back;
7325 
7326     return hdev->rst_stats.hw_reset_done_cnt;
7327 }
7328 
7329 static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
7330 {
7331     struct hclge_vport *vport = hclge_get_vport(handle);
7332     struct hclge_dev *hdev = vport->back;
7333 
7334     hdev->fd_en = enable;
7335 
7336     if (!enable)
7337         set_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state);
7338     else
7339         hclge_restore_fd_entries(handle);
7340 
7341     hclge_task_schedule(hdev, 0);
7342 }
7343 
7344 static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
7345 {
7346     struct hclge_desc desc;
7347     struct hclge_config_mac_mode_cmd *req =
7348         (struct hclge_config_mac_mode_cmd *)desc.data;
7349     u32 loop_en = 0;
7350     int ret;
7351 
7352     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
7353 
7354     if (enable) {
7355         hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
7356         hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
7357         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
7358         hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
7359         hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
7360         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
7361         hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
7362         hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
7363         hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
7364         hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
7365     }
7366 
7367     req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7368 
7369     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7370     if (ret)
7371         dev_err(&hdev->pdev->dev,
7372             "mac enable fail, ret = %d.\n", ret);
7373 }
7374 
7375 static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
7376                      u8 switch_param, u8 param_mask)
7377 {
7378     struct hclge_mac_vlan_switch_cmd *req;
7379     struct hclge_desc desc;
7380     u32 func_id;
7381     int ret;
7382 
7383     func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
7384     req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
7385 
7386     /* read current config parameter */
7387     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
7388                    true);
7389     req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
7390     req->func_id = cpu_to_le32(func_id);
7391 
7392     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7393     if (ret) {
7394         dev_err(&hdev->pdev->dev,
7395             "read mac vlan switch parameter fail, ret = %d\n", ret);
7396         return ret;
7397     }
7398 
7399     /* modify and write new config parameter */
7400     hclge_comm_cmd_reuse_desc(&desc, false);
7401     req->switch_param = (req->switch_param & param_mask) | switch_param;
7402     req->param_mask = param_mask;
7403 
7404     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7405     if (ret)
7406         dev_err(&hdev->pdev->dev,
7407             "set mac vlan switch parameter fail, ret = %d\n", ret);
7408     return ret;
7409 }
7410 
7411 static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
7412                        int link_ret)
7413 {
7414 #define HCLGE_PHY_LINK_STATUS_NUM  200
7415 
7416     struct phy_device *phydev = hdev->hw.mac.phydev;
7417     int i = 0;
7418     int ret;
7419 
7420     do {
7421         ret = phy_read_status(phydev);
7422         if (ret) {
7423             dev_err(&hdev->pdev->dev,
7424                 "phy update link status fail, ret = %d\n", ret);
7425             return;
7426         }
7427 
7428         if (phydev->link == link_ret)
7429             break;
7430 
7431         msleep(HCLGE_LINK_STATUS_MS);
7432     } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
7433 }
7434 
7435 static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
7436 {
7437 #define HCLGE_MAC_LINK_STATUS_NUM  100
7438 
7439     int link_status;
7440     int i = 0;
7441     int ret;
7442 
7443     do {
7444         ret = hclge_get_mac_link_status(hdev, &link_status);
7445         if (ret)
7446             return ret;
7447         if (link_status == link_ret)
7448             return 0;
7449 
7450         msleep(HCLGE_LINK_STATUS_MS);
7451     } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
7452     return -EBUSY;
7453 }
7454 
7455 static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
7456                       bool is_phy)
7457 {
7458     int link_ret;
7459 
7460     link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
7461 
7462     if (is_phy)
7463         hclge_phy_link_status_wait(hdev, link_ret);
7464 
7465     return hclge_mac_link_status_wait(hdev, link_ret);
7466 }
7467 
7468 static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
7469 {
7470     struct hclge_config_mac_mode_cmd *req;
7471     struct hclge_desc desc;
7472     u32 loop_en;
7473     int ret;
7474 
7475     req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
7476     /* 1 Read out the MAC mode config at first */
7477     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
7478     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7479     if (ret) {
7480         dev_err(&hdev->pdev->dev,
7481             "mac loopback get fail, ret = %d.\n", ret);
7482         return ret;
7483     }
7484 
7485     /* 2 Then setup the loopback flag */
7486     loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
7487     hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
7488 
7489     req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
7490 
7491     /* 3 Config mac work mode with loopback flag
7492      * and its original configure parameters
7493      */
7494     hclge_comm_cmd_reuse_desc(&desc, false);
7495     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7496     if (ret)
7497         dev_err(&hdev->pdev->dev,
7498             "mac loopback set fail, ret = %d.\n", ret);
7499     return ret;
7500 }
7501 
7502 static int hclge_cfg_common_loopback_cmd_send(struct hclge_dev *hdev, bool en,
7503                           enum hnae3_loop loop_mode)
7504 {
7505     struct hclge_common_lb_cmd *req;
7506     struct hclge_desc desc;
7507     u8 loop_mode_b;
7508     int ret;
7509 
7510     req = (struct hclge_common_lb_cmd *)desc.data;
7511     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK, false);
7512 
7513     switch (loop_mode) {
7514     case HNAE3_LOOP_SERIAL_SERDES:
7515         loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
7516         break;
7517     case HNAE3_LOOP_PARALLEL_SERDES:
7518         loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
7519         break;
7520     case HNAE3_LOOP_PHY:
7521         loop_mode_b = HCLGE_CMD_GE_PHY_INNER_LOOP_B;
7522         break;
7523     default:
7524         dev_err(&hdev->pdev->dev,
7525             "unsupported loopback mode %d\n", loop_mode);
7526         return -ENOTSUPP;
7527     }
7528 
7529     req->mask = loop_mode_b;
7530     if (en)
7531         req->enable = loop_mode_b;
7532 
7533     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7534     if (ret)
7535         dev_err(&hdev->pdev->dev,
7536             "failed to send loopback cmd, loop_mode = %d, ret = %d\n",
7537             loop_mode, ret);
7538 
7539     return ret;
7540 }
7541 
7542 static int hclge_cfg_common_loopback_wait(struct hclge_dev *hdev)
7543 {
7544 #define HCLGE_COMMON_LB_RETRY_MS    10
7545 #define HCLGE_COMMON_LB_RETRY_NUM   100
7546 
7547     struct hclge_common_lb_cmd *req;
7548     struct hclge_desc desc;
7549     u32 i = 0;
7550     int ret;
7551 
7552     req = (struct hclge_common_lb_cmd *)desc.data;
7553 
7554     do {
7555         msleep(HCLGE_COMMON_LB_RETRY_MS);
7556         hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_COMMON_LOOPBACK,
7557                        true);
7558         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
7559         if (ret) {
7560             dev_err(&hdev->pdev->dev,
7561                 "failed to get loopback done status, ret = %d\n",
7562                 ret);
7563             return ret;
7564         }
7565     } while (++i < HCLGE_COMMON_LB_RETRY_NUM &&
7566          !(req->result & HCLGE_CMD_COMMON_LB_DONE_B));
7567 
7568     if (!(req->result & HCLGE_CMD_COMMON_LB_DONE_B)) {
7569         dev_err(&hdev->pdev->dev, "wait loopback timeout\n");
7570         return -EBUSY;
7571     } else if (!(req->result & HCLGE_CMD_COMMON_LB_SUCCESS_B)) {
7572         dev_err(&hdev->pdev->dev, "failed to do loopback test\n");
7573         return -EIO;
7574     }
7575 
7576     return 0;
7577 }
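
/* Timing note derived from the constants above: the loop polls at most
 * HCLGE_COMMON_LB_RETRY_NUM (100) times, sleeping HCLGE_COMMON_LB_RETRY_MS
 * (10 ms) before each query, so a loopback that never reports DONE gets
 * roughly one second before -EBUSY is returned.
 */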
7578 
7579 static int hclge_cfg_common_loopback(struct hclge_dev *hdev, bool en,
7580                      enum hnae3_loop loop_mode)
7581 {
7582     int ret;
7583 
7584     ret = hclge_cfg_common_loopback_cmd_send(hdev, en, loop_mode);
7585     if (ret)
7586         return ret;
7587 
7588     return hclge_cfg_common_loopback_wait(hdev);
7589 }
7590 
7591 static int hclge_set_common_loopback(struct hclge_dev *hdev, bool en,
7592                      enum hnae3_loop loop_mode)
7593 {
7594     int ret;
7595 
7596     ret = hclge_cfg_common_loopback(hdev, en, loop_mode);
7597     if (ret)
7598         return ret;
7599 
7600     hclge_cfg_mac_mode(hdev, en);
7601 
7602     ret = hclge_mac_phy_link_status_wait(hdev, en, false);
7603     if (ret)
7604         dev_err(&hdev->pdev->dev,
7605             "serdes loopback config mac mode timeout\n");
7606 
7607     return ret;
7608 }
7609 
7610 static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
7611                      struct phy_device *phydev)
7612 {
7613     int ret;
7614 
7615     if (!phydev->suspended) {
7616         ret = phy_suspend(phydev);
7617         if (ret)
7618             return ret;
7619     }
7620 
7621     ret = phy_resume(phydev);
7622     if (ret)
7623         return ret;
7624 
7625     return phy_loopback(phydev, true);
7626 }
7627 
7628 static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
7629                       struct phy_device *phydev)
7630 {
7631     int ret;
7632 
7633     ret = phy_loopback(phydev, false);
7634     if (ret)
7635         return ret;
7636 
7637     return phy_suspend(phydev);
7638 }
7639 
7640 static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
7641 {
7642     struct phy_device *phydev = hdev->hw.mac.phydev;
7643     int ret;
7644 
7645     if (!phydev) {
7646         if (hnae3_dev_phy_imp_supported(hdev))
7647             return hclge_set_common_loopback(hdev, en,
7648                              HNAE3_LOOP_PHY);
7649         return -ENOTSUPP;
7650     }
7651 
7652     if (en)
7653         ret = hclge_enable_phy_loopback(hdev, phydev);
7654     else
7655         ret = hclge_disable_phy_loopback(hdev, phydev);
7656     if (ret) {
7657         dev_err(&hdev->pdev->dev,
7658             "set phy loopback fail, ret = %d\n", ret);
7659         return ret;
7660     }
7661 
7662     hclge_cfg_mac_mode(hdev, en);
7663 
7664     ret = hclge_mac_phy_link_status_wait(hdev, en, true);
7665     if (ret)
7666         dev_err(&hdev->pdev->dev,
7667             "phy loopback config mac mode timeout\n");
7668 
7669     return ret;
7670 }
7671 
7672 static int hclge_tqp_enable_cmd_send(struct hclge_dev *hdev, u16 tqp_id,
7673                      u16 stream_id, bool enable)
7674 {
7675     struct hclge_desc desc;
7676     struct hclge_cfg_com_tqp_queue_cmd *req =
7677         (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
7678 
7679     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
7680     req->tqp_id = cpu_to_le16(tqp_id);
7681     req->stream_id = cpu_to_le16(stream_id);
7682     if (enable)
7683         req->enable |= 1U << HCLGE_TQP_ENABLE_B;
7684 
7685     return hclge_cmd_send(&hdev->hw, &desc, 1);
7686 }
7687 
7688 static int hclge_tqp_enable(struct hnae3_handle *handle, bool enable)
7689 {
7690     struct hclge_vport *vport = hclge_get_vport(handle);
7691     struct hclge_dev *hdev = vport->back;
7692     int ret;
7693     u16 i;
7694 
7695     for (i = 0; i < handle->kinfo.num_tqps; i++) {
7696         ret = hclge_tqp_enable_cmd_send(hdev, i, 0, enable);
7697         if (ret)
7698             return ret;
7699     }
7700     return 0;
7701 }
7702 
7703 static int hclge_set_loopback(struct hnae3_handle *handle,
7704                   enum hnae3_loop loop_mode, bool en)
7705 {
7706     struct hclge_vport *vport = hclge_get_vport(handle);
7707     struct hclge_dev *hdev = vport->back;
7708     int ret;
7709 
7710     /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
7711      * default, SSU loopback is enabled, so if the SMAC and the DMAC are
7712      * the same, the packets are looped back in the SSU. If SSU loopback
7713      * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
7714      */
7715     if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
7716         u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
7717 
7718         ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
7719                         HCLGE_SWITCH_ALW_LPBK_MASK);
7720         if (ret)
7721             return ret;
7722     }
7723 
7724     switch (loop_mode) {
7725     case HNAE3_LOOP_APP:
7726         ret = hclge_set_app_loopback(hdev, en);
7727         break;
7728     case HNAE3_LOOP_SERIAL_SERDES:
7729     case HNAE3_LOOP_PARALLEL_SERDES:
7730         ret = hclge_set_common_loopback(hdev, en, loop_mode);
7731         break;
7732     case HNAE3_LOOP_PHY:
7733         ret = hclge_set_phy_loopback(hdev, en);
7734         break;
7735     default:
7736         ret = -ENOTSUPP;
7737         dev_err(&hdev->pdev->dev,
7738             "loop_mode %d is not supported\n", loop_mode);
7739         break;
7740     }
7741 
7742     if (ret)
7743         return ret;
7744 
7745     ret = hclge_tqp_enable(handle, en);
7746     if (ret)
7747         dev_err(&hdev->pdev->dev, "failed to %s tqp in loopback, ret = %d\n",
7748             en ? "enable" : "disable", ret);
7749 
7750     return ret;
7751 }
7752 
7753 static int hclge_set_default_loopback(struct hclge_dev *hdev)
7754 {
7755     int ret;
7756 
7757     ret = hclge_set_app_loopback(hdev, false);
7758     if (ret)
7759         return ret;
7760 
7761     ret = hclge_cfg_common_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
7762     if (ret)
7763         return ret;
7764 
7765     return hclge_cfg_common_loopback(hdev, false,
7766                      HNAE3_LOOP_PARALLEL_SERDES);
7767 }
7768 
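/* Wait (a bounded busy-wait of at most HCLGE_FLUSH_LINK_TIMEOUT iterations)
 * until any in-flight link update in the service task is seen to finish,
 * i.e. the LINK_UPDATING bit clears or the service task completes another
 * round (serv_processed_cnt changes).
 */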
7769 static void hclge_flush_link_update(struct hclge_dev *hdev)
7770 {
7771 #define HCLGE_FLUSH_LINK_TIMEOUT    100000
7772 
7773     unsigned long last = hdev->serv_processed_cnt;
7774     int i = 0;
7775 
7776     while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
7777            i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
7778            last == hdev->serv_processed_cnt)
7779         usleep_range(1, 1);
7780 }
7781 
7782 static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
7783 {
7784     struct hclge_vport *vport = hclge_get_vport(handle);
7785     struct hclge_dev *hdev = vport->back;
7786 
7787     if (enable) {
7788         hclge_task_schedule(hdev, 0);
7789     } else {
7790         /* Set the DOWN flag here to disable link updating */
7791         set_bit(HCLGE_STATE_DOWN, &hdev->state);
7792 
7793         /* flush memory to make sure DOWN is seen by service task */
7794         smp_mb__before_atomic();
7795         hclge_flush_link_update(hdev);
7796     }
7797 }
7798 
7799 static int hclge_ae_start(struct hnae3_handle *handle)
7800 {
7801     struct hclge_vport *vport = hclge_get_vport(handle);
7802     struct hclge_dev *hdev = vport->back;
7803 
7804     /* mac enable */
7805     hclge_cfg_mac_mode(hdev, true);
7806     clear_bit(HCLGE_STATE_DOWN, &hdev->state);
7807     hdev->hw.mac.link = 0;
7808 
7809     /* reset tqp stats */
7810     hclge_comm_reset_tqp_stats(handle);
7811 
7812     hclge_mac_start_phy(hdev);
7813 
7814     return 0;
7815 }
7816 
7817 static void hclge_ae_stop(struct hnae3_handle *handle)
7818 {
7819     struct hclge_vport *vport = hclge_get_vport(handle);
7820     struct hclge_dev *hdev = vport->back;
7821 
7822     set_bit(HCLGE_STATE_DOWN, &hdev->state);
7823     spin_lock_bh(&hdev->fd_rule_lock);
7824     hclge_clear_arfs_rules(hdev);
7825     spin_unlock_bh(&hdev->fd_rule_lock);
7826 
7827     /* If it is not PF reset or FLR, the firmware will disable the MAC,
7828      * so it only needs to stop the phy here.
7829      */
7830     if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) &&
7831         hdev->reset_type != HNAE3_FUNC_RESET &&
7832         hdev->reset_type != HNAE3_FLR_RESET) {
7833         hclge_mac_stop_phy(hdev);
7834         hclge_update_link_status(hdev);
7835         return;
7836     }
7837 
7838     hclge_reset_tqp(handle);
7839 
7840     hclge_config_mac_tnl_int(hdev, false);
7841 
7842     /* Mac disable */
7843     hclge_cfg_mac_mode(hdev, false);
7844 
7845     hclge_mac_stop_phy(hdev);
7846 
7847     /* reset tqp stats */
7848     hclge_comm_reset_tqp_stats(handle);
7849     hclge_update_link_status(hdev);
7850 }
7851 
7852 int hclge_vport_start(struct hclge_vport *vport)
7853 {
7854     struct hclge_dev *hdev = vport->back;
7855 
7856     set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7857     set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
7858     vport->last_active_jiffies = jiffies;
7859 
7860     if (test_bit(vport->vport_id, hdev->vport_config_block)) {
7861         if (vport->vport_id) {
7862             hclge_restore_mac_table_common(vport);
7863             hclge_restore_vport_vlan_table(vport);
7864         } else {
7865             hclge_restore_hw_table(hdev);
7866         }
7867     }
7868 
7869     clear_bit(vport->vport_id, hdev->vport_config_block);
7870 
7871     return 0;
7872 }
7873 
7874 void hclge_vport_stop(struct hclge_vport *vport)
7875 {
7876     clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
7877 }
7878 
7879 static int hclge_client_start(struct hnae3_handle *handle)
7880 {
7881     struct hclge_vport *vport = hclge_get_vport(handle);
7882 
7883     return hclge_vport_start(vport);
7884 }
7885 
7886 static void hclge_client_stop(struct hnae3_handle *handle)
7887 {
7888     struct hclge_vport *vport = hclge_get_vport(handle);
7889 
7890     hclge_vport_stop(vport);
7891 }
7892 
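/* Translate a MAC_VLAN table command response into an errno. As read from
 * the handling below (not from hardware documentation): for ADD, resp_code
 * 0 or 1 means success and the overflow codes map to -ENOSPC; for REMOVE
 * and LKUP, resp_code 0 means success and 1 means the entry was not found
 * (-ENOENT); anything else is treated as -EIO.
 */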
7893 static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
7894                      u16 cmdq_resp, u8  resp_code,
7895                      enum hclge_mac_vlan_tbl_opcode op)
7896 {
7897     struct hclge_dev *hdev = vport->back;
7898 
7899     if (cmdq_resp) {
7900         dev_err(&hdev->pdev->dev,
7901             "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
7902             cmdq_resp);
7903         return -EIO;
7904     }
7905 
7906     if (op == HCLGE_MAC_VLAN_ADD) {
7907         if (!resp_code || resp_code == 1)
7908             return 0;
7909         else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
7910              resp_code == HCLGE_ADD_MC_OVERFLOW)
7911             return -ENOSPC;
7912 
7913         dev_err(&hdev->pdev->dev,
7914             "add mac addr failed for undefined, code=%u.\n",
7915             resp_code);
7916         return -EIO;
7917     } else if (op == HCLGE_MAC_VLAN_REMOVE) {
7918         if (!resp_code) {
7919             return 0;
7920         } else if (resp_code == 1) {
7921             dev_dbg(&hdev->pdev->dev,
7922                 "remove mac addr failed for miss.\n");
7923             return -ENOENT;
7924         }
7925 
7926         dev_err(&hdev->pdev->dev,
7927             "remove mac addr failed for undefined, code=%u.\n",
7928             resp_code);
7929         return -EIO;
7930     } else if (op == HCLGE_MAC_VLAN_LKUP) {
7931         if (!resp_code) {
7932             return 0;
7933         } else if (resp_code == 1) {
7934             dev_dbg(&hdev->pdev->dev,
7935                 "lookup mac addr failed for miss.\n");
7936             return -ENOENT;
7937         }
7938 
7939         dev_err(&hdev->pdev->dev,
7940             "lookup mac addr failed for undefined, code=%u.\n",
7941             resp_code);
7942         return -EIO;
7943     }
7944 
7945     dev_err(&hdev->pdev->dev,
7946         "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
7947 
7948     return -EINVAL;
7949 }
7950 
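/* Set or clear the bit for @vfid in the per-function bitmap that is spread
 * over desc[1] (functions 0..191) and desc[2] (functions 192..255).
 * Worked example (illustrative only): vfid 200 falls in the second range,
 * so word_num = (200 - 192) / 32 = 0 and bit_num = 200 % 32 = 8, i.e.
 * bit 8 of desc[2].data[0].
 */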
7951 static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
7952 {
7953 #define HCLGE_VF_NUM_IN_FIRST_DESC 192
7954 
7955     unsigned int word_num;
7956     unsigned int bit_num;
7957 
7958     if (vfid > 255 || vfid < 0)
7959         return -EIO;
7960 
7961     if (vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
7962         word_num = vfid / 32;
7963         bit_num  = vfid % 32;
7964         if (clr)
7965             desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7966         else
7967             desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
7968     } else {
7969         word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
7970         bit_num  = vfid % 32;
7971         if (clr)
7972             desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
7973         else
7974             desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
7975     }
7976 
7977     return 0;
7978 }
7979 
7980 static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
7981 {
7982 #define HCLGE_DESC_NUMBER 3
7983 #define HCLGE_FUNC_NUMBER_PER_DESC 6
7984     int i, j;
7985 
7986     for (i = 1; i < HCLGE_DESC_NUMBER; i++)
7987         for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
7988             if (desc[i].data[j])
7989                 return false;
7990 
7991     return true;
7992 }
7993 
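/* Pack a 6-byte MAC address into the table entry: bytes 0..3 go into
 * mac_addr_hi32 (byte 0 in the lowest 8 bits) and bytes 4..5 into
 * mac_addr_lo16. Illustrative example: for 00:11:22:33:44:55 this gives
 * high_val = 0x33221100 and low_val = 0x5544.
 */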
7994 static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
7995                    const u8 *addr, bool is_mc)
7996 {
7997     const unsigned char *mac_addr = addr;
7998     u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
7999                (mac_addr[0]) | (mac_addr[1] << 8);
8000     u32 low_val  = mac_addr[4] | (mac_addr[5] << 8);
8001 
8002     hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8003     if (is_mc) {
8004         hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
8005         hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
8006     }
8007 
8008     new_req->mac_addr_hi32 = cpu_to_le32(high_val);
8009     new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
8010 }
8011 
8012 static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
8013                      struct hclge_mac_vlan_tbl_entry_cmd *req)
8014 {
8015     struct hclge_dev *hdev = vport->back;
8016     struct hclge_desc desc;
8017     u8 resp_code;
8018     u16 retval;
8019     int ret;
8020 
8021     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
8022 
8023     memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8024 
8025     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8026     if (ret) {
8027         dev_err(&hdev->pdev->dev,
8028             "del mac addr failed for cmd_send, ret =%d.\n",
8029             ret);
8030         return ret;
8031     }
8032     resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8033     retval = le16_to_cpu(desc.retval);
8034 
8035     return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8036                          HCLGE_MAC_VLAN_REMOVE);
8037 }
8038 
8039 static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
8040                      struct hclge_mac_vlan_tbl_entry_cmd *req,
8041                      struct hclge_desc *desc,
8042                      bool is_mc)
8043 {
8044     struct hclge_dev *hdev = vport->back;
8045     u8 resp_code;
8046     u16 retval;
8047     int ret;
8048 
8049     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
8050     if (is_mc) {
8051         desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8052         memcpy(desc[0].data,
8053                req,
8054                sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8055         hclge_cmd_setup_basic_desc(&desc[1],
8056                        HCLGE_OPC_MAC_VLAN_ADD,
8057                        true);
8058         desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8059         hclge_cmd_setup_basic_desc(&desc[2],
8060                        HCLGE_OPC_MAC_VLAN_ADD,
8061                        true);
8062         ret = hclge_cmd_send(&hdev->hw, desc, 3);
8063     } else {
8064         memcpy(desc[0].data,
8065                req,
8066                sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8067         ret = hclge_cmd_send(&hdev->hw, desc, 1);
8068     }
8069     if (ret) {
8070         dev_err(&hdev->pdev->dev,
8071             "lookup mac addr failed for cmd_send, ret =%d.\n",
8072             ret);
8073         return ret;
8074     }
8075     resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
8076     retval = le16_to_cpu(desc[0].retval);
8077 
8078     return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
8079                          HCLGE_MAC_VLAN_LKUP);
8080 }
8081 
8082 static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
8083                   struct hclge_mac_vlan_tbl_entry_cmd *req,
8084                   struct hclge_desc *mc_desc)
8085 {
8086     struct hclge_dev *hdev = vport->back;
8087     int cfg_status;
8088     u8 resp_code;
8089     u16 retval;
8090     int ret;
8091 
8092     if (!mc_desc) {
8093         struct hclge_desc desc;
8094 
8095         hclge_cmd_setup_basic_desc(&desc,
8096                        HCLGE_OPC_MAC_VLAN_ADD,
8097                        false);
8098         memcpy(desc.data, req,
8099                sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8100         ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8101         resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
8102         retval = le16_to_cpu(desc.retval);
8103 
8104         cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8105                                resp_code,
8106                                HCLGE_MAC_VLAN_ADD);
8107     } else {
8108         hclge_comm_cmd_reuse_desc(&mc_desc[0], false);
8109         mc_desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8110         hclge_comm_cmd_reuse_desc(&mc_desc[1], false);
8111         mc_desc[1].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
8112         hclge_comm_cmd_reuse_desc(&mc_desc[2], false);
8113         mc_desc[2].flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_NEXT);
8114         memcpy(mc_desc[0].data, req,
8115                sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
8116         ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
8117         resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
8118         retval = le16_to_cpu(mc_desc[0].retval);
8119 
8120         cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
8121                                resp_code,
8122                                HCLGE_MAC_VLAN_ADD);
8123     }
8124 
8125     if (ret) {
8126         dev_err(&hdev->pdev->dev,
8127             "add mac addr failed for cmd_send, ret =%d.\n",
8128             ret);
8129         return ret;
8130     }
8131 
8132     return cfg_status;
8133 }
8134 
8135 static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
8136                    u16 *allocated_size)
8137 {
8138     struct hclge_umv_spc_alc_cmd *req;
8139     struct hclge_desc desc;
8140     int ret;
8141 
8142     req = (struct hclge_umv_spc_alc_cmd *)desc.data;
8143     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
8144 
8145     req->space_size = cpu_to_le32(space_size);
8146 
8147     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
8148     if (ret) {
8149         dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
8150             ret);
8151         return ret;
8152     }
8153 
8154     *allocated_size = le32_to_cpu(desc.data[1]);
8155 
8156     return 0;
8157 }
8158 
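/* Split the allocated UMV (unicast MAC VLAN) space into a private quota
 * per vport plus one shared pool. Arithmetic example (illustrative): with
 * allocated_size = 256 and num_alloc_vport = 3, priv_umv_size = 256 / 4 = 64
 * and share_umv_size = 64 + 256 % 4 = 64.
 */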
8159 static int hclge_init_umv_space(struct hclge_dev *hdev)
8160 {
8161     u16 allocated_size = 0;
8162     int ret;
8163 
8164     ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
8165     if (ret)
8166         return ret;
8167 
8168     if (allocated_size < hdev->wanted_umv_size)
8169         dev_warn(&hdev->pdev->dev,
8170              "failed to alloc umv space, want %u, get %u\n",
8171              hdev->wanted_umv_size, allocated_size);
8172 
8173     hdev->max_umv_size = allocated_size;
8174     hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
8175     hdev->share_umv_size = hdev->priv_umv_size +
8176             hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8177 
8178     if (hdev->ae_dev->dev_specs.mc_mac_size)
8179         set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
8180 
8181     return 0;
8182 }
8183 
8184 static void hclge_reset_umv_space(struct hclge_dev *hdev)
8185 {
8186     struct hclge_vport *vport;
8187     int i;
8188 
8189     for (i = 0; i < hdev->num_alloc_vport; i++) {
8190         vport = &hdev->vport[i];
8191         vport->used_umv_num = 0;
8192     }
8193 
8194     mutex_lock(&hdev->vport_lock);
8195     hdev->share_umv_size = hdev->priv_umv_size +
8196             hdev->max_umv_size % (hdev->num_alloc_vport + 1);
8197     mutex_unlock(&hdev->vport_lock);
8198 
8199     hdev->used_mc_mac_num = 0;
8200 }
8201 
8202 static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
8203 {
8204     struct hclge_dev *hdev = vport->back;
8205     bool is_full;
8206 
8207     if (need_lock)
8208         mutex_lock(&hdev->vport_lock);
8209 
8210     is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
8211            hdev->share_umv_size == 0);
8212 
8213     if (need_lock)
8214         mutex_unlock(&hdev->vport_lock);
8215 
8216     return is_full;
8217 }
8218 
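/* Account one UMV entry for @vport: entries beyond the vport's private
 * quota (priv_umv_size) are charged to, or refunded from, the shared pool.
 * Callers in the uc add/remove paths below hold hdev->vport_lock around
 * this accounting.
 */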
8219 static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
8220 {
8221     struct hclge_dev *hdev = vport->back;
8222 
8223     if (is_free) {
8224         if (vport->used_umv_num > hdev->priv_umv_size)
8225             hdev->share_umv_size++;
8226 
8227         if (vport->used_umv_num > 0)
8228             vport->used_umv_num--;
8229     } else {
8230         if (vport->used_umv_num >= hdev->priv_umv_size &&
8231             hdev->share_umv_size > 0)
8232             hdev->share_umv_size--;
8233         vport->used_umv_num++;
8234     }
8235 }
8236 
8237 static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
8238                           const u8 *mac_addr)
8239 {
8240     struct hclge_mac_node *mac_node, *tmp;
8241 
8242     list_for_each_entry_safe(mac_node, tmp, list, node)
8243         if (ether_addr_equal(mac_addr, mac_node->mac_addr))
8244             return mac_node;
8245 
8246     return NULL;
8247 }
8248 
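/* Merge a new request into an existing mac node. State transitions, as
 * implemented below: a TO_ADD request on a TO_DEL node makes it ACTIVE
 * again; a TO_DEL request on a TO_ADD node drops the node (it never
 * reached hardware), otherwise the node is marked TO_DEL; the ACTIVE
 * promotion only applies to TO_ADD nodes.
 */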
8249 static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
8250                   enum HCLGE_MAC_NODE_STATE state)
8251 {
8252     switch (state) {
8253     /* from set_rx_mode or tmp_add_list */
8254     case HCLGE_MAC_TO_ADD:
8255         if (mac_node->state == HCLGE_MAC_TO_DEL)
8256             mac_node->state = HCLGE_MAC_ACTIVE;
8257         break;
8258     /* only from set_rx_mode */
8259     case HCLGE_MAC_TO_DEL:
8260         if (mac_node->state == HCLGE_MAC_TO_ADD) {
8261             list_del(&mac_node->node);
8262             kfree(mac_node);
8263         } else {
8264             mac_node->state = HCLGE_MAC_TO_DEL;
8265         }
8266         break;
8267     /* only from tmp_add_list; the mac_node->state won't be
8268      * ACTIVE here.
8269      */
8270     case HCLGE_MAC_ACTIVE:
8271         if (mac_node->state == HCLGE_MAC_TO_ADD)
8272             mac_node->state = HCLGE_MAC_ACTIVE;
8273 
8274         break;
8275     }
8276 }
8277 
8278 int hclge_update_mac_list(struct hclge_vport *vport,
8279               enum HCLGE_MAC_NODE_STATE state,
8280               enum HCLGE_MAC_ADDR_TYPE mac_type,
8281               const unsigned char *addr)
8282 {
8283     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8284     struct hclge_dev *hdev = vport->back;
8285     struct hclge_mac_node *mac_node;
8286     struct list_head *list;
8287 
8288     list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8289         &vport->uc_mac_list : &vport->mc_mac_list;
8290 
8291     spin_lock_bh(&vport->mac_list_lock);
8292 
8293     /* if the mac addr is already in the mac list, there is no need to add
8294      * a new node; just check the existing node's state and convert it to a
8295      * new state, remove it, or leave it unchanged.
8296      */
8297     mac_node = hclge_find_mac_node(list, addr);
8298     if (mac_node) {
8299         hclge_update_mac_node(mac_node, state);
8300         spin_unlock_bh(&vport->mac_list_lock);
8301         set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8302         return 0;
8303     }
8304 
8305     /* if this address has never been added, there is nothing to delete */
8306     if (state == HCLGE_MAC_TO_DEL) {
8307         spin_unlock_bh(&vport->mac_list_lock);
8308         hnae3_format_mac_addr(format_mac_addr, addr);
8309         dev_err(&hdev->pdev->dev,
8310             "failed to delete address %s from mac list\n",
8311             format_mac_addr);
8312         return -ENOENT;
8313     }
8314 
8315     mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
8316     if (!mac_node) {
8317         spin_unlock_bh(&vport->mac_list_lock);
8318         return -ENOMEM;
8319     }
8320 
8321     set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
8322 
8323     mac_node->state = state;
8324     ether_addr_copy(mac_node->mac_addr, addr);
8325     list_add_tail(&mac_node->node, list);
8326 
8327     spin_unlock_bh(&vport->mac_list_lock);
8328 
8329     return 0;
8330 }
8331 
8332 static int hclge_add_uc_addr(struct hnae3_handle *handle,
8333                  const unsigned char *addr)
8334 {
8335     struct hclge_vport *vport = hclge_get_vport(handle);
8336 
8337     return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
8338                      addr);
8339 }
8340 
8341 int hclge_add_uc_addr_common(struct hclge_vport *vport,
8342                  const unsigned char *addr)
8343 {
8344     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8345     struct hclge_dev *hdev = vport->back;
8346     struct hclge_mac_vlan_tbl_entry_cmd req;
8347     struct hclge_desc desc;
8348     u16 egress_port = 0;
8349     int ret;
8350 
8351     /* mac addr check */
8352     if (is_zero_ether_addr(addr) ||
8353         is_broadcast_ether_addr(addr) ||
8354         is_multicast_ether_addr(addr)) {
8355         hnae3_format_mac_addr(format_mac_addr, addr);
8356         dev_err(&hdev->pdev->dev,
8357             "Set_uc mac err! invalid mac:%s. is_zero:%d,is_br=%d,is_mul=%d\n",
8358              format_mac_addr, is_zero_ether_addr(addr),
8359              is_broadcast_ether_addr(addr),
8360              is_multicast_ether_addr(addr));
8361         return -EINVAL;
8362     }
8363 
8364     memset(&req, 0, sizeof(req));
8365 
8366     hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
8367             HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
8368 
8369     req.egress_port = cpu_to_le16(egress_port);
8370 
8371     hclge_prepare_mac_addr(&req, addr, false);
8372 
8373     /* Look up the mac address in the mac_vlan table, and add
8374      * it if the entry does not exist. Duplicate unicast entries
8375      * are not allowed in the mac vlan table.
8376      */
8377     ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
8378     if (ret == -ENOENT) {
8379         mutex_lock(&hdev->vport_lock);
8380         if (!hclge_is_umv_space_full(vport, false)) {
8381             ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
8382             if (!ret)
8383                 hclge_update_umv_space(vport, false);
8384             mutex_unlock(&hdev->vport_lock);
8385             return ret;
8386         }
8387         mutex_unlock(&hdev->vport_lock);
8388 
8389         if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
8390             dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
8391                 hdev->priv_umv_size);
8392 
8393         return -ENOSPC;
8394     }
8395 
8396     /* check if we just hit the duplicate */
8397     if (!ret)
8398         return -EEXIST;
8399 
8400     return ret;
8401 }
8402 
8403 static int hclge_rm_uc_addr(struct hnae3_handle *handle,
8404                 const unsigned char *addr)
8405 {
8406     struct hclge_vport *vport = hclge_get_vport(handle);
8407 
8408     return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
8409                      addr);
8410 }
8411 
8412 int hclge_rm_uc_addr_common(struct hclge_vport *vport,
8413                 const unsigned char *addr)
8414 {
8415     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8416     struct hclge_dev *hdev = vport->back;
8417     struct hclge_mac_vlan_tbl_entry_cmd req;
8418     int ret;
8419 
8420     /* mac addr check */
8421     if (is_zero_ether_addr(addr) ||
8422         is_broadcast_ether_addr(addr) ||
8423         is_multicast_ether_addr(addr)) {
8424         hnae3_format_mac_addr(format_mac_addr, addr);
8425         dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%s.\n",
8426             format_mac_addr);
8427         return -EINVAL;
8428     }
8429 
8430     memset(&req, 0, sizeof(req));
8431     hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
8432     hclge_prepare_mac_addr(&req, addr, false);
8433     ret = hclge_remove_mac_vlan_tbl(vport, &req);
8434     if (!ret || ret == -ENOENT) {
8435         mutex_lock(&hdev->vport_lock);
8436         hclge_update_umv_space(vport, true);
8437         mutex_unlock(&hdev->vport_lock);
8438         return 0;
8439     }
8440 
8441     return ret;
8442 }
8443 
8444 static int hclge_add_mc_addr(struct hnae3_handle *handle,
8445                  const unsigned char *addr)
8446 {
8447     struct hclge_vport *vport = hclge_get_vport(handle);
8448 
8449     return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
8450                      addr);
8451 }
8452 
8453 int hclge_add_mc_addr_common(struct hclge_vport *vport,
8454                  const unsigned char *addr)
8455 {
8456     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8457     struct hclge_dev *hdev = vport->back;
8458     struct hclge_mac_vlan_tbl_entry_cmd req;
8459     struct hclge_desc desc[3];
8460     bool is_new_addr = false;
8461     int status;
8462 
8463     /* mac addr check */
8464     if (!is_multicast_ether_addr(addr)) {
8465         hnae3_format_mac_addr(format_mac_addr, addr);
8466         dev_err(&hdev->pdev->dev,
8467             "Add mc mac err! invalid mac:%s.\n",
8468              format_mac_addr);
8469         return -EINVAL;
8470     }
8471     memset(&req, 0, sizeof(req));
8472     hclge_prepare_mac_addr(&req, addr, true);
8473     status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8474     if (status) {
8475         if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
8476             hdev->used_mc_mac_num >=
8477             hdev->ae_dev->dev_specs.mc_mac_size)
8478             goto err_no_space;
8479 
8480         is_new_addr = true;
8481 
8482         /* This mac addr does not exist, add a new entry for it */
8483         memset(desc[0].data, 0, sizeof(desc[0].data));
8484         memset(desc[1].data, 0, sizeof(desc[1].data));
8485         memset(desc[2].data, 0, sizeof(desc[2].data));
8486     }
8487     status = hclge_update_desc_vfid(desc, vport->vport_id, false);
8488     if (status)
8489         return status;
8490     status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8491     if (status == -ENOSPC)
8492         goto err_no_space;
8493     else if (!status && is_new_addr)
8494         hdev->used_mc_mac_num++;
8495 
8496     return status;
8497 
8498 err_no_space:
8499     /* if already overflowed, do not print it each time */
8500     if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE)) {
8501         vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8502         dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
8503     }
8504 
8505     return -ENOSPC;
8506 }
8507 
8508 static int hclge_rm_mc_addr(struct hnae3_handle *handle,
8509                 const unsigned char *addr)
8510 {
8511     struct hclge_vport *vport = hclge_get_vport(handle);
8512 
8513     return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
8514                      addr);
8515 }
8516 
8517 int hclge_rm_mc_addr_common(struct hclge_vport *vport,
8518                 const unsigned char *addr)
8519 {
8520     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8521     struct hclge_dev *hdev = vport->back;
8522     struct hclge_mac_vlan_tbl_entry_cmd req;
8523     enum hclge_comm_cmd_status status;
8524     struct hclge_desc desc[3];
8525 
8526     /* mac addr check */
8527     if (!is_multicast_ether_addr(addr)) {
8528         hnae3_format_mac_addr(format_mac_addr, addr);
8529         dev_dbg(&hdev->pdev->dev,
8530             "Remove mc mac err! invalid mac:%s.\n",
8531              format_mac_addr);
8532         return -EINVAL;
8533     }
8534 
8535     memset(&req, 0, sizeof(req));
8536     hclge_prepare_mac_addr(&req, addr, true);
8537     status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
8538     if (!status) {
8539         /* This mac addr exists, remove this handle's VFID from it */
8540         status = hclge_update_desc_vfid(desc, vport->vport_id, true);
8541         if (status)
8542             return status;
8543 
8544         if (hclge_is_all_function_id_zero(desc)) {
8545             /* All the vfids are zero, so this entry needs to be deleted */
8546             status = hclge_remove_mac_vlan_tbl(vport, &req);
8547             if (!status)
8548                 hdev->used_mc_mac_num--;
8549         } else {
8550             /* Not all the vfids are zero, so update the entry with the new vfid bitmap */
8551             status = hclge_add_mac_vlan_tbl(vport, &req, desc);
8552         }
8553     } else if (status == -ENOENT) {
8554         status = 0;
8555     }
8556 
8557     return status;
8558 }
8559 
8560 static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
8561                       struct list_head *list,
8562                       enum HCLGE_MAC_ADDR_TYPE mac_type)
8563 {
8564     int (*sync)(struct hclge_vport *vport, const unsigned char *addr);
8565     struct hclge_mac_node *mac_node, *tmp;
8566     int ret;
8567 
8568     if (mac_type == HCLGE_MAC_ADDR_UC)
8569         sync = hclge_add_uc_addr_common;
8570     else
8571         sync = hclge_add_mc_addr_common;
8572 
8573     list_for_each_entry_safe(mac_node, tmp, list, node) {
8574         ret = sync(vport, mac_node->mac_addr);
8575         if (!ret) {
8576             mac_node->state = HCLGE_MAC_ACTIVE;
8577         } else {
8578             set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8579                 &vport->state);
8580 
8581             /* If one unicast mac address already exists in hardware,
8582              * we still need to try whether other unicast mac addresses
8583              * are new addresses that can be added.
8584              * A multicast mac address can be reused, so even though
8585              * there is no space to add a new multicast mac address,
8586              * we should check whether other mac addresses already
8587              * exist in hardware and can be reused.
8588              */
8589             if ((mac_type == HCLGE_MAC_ADDR_UC && ret != -EEXIST) ||
8590                 (mac_type == HCLGE_MAC_ADDR_MC && ret != -ENOSPC))
8591                 break;
8592         }
8593     }
8594 }
8595 
8596 static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
8597                     struct list_head *list,
8598                     enum HCLGE_MAC_ADDR_TYPE mac_type)
8599 {
8600     int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8601     struct hclge_mac_node *mac_node, *tmp;
8602     int ret;
8603 
8604     if (mac_type == HCLGE_MAC_ADDR_UC)
8605         unsync = hclge_rm_uc_addr_common;
8606     else
8607         unsync = hclge_rm_mc_addr_common;
8608 
8609     list_for_each_entry_safe(mac_node, tmp, list, node) {
8610         ret = unsync(vport, mac_node->mac_addr);
8611         if (!ret || ret == -ENOENT) {
8612             list_del(&mac_node->node);
8613             kfree(mac_node);
8614         } else {
8615             set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
8616                 &vport->state);
8617             break;
8618         }
8619     }
8620 }
8621 
8622 static bool hclge_sync_from_add_list(struct list_head *add_list,
8623                      struct list_head *mac_list)
8624 {
8625     struct hclge_mac_node *mac_node, *tmp, *new_node;
8626     bool all_added = true;
8627 
8628     list_for_each_entry_safe(mac_node, tmp, add_list, node) {
8629         if (mac_node->state == HCLGE_MAC_TO_ADD)
8630             all_added = false;
8631 
8632         /* if the mac address from tmp_add_list is not in the
8633          * uc/mc_mac_list, it means a TO_DEL request was received
8634          * during the time window of adding the mac address into the
8635          * mac table. If the mac_node state is ACTIVE, change it to
8636          * TO_DEL so it will be removed next time. Otherwise it must
8637          * be TO_ADD, i.e. this address has not been added into the
8638          * mac table, so just remove the mac node.
8639          */
8640         new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8641         if (new_node) {
8642             hclge_update_mac_node(new_node, mac_node->state);
8643             list_del(&mac_node->node);
8644             kfree(mac_node);
8645         } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
8646             mac_node->state = HCLGE_MAC_TO_DEL;
8647             list_move_tail(&mac_node->node, mac_list);
8648         } else {
8649             list_del(&mac_node->node);
8650             kfree(mac_node);
8651         }
8652     }
8653 
8654     return all_added;
8655 }
8656 
8657 static void hclge_sync_from_del_list(struct list_head *del_list,
8658                      struct list_head *mac_list)
8659 {
8660     struct hclge_mac_node *mac_node, *tmp, *new_node;
8661 
8662     list_for_each_entry_safe(mac_node, tmp, del_list, node) {
8663         new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
8664         if (new_node) {
8665             /* If the mac addr exists in the mac list, it means a
8666              * new TO_ADD request was received during the time window
8667              * of configuring the mac address. The mac node state is
8668              * TO_ADD, and the address is already present in the
8669              * hardware (because the delete failed), so we just need
8670              * to change the mac node state to ACTIVE.
8671              */
8672             new_node->state = HCLGE_MAC_ACTIVE;
8673             list_del(&mac_node->node);
8674             kfree(mac_node);
8675         } else {
8676             list_move_tail(&mac_node->node, mac_list);
8677         }
8678     }
8679 }
8680 
8681 static void hclge_update_overflow_flags(struct hclge_vport *vport,
8682                     enum HCLGE_MAC_ADDR_TYPE mac_type,
8683                     bool is_all_added)
8684 {
8685     if (mac_type == HCLGE_MAC_ADDR_UC) {
8686         if (is_all_added)
8687             vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
8688         else
8689             vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
8690     } else {
8691         if (is_all_added)
8692             vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
8693         else
8694             vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
8695     }
8696 }
8697 
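/* Synchronize one vport's uc or mc address list to hardware in two phases:
 * first snapshot the TO_DEL/TO_ADD nodes into temporary lists under
 * mac_list_lock, then issue the firmware commands without the lock, and
 * finally merge the results back and update the overflow promisc flags.
 */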
8698 static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
8699                        enum HCLGE_MAC_ADDR_TYPE mac_type)
8700 {
8701     struct hclge_mac_node *mac_node, *tmp, *new_node;
8702     struct list_head tmp_add_list, tmp_del_list;
8703     struct list_head *list;
8704     bool all_added;
8705 
8706     INIT_LIST_HEAD(&tmp_add_list);
8707     INIT_LIST_HEAD(&tmp_del_list);
8708 
8709     /* move the mac addrs to tmp_add_list and tmp_del_list, so that
8710      * we can add/delete these mac addrs outside the spin lock
8711      */
8712     list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8713         &vport->uc_mac_list : &vport->mc_mac_list;
8714 
8715     spin_lock_bh(&vport->mac_list_lock);
8716 
8717     list_for_each_entry_safe(mac_node, tmp, list, node) {
8718         switch (mac_node->state) {
8719         case HCLGE_MAC_TO_DEL:
8720             list_move_tail(&mac_node->node, &tmp_del_list);
8721             break;
8722         case HCLGE_MAC_TO_ADD:
8723             new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
8724             if (!new_node)
8725                 goto stop_traverse;
8726             ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
8727             new_node->state = mac_node->state;
8728             list_add_tail(&new_node->node, &tmp_add_list);
8729             break;
8730         default:
8731             break;
8732         }
8733     }
8734 
8735 stop_traverse:
8736     spin_unlock_bh(&vport->mac_list_lock);
8737 
8738     /* delete first, in order to get max mac table space for adding */
8739     hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8740     hclge_sync_vport_mac_list(vport, &tmp_add_list, mac_type);
8741 
8742     /* if some mac addresses failed to be added/deleted, move them back
8743      * to the mac_list and retry next time.
8744      */
8745     spin_lock_bh(&vport->mac_list_lock);
8746 
8747     hclge_sync_from_del_list(&tmp_del_list, list);
8748     all_added = hclge_sync_from_add_list(&tmp_add_list, list);
8749 
8750     spin_unlock_bh(&vport->mac_list_lock);
8751 
8752     hclge_update_overflow_flags(vport, mac_type, all_added);
8753 }
8754 
8755 static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
8756 {
8757     struct hclge_dev *hdev = vport->back;
8758 
8759     if (test_bit(vport->vport_id, hdev->vport_config_block))
8760         return false;
8761 
8762     if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
8763         return true;
8764 
8765     return false;
8766 }
8767 
8768 static void hclge_sync_mac_table(struct hclge_dev *hdev)
8769 {
8770     int i;
8771 
8772     for (i = 0; i < hdev->num_alloc_vport; i++) {
8773         struct hclge_vport *vport = &hdev->vport[i];
8774 
8775         if (!hclge_need_sync_mac_table(vport))
8776             continue;
8777 
8778         hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
8779         hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
8780     }
8781 }
8782 
8783 static void hclge_build_del_list(struct list_head *list,
8784                  bool is_del_list,
8785                  struct list_head *tmp_del_list)
8786 {
8787     struct hclge_mac_node *mac_cfg, *tmp;
8788 
8789     list_for_each_entry_safe(mac_cfg, tmp, list, node) {
8790         switch (mac_cfg->state) {
8791         case HCLGE_MAC_TO_DEL:
8792         case HCLGE_MAC_ACTIVE:
8793             list_move_tail(&mac_cfg->node, tmp_del_list);
8794             break;
8795         case HCLGE_MAC_TO_ADD:
8796             if (is_del_list) {
8797                 list_del(&mac_cfg->node);
8798                 kfree(mac_cfg);
8799             }
8800             break;
8801         }
8802     }
8803 }
8804 
8805 static void hclge_unsync_del_list(struct hclge_vport *vport,
8806                   int (*unsync)(struct hclge_vport *vport,
8807                         const unsigned char *addr),
8808                   bool is_del_list,
8809                   struct list_head *tmp_del_list)
8810 {
8811     struct hclge_mac_node *mac_cfg, *tmp;
8812     int ret;
8813 
8814     list_for_each_entry_safe(mac_cfg, tmp, tmp_del_list, node) {
8815         ret = unsync(vport, mac_cfg->mac_addr);
8816         if (!ret || ret == -ENOENT) {
8817             /* clear all mac addrs from hardware, but keep these
8818              * mac addrs in the mac list so they can be restored
8819              * after the vf reset finishes.
8820              */
8821             if (!is_del_list &&
8822                 mac_cfg->state == HCLGE_MAC_ACTIVE) {
8823                 mac_cfg->state = HCLGE_MAC_TO_ADD;
8824             } else {
8825                 list_del(&mac_cfg->node);
8826                 kfree(mac_cfg);
8827             }
8828         } else if (is_del_list) {
8829             mac_cfg->state = HCLGE_MAC_TO_DEL;
8830         }
8831     }
8832 }
8833 
8834 void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
8835                   enum HCLGE_MAC_ADDR_TYPE mac_type)
8836 {
8837     int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
8838     struct hclge_dev *hdev = vport->back;
8839     struct list_head tmp_del_list, *list;
8840 
8841     if (mac_type == HCLGE_MAC_ADDR_UC) {
8842         list = &vport->uc_mac_list;
8843         unsync = hclge_rm_uc_addr_common;
8844     } else {
8845         list = &vport->mc_mac_list;
8846         unsync = hclge_rm_mc_addr_common;
8847     }
8848 
8849     INIT_LIST_HEAD(&tmp_del_list);
8850 
8851     if (!is_del_list)
8852         set_bit(vport->vport_id, hdev->vport_config_block);
8853 
8854     spin_lock_bh(&vport->mac_list_lock);
8855 
8856     hclge_build_del_list(list, is_del_list, &tmp_del_list);
8857 
8858     spin_unlock_bh(&vport->mac_list_lock);
8859 
8860     hclge_unsync_del_list(vport, unsync, is_del_list, &tmp_del_list);
8861 
8862     spin_lock_bh(&vport->mac_list_lock);
8863 
8864     hclge_sync_from_del_list(&tmp_del_list, list);
8865 
8866     spin_unlock_bh(&vport->mac_list_lock);
8867 }
8868 
8869 /* remove all mac addresses when uninitializing */
8870 static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
8871                     enum HCLGE_MAC_ADDR_TYPE mac_type)
8872 {
8873     struct hclge_mac_node *mac_node, *tmp;
8874     struct hclge_dev *hdev = vport->back;
8875     struct list_head tmp_del_list, *list;
8876 
8877     INIT_LIST_HEAD(&tmp_del_list);
8878 
8879     list = (mac_type == HCLGE_MAC_ADDR_UC) ?
8880         &vport->uc_mac_list : &vport->mc_mac_list;
8881 
8882     spin_lock_bh(&vport->mac_list_lock);
8883 
8884     list_for_each_entry_safe(mac_node, tmp, list, node) {
8885         switch (mac_node->state) {
8886         case HCLGE_MAC_TO_DEL:
8887         case HCLGE_MAC_ACTIVE:
8888             list_move_tail(&mac_node->node, &tmp_del_list);
8889             break;
8890         case HCLGE_MAC_TO_ADD:
8891             list_del(&mac_node->node);
8892             kfree(mac_node);
8893             break;
8894         }
8895     }
8896 
8897     spin_unlock_bh(&vport->mac_list_lock);
8898 
8899     hclge_unsync_vport_mac_list(vport, &tmp_del_list, mac_type);
8900 
8901     if (!list_empty(&tmp_del_list))
8902         dev_warn(&hdev->pdev->dev,
8903              "uninit %s mac list for vport %u not completely.\n",
8904              mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
8905              vport->vport_id);
8906 
8907     list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
8908         list_del(&mac_node->node);
8909         kfree(mac_node);
8910     }
8911 }
8912 
8913 static void hclge_uninit_mac_table(struct hclge_dev *hdev)
8914 {
8915     struct hclge_vport *vport;
8916     int i;
8917 
8918     for (i = 0; i < hdev->num_alloc_vport; i++) {
8919         vport = &hdev->vport[i];
8920         hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
8921         hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
8922     }
8923 }
8924 
8925 static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
8926                           u16 cmdq_resp, u8 resp_code)
8927 {
8928 #define HCLGE_ETHERTYPE_SUCCESS_ADD     0
8929 #define HCLGE_ETHERTYPE_ALREADY_ADD     1
8930 #define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW    2
8931 #define HCLGE_ETHERTYPE_KEY_CONFLICT        3
8932 
8933     int return_status;
8934 
8935     if (cmdq_resp) {
8936         dev_err(&hdev->pdev->dev,
8937             "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
8938             cmdq_resp);
8939         return -EIO;
8940     }
8941 
8942     switch (resp_code) {
8943     case HCLGE_ETHERTYPE_SUCCESS_ADD:
8944     case HCLGE_ETHERTYPE_ALREADY_ADD:
8945         return_status = 0;
8946         break;
8947     case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
8948         dev_err(&hdev->pdev->dev,
8949             "add mac ethertype failed for manager table overflow.\n");
8950         return_status = -EIO;
8951         break;
8952     case HCLGE_ETHERTYPE_KEY_CONFLICT:
8953         dev_err(&hdev->pdev->dev,
8954             "add mac ethertype failed for key conflict.\n");
8955         return_status = -EIO;
8956         break;
8957     default:
8958         dev_err(&hdev->pdev->dev,
8959             "add mac ethertype failed for undefined, code=%u.\n",
8960             resp_code);
8961         return_status = -EIO;
8962     }
8963 
8964     return return_status;
8965 }
8966 
8967 static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
8968                 u8 *mac_addr)
8969 {
8970     struct hclge_vport *vport = hclge_get_vport(handle);
8971     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
8972     struct hclge_dev *hdev = vport->back;
8973 
8974     vport = hclge_get_vf_vport(hdev, vf);
8975     if (!vport)
8976         return -EINVAL;
8977 
8978     hnae3_format_mac_addr(format_mac_addr, mac_addr);
8979     if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
8980         dev_info(&hdev->pdev->dev,
8981              "Specified MAC(=%s) is same as before, no change committed!\n",
8982              format_mac_addr);
8983         return 0;
8984     }
8985 
8986     ether_addr_copy(vport->vf_info.mac, mac_addr);
8987 
8988     /* there is a time window before the PF knows the VF is not alive;
8989      * the mailbox send may fail during it, but that doesn't matter,
8990      * the VF will query the mac again when it reinitializes.
8991      */
8992     if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
8993         dev_info(&hdev->pdev->dev,
8994              "MAC of VF %d has been set to %s, and it will be reinitialized!\n",
8995              vf, format_mac_addr);
8996         (void)hclge_inform_reset_assert_to_vf(vport);
8997         return 0;
8998     }
8999 
9000     dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
9001          vf, format_mac_addr);
9002     return 0;
9003 }
9004 
9005 static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
9006                  const struct hclge_mac_mgr_tbl_entry_cmd *req)
9007 {
9008     struct hclge_desc desc;
9009     u8 resp_code;
9010     u16 retval;
9011     int ret;
9012 
9013     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
9014     memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
9015 
9016     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9017     if (ret) {
9018         dev_err(&hdev->pdev->dev,
9019             "add mac ethertype failed for cmd_send, ret =%d.\n",
9020             ret);
9021         return ret;
9022     }
9023 
9024     resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
9025     retval = le16_to_cpu(desc.retval);
9026 
9027     return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
9028 }
9029 
9030 static int init_mgr_tbl(struct hclge_dev *hdev)
9031 {
9032     int ret;
9033     int i;
9034 
9035     for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
9036         ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
9037         if (ret) {
9038             dev_err(&hdev->pdev->dev,
9039                 "add mac ethertype failed, ret =%d.\n",
9040                 ret);
9041             return ret;
9042         }
9043     }
9044 
9045     return 0;
9046 }
9047 
9048 static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
9049 {
9050     struct hclge_vport *vport = hclge_get_vport(handle);
9051     struct hclge_dev *hdev = vport->back;
9052 
9053     ether_addr_copy(p, hdev->hw.mac.mac_addr);
9054 }
9055 
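/* Update the unicast mac list when the device address changes: queue
 * @new_addr for add (kept at the head of the list so it is restored first
 * after a reset) and, if @old_addr is given and differs, queue it for
 * delete. The caller holds vport->mac_list_lock.
 */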
9056 int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
9057                        const u8 *old_addr, const u8 *new_addr)
9058 {
9059     struct list_head *list = &vport->uc_mac_list;
9060     struct hclge_mac_node *old_node, *new_node;
9061 
9062     new_node = hclge_find_mac_node(list, new_addr);
9063     if (!new_node) {
9064         new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
9065         if (!new_node)
9066             return -ENOMEM;
9067 
9068         new_node->state = HCLGE_MAC_TO_ADD;
9069         ether_addr_copy(new_node->mac_addr, new_addr);
9070         list_add(&new_node->node, list);
9071     } else {
9072         if (new_node->state == HCLGE_MAC_TO_DEL)
9073             new_node->state = HCLGE_MAC_ACTIVE;
9074 
9075         /* make sure the new addr is at the head of the list, so the
9076          * dev addr is not left out of the mac table because of the umv
9077          * space limitation after a global/imp reset, which clears the
9078          * mac table in hardware.
9079          */
9080         list_move(&new_node->node, list);
9081     }
9082 
9083     if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
9084         old_node = hclge_find_mac_node(list, old_addr);
9085         if (old_node) {
9086             if (old_node->state == HCLGE_MAC_TO_ADD) {
9087                 list_del(&old_node->node);
9088                 kfree(old_node);
9089             } else {
9090                 old_node->state = HCLGE_MAC_TO_DEL;
9091             }
9092         }
9093     }
9094 
9095     set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
9096 
9097     return 0;
9098 }
9099 
9100 static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
9101                   bool is_first)
9102 {
9103     const unsigned char *new_addr = (const unsigned char *)p;
9104     struct hclge_vport *vport = hclge_get_vport(handle);
9105     char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
9106     struct hclge_dev *hdev = vport->back;
9107     unsigned char *old_addr = NULL;
9108     int ret;
9109 
9110     /* mac addr check */
9111     if (is_zero_ether_addr(new_addr) ||
9112         is_broadcast_ether_addr(new_addr) ||
9113         is_multicast_ether_addr(new_addr)) {
9114         hnae3_format_mac_addr(format_mac_addr, new_addr);
9115         dev_err(&hdev->pdev->dev,
9116             "change uc mac err! invalid mac: %s.\n",
9117              format_mac_addr);
9118         return -EINVAL;
9119     }
9120 
9121     ret = hclge_pause_addr_cfg(hdev, new_addr);
9122     if (ret) {
9123         dev_err(&hdev->pdev->dev,
9124             "failed to configure mac pause address, ret = %d\n",
9125             ret);
9126         return ret;
9127     }
9128 
9129     if (!is_first)
9130         old_addr = hdev->hw.mac.mac_addr;
9131 
9132     spin_lock_bh(&vport->mac_list_lock);
9133     ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
9134     if (ret) {
9135         hnae3_format_mac_addr(format_mac_addr, new_addr);
9136         dev_err(&hdev->pdev->dev,
9137             "failed to change the mac addr:%s, ret = %d\n",
9138             format_mac_addr, ret);
9139         spin_unlock_bh(&vport->mac_list_lock);
9140 
9141         if (!is_first)
9142             hclge_pause_addr_cfg(hdev, old_addr);
9143 
9144         return ret;
9145     }
9146     /* we must update the dev addr under spin lock protection, to prevent
9147      * the dev addr from being removed by the set_rx_mode path.
9148      */
9149     ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
9150     spin_unlock_bh(&vport->mac_list_lock);
9151 
9152     hclge_task_schedule(hdev, 0);
9153 
9154     return 0;
9155 }
9156 
9157 static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
9158 {
9159     struct mii_ioctl_data *data = if_mii(ifr);
9160 
9161     if (!hnae3_dev_phy_imp_supported(hdev))
9162         return -EOPNOTSUPP;
9163 
9164     switch (cmd) {
9165     case SIOCGMIIPHY:
9166         data->phy_id = hdev->hw.mac.phy_addr;
9167         /* this command reads phy id and register at the same time */
9168         fallthrough;
9169     case SIOCGMIIREG:
9170         data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
9171         return 0;
9172 
9173     case SIOCSMIIREG:
9174         return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
9175     default:
9176         return -EOPNOTSUPP;
9177     }
9178 }
9179 
9180 static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
9181               int cmd)
9182 {
9183     struct hclge_vport *vport = hclge_get_vport(handle);
9184     struct hclge_dev *hdev = vport->back;
9185 
9186     switch (cmd) {
9187     case SIOCGHWTSTAMP:
9188         return hclge_ptp_get_cfg(hdev, ifr);
9189     case SIOCSHWTSTAMP:
9190         return hclge_ptp_set_cfg(hdev, ifr);
9191     default:
9192         if (!hdev->hw.mac.phydev)
9193             return hclge_mii_ioctl(hdev, ifr, cmd);
9194     }
9195 
9196     return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
9197 }
9198 
9199 static int hclge_set_port_vlan_filter_bypass(struct hclge_dev *hdev, u8 vf_id,
9200                          bool bypass_en)
9201 {
9202     struct hclge_port_vlan_filter_bypass_cmd *req;
9203     struct hclge_desc desc;
9204     int ret;
9205 
9206     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PORT_VLAN_BYPASS, false);
9207     req = (struct hclge_port_vlan_filter_bypass_cmd *)desc.data;
9208     req->vf_id = vf_id;
9209     hnae3_set_bit(req->bypass_state, HCLGE_INGRESS_BYPASS_B,
9210               bypass_en ? 1 : 0);
9211 
9212     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9213     if (ret)
9214         dev_err(&hdev->pdev->dev,
9215             "failed to set vport%u port vlan filter bypass state, ret = %d.\n",
9216             vf_id, ret);
9217 
9218     return ret;
9219 }
9220 
9221 static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
9222                       u8 fe_type, bool filter_en, u8 vf_id)
9223 {
9224     struct hclge_vlan_filter_ctrl_cmd *req;
9225     struct hclge_desc desc;
9226     int ret;
9227 
9228     /* read current vlan filter parameter */
9229     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
9230     req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
9231     req->vlan_type = vlan_type;
9232     req->vf_id = vf_id;
9233 
9234     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9235     if (ret) {
9236         dev_err(&hdev->pdev->dev, "failed to get vport%u vlan filter config, ret = %d.\n",
9237             vf_id, ret);
9238         return ret;
9239     }
9240 
9241     /* modify and write new config parameter */
9242     hclge_comm_cmd_reuse_desc(&desc, false);
9243     req->vlan_fe = filter_en ?
9244             (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
9245 
9246     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9247     if (ret)
9248         dev_err(&hdev->pdev->dev, "failed to set vport%u vlan filter, ret = %d.\n",
9249             vf_id, ret);
9250 
9251     return ret;
9252 }
9253 
9254 static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable)
9255 {
9256     struct hclge_dev *hdev = vport->back;
9257     struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
9258     int ret;
9259 
9260     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9261         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9262                           HCLGE_FILTER_FE_EGRESS_V1_B,
9263                           enable, vport->vport_id);
9264 
9265     ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9266                      HCLGE_FILTER_FE_EGRESS, enable,
9267                      vport->vport_id);
9268     if (ret)
9269         return ret;
9270 
9271     if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) {
9272         ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id,
9273                             !enable);
9274     } else if (!vport->vport_id) {
9275         if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps))
9276             enable = false;
9277 
9278         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9279                          HCLGE_FILTER_FE_INGRESS,
9280                          enable, 0);
9281     }
9282 
9283     return ret;
9284 }
9285 
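/* Decide whether the vport's VLAN filter should be on, as implemented
 * below: a VF with a port-based VLAN always needs it; a trusted VF that
 * requested unicast promiscuous, or a PF in user promiscuous mode
 * (HNAE3_USER_UPE), does not; otherwise follow req_vlan_fltr_en, keep it
 * enabled on devices without the VLAN_FLTR_MDF capability or when a
 * non-zero VLAN is configured, and disable it otherwise.
 */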
9286 static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport)
9287 {
9288     struct hnae3_handle *handle = &vport->nic;
9289     struct hclge_vport_vlan_cfg *vlan, *tmp;
9290     struct hclge_dev *hdev = vport->back;
9291 
9292     if (vport->vport_id) {
9293         if (vport->port_base_vlan_cfg.state !=
9294             HNAE3_PORT_BASE_VLAN_DISABLE)
9295             return true;
9296 
9297         if (vport->vf_info.trusted && vport->vf_info.request_uc_en)
9298             return false;
9299     } else if (handle->netdev_flags & HNAE3_USER_UPE) {
9300         return false;
9301     }
9302 
9303     if (!vport->req_vlan_fltr_en)
9304         return false;
9305 
9306     /* for compatibility with former devices, always enable the vlan filter */
9307     if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
9308         return true;
9309 
9310     list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
9311         if (vlan->vlan_id != 0)
9312             return true;
9313 
9314     return false;
9315 }
9316 
9317 int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en)
9318 {
9319     struct hclge_dev *hdev = vport->back;
9320     bool need_en;
9321     int ret;
9322 
9323     mutex_lock(&hdev->vport_lock);
9324 
9325     vport->req_vlan_fltr_en = request_en;
9326 
9327     need_en = hclge_need_enable_vport_vlan_filter(vport);
9328     if (need_en == vport->cur_vlan_fltr_en) {
9329         mutex_unlock(&hdev->vport_lock);
9330         return 0;
9331     }
9332 
9333     ret = hclge_set_vport_vlan_filter(vport, need_en);
9334     if (ret) {
9335         mutex_unlock(&hdev->vport_lock);
9336         return ret;
9337     }
9338 
9339     vport->cur_vlan_fltr_en = need_en;
9340 
9341     mutex_unlock(&hdev->vport_lock);
9342 
9343     return 0;
9344 }
9345 
9346 static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
9347 {
9348     struct hclge_vport *vport = hclge_get_vport(handle);
9349 
9350     return hclge_enable_vport_vlan_filter(vport, enable);
9351 }
9352 
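/* Build the two chained VLAN_FILTER_VF_CFG descriptors: the VF bitmap spans
 * both descriptors, so vfid selects the byte (and descriptor) that holds this
 * VF's bit, while vlan_cfg carries the add/kill flag for the vlan id.
 */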
9353 static int hclge_set_vf_vlan_filter_cmd(struct hclge_dev *hdev, u16 vfid,
9354                     bool is_kill, u16 vlan,
9355                     struct hclge_desc *desc)
9356 {
9357     struct hclge_vlan_filter_vf_cfg_cmd *req0;
9358     struct hclge_vlan_filter_vf_cfg_cmd *req1;
9359     u8 vf_byte_val;
9360     u8 vf_byte_off;
9361     int ret;
9362 
9363     hclge_cmd_setup_basic_desc(&desc[0],
9364                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9365     hclge_cmd_setup_basic_desc(&desc[1],
9366                    HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
9367 
9368     desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
9369 
9370     vf_byte_off = vfid / 8;
9371     vf_byte_val = 1 << (vfid % 8);
9372 
9373     req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9374     req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
9375 
9376     req0->vlan_id  = cpu_to_le16(vlan);
9377     req0->vlan_cfg = is_kill;
9378 
9379     if (vf_byte_off < HCLGE_MAX_VF_BYTES)
9380         req0->vf_bitmap[vf_byte_off] = vf_byte_val;
9381     else
9382         req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
9383 
9384     ret = hclge_cmd_send(&hdev->hw, desc, 2);
9385     if (ret) {
9386         dev_err(&hdev->pdev->dev,
9387             "Send vf vlan command fail, ret =%d.\n",
9388             ret);
9389         return ret;
9390     }
9391 
9392     return 0;
9393 }
9394 
9395 static int hclge_check_vf_vlan_cmd_status(struct hclge_dev *hdev, u16 vfid,
9396                       bool is_kill, struct hclge_desc *desc)
9397 {
9398     struct hclge_vlan_filter_vf_cfg_cmd *req;
9399 
9400     req = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
9401 
9402     if (!is_kill) {
9403 #define HCLGE_VF_VLAN_NO_ENTRY  2
9404         if (!req->resp_code || req->resp_code == 1)
9405             return 0;
9406 
9407         if (req->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
9408             set_bit(vfid, hdev->vf_vlan_full);
9409             dev_warn(&hdev->pdev->dev,
9410                  "vf vlan table is full, vf vlan filter is disabled\n");
9411             return 0;
9412         }
9413 
9414         dev_err(&hdev->pdev->dev,
9415             "Add vf vlan filter fail, ret =%u.\n",
9416             req->resp_code);
9417     } else {
9418 #define HCLGE_VF_VLAN_DEL_NO_FOUND  1
9419         if (!req->resp_code)
9420             return 0;
9421 
9422         /* vf vlan filter is disabled when the vf vlan table is full,
9423          * so new vlan ids will not have been added to the vf vlan table.
9424          * Just return 0 without a warning, to avoid massive verbose
9425          * logs when unloading.
9426          */
9427         if (req->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
9428             return 0;
9429 
9430         dev_err(&hdev->pdev->dev,
9431             "Kill vf vlan filter fail, ret =%u.\n",
9432             req->resp_code);
9433     }
9434 
9435     return -EIO;
9436 }
9437 
9438 static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
9439                     bool is_kill, u16 vlan)
9440 {
9441     struct hclge_vport *vport = &hdev->vport[vfid];
9442     struct hclge_desc desc[2];
9443     int ret;
9444 
9445     /* if the vf vlan table is full, firmware will disable the vf vlan
9446      * filter, so it is neither possible nor necessary to add a new vlan id.
9447      * If spoof check is enabled and the vf vlan table is full, adding a new
9448      * vlan must be rejected, because tx packets with that vlan id would be dropped.
9449      */
9450     if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
9451         if (vport->vf_info.spoofchk && vlan) {
9452             dev_err(&hdev->pdev->dev,
9453                 "Can't add vlan due to spoof check is on and vf vlan table is full\n");
9454             return -EPERM;
9455         }
9456         return 0;
9457     }
9458 
9459     ret = hclge_set_vf_vlan_filter_cmd(hdev, vfid, is_kill, vlan, desc);
9460     if (ret)
9461         return ret;
9462 
9463     return hclge_check_vf_vlan_cmd_status(hdev, vfid, is_kill, desc);
9464 }
9465 
9466 static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
9467                       u16 vlan_id, bool is_kill)
9468 {
9469     struct hclge_vlan_filter_pf_cfg_cmd *req;
9470     struct hclge_desc desc;
9471     u8 vlan_offset_byte_val;
9472     u8 vlan_offset_byte;
9473     u8 vlan_offset_160;
9474     int ret;
9475 
9476     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
9477 
9478     vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
9479     vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
9480                HCLGE_VLAN_BYTE_SIZE;
9481     vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
9482 
9483     req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
9484     req->vlan_offset = vlan_offset_160;
9485     req->vlan_cfg = is_kill;
9486     req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
9487 
9488     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
9489     if (ret)
9490         dev_err(&hdev->pdev->dev,
9491             "port vlan command, send fail, ret =%d.\n", ret);
9492     return ret;
9493 }
9494 
9495 static bool hclge_need_update_port_vlan(struct hclge_dev *hdev, u16 vport_id,
9496                     u16 vlan_id, bool is_kill)
9497 {
9498     /* vlan 0 may be added twice when 8021q module is enabled */
9499     if (!is_kill && !vlan_id &&
9500         test_bit(vport_id, hdev->vlan_table[vlan_id]))
9501         return false;
9502 
9503     if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
9504         dev_warn(&hdev->pdev->dev,
9505              "Add port vlan failed, vport %u is already in vlan %u\n",
9506              vport_id, vlan_id);
9507         return false;
9508     }
9509 
9510     if (is_kill &&
9511         !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
9512         dev_warn(&hdev->pdev->dev,
9513              "Delete port vlan failed, vport %u is not in vlan %u\n",
9514              vport_id, vlan_id);
9515         return false;
9516     }
9517 
9518     return true;
9519 }
9520 
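/* Update the per-VF vlan table first, then touch the port-level vlan filter
 * only when the first vport joins the vlan or the last vport leaves it.
 */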
9521 static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
9522                     u16 vport_id, u16 vlan_id,
9523                     bool is_kill)
9524 {
9525     u16 vport_idx, vport_num = 0;
9526     int ret;
9527 
9528     if (is_kill && !vlan_id)
9529         return 0;
9530 
9531     if (vlan_id >= VLAN_N_VID)
9532         return -EINVAL;
9533 
9534     ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id);
9535     if (ret) {
9536         dev_err(&hdev->pdev->dev,
9537             "Set %u vport vlan filter config fail, ret =%d.\n",
9538             vport_id, ret);
9539         return ret;
9540     }
9541 
9542     if (!hclge_need_update_port_vlan(hdev, vport_id, vlan_id, is_kill))
9543         return 0;
9544 
9545     for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
9546         vport_num++;
9547 
9548     if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
9549         ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
9550                          is_kill);
9551 
9552     return ret;
9553 }
9554 
9555 static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
9556 {
9557     struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
9558     struct hclge_vport_vtag_tx_cfg_cmd *req;
9559     struct hclge_dev *hdev = vport->back;
9560     struct hclge_desc desc;
9561     u16 bmap_index;
9562     int status;
9563 
9564     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
9565 
9566     req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
9567     req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
9568     req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
9569     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
9570               vcfg->accept_tag1 ? 1 : 0);
9571     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
9572               vcfg->accept_untag1 ? 1 : 0);
9573     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
9574               vcfg->accept_tag2 ? 1 : 0);
9575     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
9576               vcfg->accept_untag2 ? 1 : 0);
9577     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
9578               vcfg->insert_tag1_en ? 1 : 0);
9579     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
9580               vcfg->insert_tag2_en ? 1 : 0);
9581     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_TAG_SHIFT_MODE_EN_B,
9582               vcfg->tag_shift_mode_en ? 1 : 0);
9583     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
9584 
9585     req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9586     bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9587             HCLGE_VF_NUM_PER_BYTE;
9588     req->vf_bitmap[bmap_index] =
9589         1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9590 
9591     status = hclge_cmd_send(&hdev->hw, &desc, 1);
9592     if (status)
9593         dev_err(&hdev->pdev->dev,
9594             "Send port txvlan cfg command fail, ret =%d\n",
9595             status);
9596 
9597     return status;
9598 }
9599 
9600 static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
9601 {
9602     struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
9603     struct hclge_vport_vtag_rx_cfg_cmd *req;
9604     struct hclge_dev *hdev = vport->back;
9605     struct hclge_desc desc;
9606     u16 bmap_index;
9607     int status;
9608 
9609     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
9610 
9611     req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
9612     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
9613               vcfg->strip_tag1_en ? 1 : 0);
9614     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
9615               vcfg->strip_tag2_en ? 1 : 0);
9616     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
9617               vcfg->vlan1_vlan_prionly ? 1 : 0);
9618     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
9619               vcfg->vlan2_vlan_prionly ? 1 : 0);
9620     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG1_EN_B,
9621               vcfg->strip_tag1_discard_en ? 1 : 0);
9622     hnae3_set_bit(req->vport_vlan_cfg, HCLGE_DISCARD_TAG2_EN_B,
9623               vcfg->strip_tag2_discard_en ? 1 : 0);
9624 
9625     req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
9626     bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
9627             HCLGE_VF_NUM_PER_BYTE;
9628     req->vf_bitmap[bmap_index] =
9629         1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
9630 
9631     status = hclge_cmd_send(&hdev->hw, &desc, 1);
9632     if (status)
9633         dev_err(&hdev->pdev->dev,
9634             "Send port rxvlan cfg command fail, ret =%d\n",
9635             status);
9636 
9637     return status;
9638 }
9639 
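/* Derive the TX tag-insert and RX tag-strip settings for this vport from the
 * port base vlan state and write them to hardware: with a port base vlan the
 * default tag1 is built from (qos, vlan_tag) and inserted on TX, otherwise no
 * tag is inserted and RX stripping follows rx_vlan_offload_en.
 */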
9640 static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
9641                   u16 port_base_vlan_state,
9642                   u16 vlan_tag, u8 qos)
9643 {
9644     int ret;
9645 
9646     if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9647         vport->txvlan_cfg.accept_tag1 = true;
9648         vport->txvlan_cfg.insert_tag1_en = false;
9649         vport->txvlan_cfg.default_tag1 = 0;
9650     } else {
9651         struct hnae3_ae_dev *ae_dev = pci_get_drvdata(vport->nic.pdev);
9652 
9653         vport->txvlan_cfg.accept_tag1 =
9654             ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3;
9655         vport->txvlan_cfg.insert_tag1_en = true;
9656         vport->txvlan_cfg.default_tag1 = (qos << VLAN_PRIO_SHIFT) |
9657                          vlan_tag;
9658     }
9659 
9660     vport->txvlan_cfg.accept_untag1 = true;
9661 
9662     /* accept_tag2 and accept_untag2 are not supported on
9663      * pdev revision(0x20); newer revisions support them, but
9664      * these two fields cannot be configured by the user.
9665      */
9666     vport->txvlan_cfg.accept_tag2 = true;
9667     vport->txvlan_cfg.accept_untag2 = true;
9668     vport->txvlan_cfg.insert_tag2_en = false;
9669     vport->txvlan_cfg.default_tag2 = 0;
9670     vport->txvlan_cfg.tag_shift_mode_en = true;
9671 
9672     if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9673         vport->rxvlan_cfg.strip_tag1_en = false;
9674         vport->rxvlan_cfg.strip_tag2_en =
9675                 vport->rxvlan_cfg.rx_vlan_offload_en;
9676         vport->rxvlan_cfg.strip_tag2_discard_en = false;
9677     } else {
9678         vport->rxvlan_cfg.strip_tag1_en =
9679                 vport->rxvlan_cfg.rx_vlan_offload_en;
9680         vport->rxvlan_cfg.strip_tag2_en = true;
9681         vport->rxvlan_cfg.strip_tag2_discard_en = true;
9682     }
9683 
9684     vport->rxvlan_cfg.strip_tag1_discard_en = false;
9685     vport->rxvlan_cfg.vlan1_vlan_prionly = false;
9686     vport->rxvlan_cfg.vlan2_vlan_prionly = false;
9687 
9688     ret = hclge_set_vlan_tx_offload_cfg(vport);
9689     if (ret)
9690         return ret;
9691 
9692     return hclge_set_vlan_rx_offload_cfg(vport);
9693 }
9694 
9695 static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
9696 {
9697     struct hclge_rx_vlan_type_cfg_cmd *rx_req;
9698     struct hclge_tx_vlan_type_cfg_cmd *tx_req;
9699     struct hclge_desc desc;
9700     int status;
9701 
9702     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
9703     rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
9704     rx_req->ot_fst_vlan_type =
9705         cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
9706     rx_req->ot_sec_vlan_type =
9707         cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
9708     rx_req->in_fst_vlan_type =
9709         cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
9710     rx_req->in_sec_vlan_type =
9711         cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
9712 
9713     status = hclge_cmd_send(&hdev->hw, &desc, 1);
9714     if (status) {
9715         dev_err(&hdev->pdev->dev,
9716             "Send rxvlan protocol type command fail, ret =%d\n",
9717             status);
9718         return status;
9719     }
9720 
9721     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
9722 
9723     tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
9724     tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
9725     tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
9726 
9727     status = hclge_cmd_send(&hdev->hw, &desc, 1);
9728     if (status)
9729         dev_err(&hdev->pdev->dev,
9730             "Send txvlan protocol type command fail, ret =%d\n",
9731             status);
9732 
9733     return status;
9734 }
9735 
9736 static int hclge_init_vlan_filter(struct hclge_dev *hdev)
9737 {
9738     struct hclge_vport *vport;
9739     int ret;
9740     int i;
9741 
9742     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
9743         return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9744                           HCLGE_FILTER_FE_EGRESS_V1_B,
9745                           true, 0);
9746 
9747     /* for revision 0x21, vf vlan filter is per function */
9748     for (i = 0; i < hdev->num_alloc_vport; i++) {
9749         vport = &hdev->vport[i];
9750         ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
9751                          HCLGE_FILTER_FE_EGRESS, true,
9752                          vport->vport_id);
9753         if (ret)
9754             return ret;
9755         vport->cur_vlan_fltr_en = true;
9756     }
9757 
9758     return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
9759                       HCLGE_FILTER_FE_INGRESS, true, 0);
9760 }
9761 
9762 static int hclge_init_vlan_type(struct hclge_dev *hdev)
9763 {
9764     hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
9765     hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
9766     hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
9767     hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
9768     hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
9769     hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
9770 
9771     return hclge_set_vlan_protocol_type(hdev);
9772 }
9773 
9774 static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
9775 {
9776     struct hclge_port_base_vlan_config *cfg;
9777     struct hclge_vport *vport;
9778     int ret;
9779     int i;
9780 
9781     for (i = 0; i < hdev->num_alloc_vport; i++) {
9782         vport = &hdev->vport[i];
9783         cfg = &vport->port_base_vlan_cfg;
9784 
9785         ret = hclge_vlan_offload_cfg(vport, cfg->state,
9786                          cfg->vlan_info.vlan_tag,
9787                          cfg->vlan_info.qos);
9788         if (ret)
9789             return ret;
9790     }
9791     return 0;
9792 }
9793 
9794 static int hclge_init_vlan_config(struct hclge_dev *hdev)
9795 {
9796     struct hnae3_handle *handle = &hdev->vport[0].nic;
9797     int ret;
9798 
9799     ret = hclge_init_vlan_filter(hdev);
9800     if (ret)
9801         return ret;
9802 
9803     ret = hclge_init_vlan_type(hdev);
9804     if (ret)
9805         return ret;
9806 
9807     ret = hclge_init_vport_vlan_offload(hdev);
9808     if (ret)
9809         return ret;
9810 
9811     return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
9812 }
9813 
9814 static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9815                        bool writen_to_tbl)
9816 {
9817     struct hclge_vport_vlan_cfg *vlan, *tmp;
9818     struct hclge_dev *hdev = vport->back;
9819 
9820     mutex_lock(&hdev->vport_lock);
9821 
9822     list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9823         if (vlan->vlan_id == vlan_id) {
9824             mutex_unlock(&hdev->vport_lock);
9825             return;
9826         }
9827     }
9828 
9829     vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
9830     if (!vlan) {
9831         mutex_unlock(&hdev->vport_lock);
9832         return;
9833     }
9834 
9835     vlan->hd_tbl_status = writen_to_tbl;
9836     vlan->vlan_id = vlan_id;
9837 
9838     list_add_tail(&vlan->node, &vport->vlan_list);
9839     mutex_unlock(&hdev->vport_lock);
9840 }
9841 
9842 static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
9843 {
9844     struct hclge_vport_vlan_cfg *vlan, *tmp;
9845     struct hclge_dev *hdev = vport->back;
9846     int ret;
9847 
9848     mutex_lock(&hdev->vport_lock);
9849 
9850     list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9851         if (!vlan->hd_tbl_status) {
9852             ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9853                                vport->vport_id,
9854                                vlan->vlan_id, false);
9855             if (ret) {
9856                 dev_err(&hdev->pdev->dev,
9857                     "restore vport vlan list failed, ret=%d\n",
9858                     ret);
9859 
9860                 mutex_unlock(&hdev->vport_lock);
9861                 return ret;
9862             }
9863         }
9864         vlan->hd_tbl_status = true;
9865     }
9866 
9867     mutex_unlock(&hdev->vport_lock);
9868 
9869     return 0;
9870 }
9871 
9872 static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
9873                       bool is_write_tbl)
9874 {
9875     struct hclge_vport_vlan_cfg *vlan, *tmp;
9876     struct hclge_dev *hdev = vport->back;
9877 
9878     mutex_lock(&hdev->vport_lock);
9879 
9880     list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9881         if (vlan->vlan_id == vlan_id) {
9882             if (is_write_tbl && vlan->hd_tbl_status)
9883                 hclge_set_vlan_filter_hw(hdev,
9884                              htons(ETH_P_8021Q),
9885                              vport->vport_id,
9886                              vlan_id,
9887                              true);
9888 
9889             list_del(&vlan->node);
9890             kfree(vlan);
9891             break;
9892         }
9893     }
9894 
9895     mutex_unlock(&hdev->vport_lock);
9896 }
9897 
9898 void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
9899 {
9900     struct hclge_vport_vlan_cfg *vlan, *tmp;
9901     struct hclge_dev *hdev = vport->back;
9902 
9903     mutex_lock(&hdev->vport_lock);
9904 
9905     list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9906         if (vlan->hd_tbl_status)
9907             hclge_set_vlan_filter_hw(hdev,
9908                          htons(ETH_P_8021Q),
9909                          vport->vport_id,
9910                          vlan->vlan_id,
9911                          true);
9912 
9913         vlan->hd_tbl_status = false;
9914         if (is_del_list) {
9915             list_del(&vlan->node);
9916             kfree(vlan);
9917         }
9918     }
9919     clear_bit(vport->vport_id, hdev->vf_vlan_full);
9920     mutex_unlock(&hdev->vport_lock);
9921 }
9922 
9923 void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
9924 {
9925     struct hclge_vport_vlan_cfg *vlan, *tmp;
9926     struct hclge_vport *vport;
9927     int i;
9928 
9929     mutex_lock(&hdev->vport_lock);
9930 
9931     for (i = 0; i < hdev->num_alloc_vport; i++) {
9932         vport = &hdev->vport[i];
9933         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9934             list_del(&vlan->node);
9935             kfree(vlan);
9936         }
9937     }
9938 
9939     mutex_unlock(&hdev->vport_lock);
9940 }
9941 
9942 void hclge_restore_vport_port_base_vlan_config(struct hclge_dev *hdev)
9943 {
9944     struct hclge_vlan_info *vlan_info;
9945     struct hclge_vport *vport;
9946     u16 vlan_proto;
9947     u16 vlan_id;
9948     u16 state;
9949     int vf_id;
9950     int ret;
9951 
9952     /* PF should restore the port base vlan of all VFs */
9953     for (vf_id = 0; vf_id < hdev->num_alloc_vfs; vf_id++) {
9954         vport = &hdev->vport[vf_id + HCLGE_VF_VPORT_START_NUM];
9955         vlan_info = vport->port_base_vlan_cfg.tbl_sta ?
9956                 &vport->port_base_vlan_cfg.vlan_info :
9957                 &vport->port_base_vlan_cfg.old_vlan_info;
9958 
9959         vlan_id = vlan_info->vlan_tag;
9960         vlan_proto = vlan_info->vlan_proto;
9961         state = vport->port_base_vlan_cfg.state;
9962 
9963         if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
9964             clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
9965             ret = hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
9966                                vport->vport_id,
9967                                vlan_id, false);
9968             vport->port_base_vlan_cfg.tbl_sta = ret == 0;
9969         }
9970     }
9971 }
9972 
9973 void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
9974 {
9975     struct hclge_vport_vlan_cfg *vlan, *tmp;
9976     struct hclge_dev *hdev = vport->back;
9977     int ret;
9978 
9979     mutex_lock(&hdev->vport_lock);
9980 
9981     if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
9982         list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
9983             ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
9984                                vport->vport_id,
9985                                vlan->vlan_id, false);
9986             if (ret)
9987                 break;
9988             vlan->hd_tbl_status = true;
9989         }
9990     }
9991 
9992     mutex_unlock(&hdev->vport_lock);
9993 }
9994 
9995 /* For global reset and imp reset, hardware will clear the mac table,
9996  * so we change the mac address state from ACTIVE to TO_ADD, then they
9997  * can be restored in the service task after the reset completes.
9998  * Furthermore, mac addresses in the TO_DEL or DEL_FAIL state do not need
9999  * to be restored after reset, so just remove those mac nodes from mac_list.
10000  */
10001 static void hclge_mac_node_convert_for_reset(struct list_head *list)
10002 {
10003     struct hclge_mac_node *mac_node, *tmp;
10004 
10005     list_for_each_entry_safe(mac_node, tmp, list, node) {
10006         if (mac_node->state == HCLGE_MAC_ACTIVE) {
10007             mac_node->state = HCLGE_MAC_TO_ADD;
10008         } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
10009             list_del(&mac_node->node);
10010             kfree(mac_node);
10011         }
10012     }
10013 }
10014 
10015 void hclge_restore_mac_table_common(struct hclge_vport *vport)
10016 {
10017     spin_lock_bh(&vport->mac_list_lock);
10018 
10019     hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
10020     hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
10021     set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
10022 
10023     spin_unlock_bh(&vport->mac_list_lock);
10024 }
10025 
10026 static void hclge_restore_hw_table(struct hclge_dev *hdev)
10027 {
10028     struct hclge_vport *vport = &hdev->vport[0];
10029     struct hnae3_handle *handle = &vport->nic;
10030 
10031     hclge_restore_mac_table_common(vport);
10032     hclge_restore_vport_port_base_vlan_config(hdev);
10033     hclge_restore_vport_vlan_table(vport);
10034     set_bit(HCLGE_STATE_FD_USER_DEF_CHANGED, &hdev->state);
10035     hclge_restore_fd_entries(handle);
10036 }
10037 
10038 int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
10039 {
10040     struct hclge_vport *vport = hclge_get_vport(handle);
10041 
10042     if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10043         vport->rxvlan_cfg.strip_tag1_en = false;
10044         vport->rxvlan_cfg.strip_tag2_en = enable;
10045         vport->rxvlan_cfg.strip_tag2_discard_en = false;
10046     } else {
10047         vport->rxvlan_cfg.strip_tag1_en = enable;
10048         vport->rxvlan_cfg.strip_tag2_en = true;
10049         vport->rxvlan_cfg.strip_tag2_discard_en = true;
10050     }
10051 
10052     vport->rxvlan_cfg.strip_tag1_discard_en = false;
10053     vport->rxvlan_cfg.vlan1_vlan_prionly = false;
10054     vport->rxvlan_cfg.vlan2_vlan_prionly = false;
10055     vport->rxvlan_cfg.rx_vlan_offload_en = enable;
10056 
10057     return hclge_set_vlan_rx_offload_cfg(vport);
10058 }
10059 
10060 static void hclge_set_vport_vlan_fltr_change(struct hclge_vport *vport)
10061 {
10062     struct hclge_dev *hdev = vport->back;
10063 
10064     if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps))
10065         set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state);
10066 }
10067 
10068 static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
10069                         u16 port_base_vlan_state,
10070                         struct hclge_vlan_info *new_info,
10071                         struct hclge_vlan_info *old_info)
10072 {
10073     struct hclge_dev *hdev = vport->back;
10074     int ret;
10075 
10076     if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
10077         hclge_rm_vport_all_vlan_table(vport, false);
10078         /* force clear VLAN 0 */
10079         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, true, 0);
10080         if (ret)
10081             return ret;
10082         return hclge_set_vlan_filter_hw(hdev,
10083                          htons(new_info->vlan_proto),
10084                          vport->vport_id,
10085                          new_info->vlan_tag,
10086                          false);
10087     }
10088 
10089     vport->port_base_vlan_cfg.tbl_sta = false;
10090 
10091     /* force add VLAN 0 */
10092     ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, false, 0);
10093     if (ret)
10094         return ret;
10095 
10096     ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
10097                        vport->vport_id, old_info->vlan_tag,
10098                        true);
10099     if (ret)
10100         return ret;
10101 
10102     return hclge_add_vport_all_vlan_table(vport);
10103 }
10104 
10105 static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
10106                       const struct hclge_vlan_info *old_cfg)
10107 {
10108     if (new_cfg->vlan_tag != old_cfg->vlan_tag)
10109         return true;
10110 
10111     if (new_cfg->vlan_tag == 0 && (new_cfg->qos == 0 || old_cfg->qos == 0))
10112         return true;
10113 
10114     return false;
10115 }
10116 
10117 static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
10118                        struct hclge_vlan_info *new_info,
10119                        struct hclge_vlan_info *old_info)
10120 {
10121     struct hclge_dev *hdev = vport->back;
10122     int ret;
10123 
10124     /* add new VLAN tag */
10125     ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
10126                        vport->vport_id, new_info->vlan_tag,
10127                        false);
10128     if (ret)
10129         return ret;
10130 
10131     vport->port_base_vlan_cfg.tbl_sta = false;
10132     /* remove old VLAN tag */
10133     if (old_info->vlan_tag == 0)
10134         ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
10135                            true, 0);
10136     else
10137         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10138                            vport->vport_id,
10139                            old_info->vlan_tag, true);
10140     if (ret)
10141         dev_err(&hdev->pdev->dev,
10142             "failed to clear vport%u port base vlan %u, ret = %d.\n",
10143             vport->vport_id, old_info->vlan_tag, ret);
10144 
10145     return ret;
10146 }
10147 
10148 int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
10149                     struct hclge_vlan_info *vlan_info)
10150 {
10151     struct hnae3_handle *nic = &vport->nic;
10152     struct hclge_vlan_info *old_vlan_info;
10153     int ret;
10154 
10155     old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10156 
10157     ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
10158                      vlan_info->qos);
10159     if (ret)
10160         return ret;
10161 
10162     if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
10163         goto out;
10164 
10165     if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
10166         ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
10167                               old_vlan_info);
10168     else
10169         ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
10170                                old_vlan_info);
10171     if (ret)
10172         return ret;
10173 
10174 out:
10175     vport->port_base_vlan_cfg.state = state;
10176     if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
10177         nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
10178     else
10179         nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
10180 
10181     vport->port_base_vlan_cfg.old_vlan_info = *old_vlan_info;
10182     vport->port_base_vlan_cfg.vlan_info = *vlan_info;
10183     vport->port_base_vlan_cfg.tbl_sta = true;
10184     hclge_set_vport_vlan_fltr_change(vport);
10185 
10186     return 0;
10187 }
10188 
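/* Map the requested (vlan, qos) pair onto a port base vlan transition:
 * NOCHANGE, ENABLE (first non-zero setting), DISABLE (cleared back to 0/0)
 * or MODIFY (a different tag/qos while already enabled).
 */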
10189 static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
10190                       enum hnae3_port_base_vlan_state state,
10191                       u16 vlan, u8 qos)
10192 {
10193     if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10194         if (!vlan && !qos)
10195             return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10196 
10197         return HNAE3_PORT_BASE_VLAN_ENABLE;
10198     }
10199 
10200     if (!vlan && !qos)
10201         return HNAE3_PORT_BASE_VLAN_DISABLE;
10202 
10203     if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan &&
10204         vport->port_base_vlan_cfg.vlan_info.qos == qos)
10205         return HNAE3_PORT_BASE_VLAN_NOCHANGE;
10206 
10207     return HNAE3_PORT_BASE_VLAN_MODIFY;
10208 }
10209 
10210 static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
10211                     u16 vlan, u8 qos, __be16 proto)
10212 {
10213     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
10214     struct hclge_vport *vport = hclge_get_vport(handle);
10215     struct hclge_dev *hdev = vport->back;
10216     struct hclge_vlan_info vlan_info;
10217     u16 state;
10218     int ret;
10219 
10220     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
10221         return -EOPNOTSUPP;
10222 
10223     vport = hclge_get_vf_vport(hdev, vfid);
10224     if (!vport)
10225         return -EINVAL;
10226 
10227     /* qos is a 3-bit value, so it cannot be bigger than 7 */
10228     if (vlan > VLAN_N_VID - 1 || qos > 7)
10229         return -EINVAL;
10230     if (proto != htons(ETH_P_8021Q))
10231         return -EPROTONOSUPPORT;
10232 
10233     state = hclge_get_port_base_vlan_state(vport,
10234                            vport->port_base_vlan_cfg.state,
10235                            vlan, qos);
10236     if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
10237         return 0;
10238 
10239     vlan_info.vlan_tag = vlan;
10240     vlan_info.qos = qos;
10241     vlan_info.vlan_proto = ntohs(proto);
10242 
10243     ret = hclge_update_port_base_vlan_cfg(vport, state, &vlan_info);
10244     if (ret) {
10245         dev_err(&hdev->pdev->dev,
10246             "failed to update port base vlan for vf %d, ret = %d\n",
10247             vfid, ret);
10248         return ret;
10249     }
10250 
10251     /* there is a time window before the PF knows the VF is not alive,
10252      * so sending the mailbox may fail, but that doesn't matter: the VF
10253      * will query the state when it reinitializes.
10254      * for DEVICE_VERSION_V3, the vf doesn't need to know about the port
10255      * based VLAN state.
10256      */
10257     if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
10258         test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
10259         (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
10260                             vport->vport_id,
10261                             state, &vlan_info);
10262 
10263     return 0;
10264 }
10265 
10266 static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
10267 {
10268     struct hclge_vlan_info *vlan_info;
10269     struct hclge_vport *vport;
10270     int ret;
10271     int vf;
10272 
10273     /* clear port base vlan for all vfs */
10274     for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
10275         vport = &hdev->vport[vf];
10276         vlan_info = &vport->port_base_vlan_cfg.vlan_info;
10277 
10278         ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10279                            vport->vport_id,
10280                            vlan_info->vlan_tag, true);
10281         if (ret)
10282             dev_err(&hdev->pdev->dev,
10283                 "failed to clear vf vlan for vf%d, ret = %d\n",
10284                 vf - HCLGE_VF_VPORT_START_NUM, ret);
10285     }
10286 }
10287 
10288 int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
10289               u16 vlan_id, bool is_kill)
10290 {
10291     struct hclge_vport *vport = hclge_get_vport(handle);
10292     struct hclge_dev *hdev = vport->back;
10293     bool writen_to_tbl = false;
10294     int ret = 0;
10295 
10296     /* When the device is resetting or the reset has failed, firmware is
10297      * unable to handle the mailbox. Just record the vlan id, and remove it
10298      * after the reset has finished.
10299      */
10300     if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10301          test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
10302         set_bit(vlan_id, vport->vlan_del_fail_bmap);
10303         return -EBUSY;
10304     }
10305 
10306     /* when port base vlan is enabled, we use the port base vlan as the
10307      * vlan filter entry. In this case, we don't update the vlan filter
10308      * table when the user adds a new vlan or removes an existing one, just
10309      * update the vport vlan list. The vlan ids in the vlan list are not
10310      * written to the vlan filter table until port base vlan is disabled.
10311      */
10312     if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
10313         ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
10314                            vlan_id, is_kill);
10315         writen_to_tbl = true;
10316     }
10317 
10318     if (!ret) {
10319         if (!is_kill)
10320             hclge_add_vport_vlan_table(vport, vlan_id,
10321                            writen_to_tbl);
10322         else if (is_kill && vlan_id != 0)
10323             hclge_rm_vport_vlan_table(vport, vlan_id, false);
10324     } else if (is_kill) {
10325         /* when removing the hw vlan filter failed, record the vlan id,
10326          * and try to remove it from hw later, to stay consistent
10327          * with the stack
10328          */
10329         set_bit(vlan_id, vport->vlan_del_fail_bmap);
10330     }
10331 
10332     hclge_set_vport_vlan_fltr_change(vport);
10333 
10334     return ret;
10335 }
10336 
10337 static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev)
10338 {
10339     struct hclge_vport *vport;
10340     int ret;
10341     u16 i;
10342 
10343     for (i = 0; i < hdev->num_alloc_vport; i++) {
10344         vport = &hdev->vport[i];
10345         if (!test_and_clear_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10346                     &vport->state))
10347             continue;
10348 
10349         ret = hclge_enable_vport_vlan_filter(vport,
10350                              vport->req_vlan_fltr_en);
10351         if (ret) {
10352             dev_err(&hdev->pdev->dev,
10353                 "failed to sync vlan filter state for vport%u, ret = %d\n",
10354                 vport->vport_id, ret);
10355             set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
10356                 &vport->state);
10357             return;
10358         }
10359     }
10360 }
10361 
10362 static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
10363 {
10364 #define HCLGE_MAX_SYNC_COUNT    60
10365 
10366     int i, ret, sync_cnt = 0;
10367     u16 vlan_id;
10368 
10369     /* start from vport 1 since the PF is always alive */
10370     for (i = 0; i < hdev->num_alloc_vport; i++) {
10371         struct hclge_vport *vport = &hdev->vport[i];
10372 
10373         vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10374                      VLAN_N_VID);
10375         while (vlan_id != VLAN_N_VID) {
10376             ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
10377                                vport->vport_id, vlan_id,
10378                                true);
10379             if (ret && ret != -EINVAL)
10380                 return;
10381 
10382             clear_bit(vlan_id, vport->vlan_del_fail_bmap);
10383             hclge_rm_vport_vlan_table(vport, vlan_id, false);
10384             hclge_set_vport_vlan_fltr_change(vport);
10385 
10386             sync_cnt++;
10387             if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
10388                 return;
10389 
10390             vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
10391                          VLAN_N_VID);
10392         }
10393     }
10394 
10395     hclge_sync_vlan_fltr_state(hdev);
10396 }
10397 
10398 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
10399 {
10400     struct hclge_config_max_frm_size_cmd *req;
10401     struct hclge_desc desc;
10402 
10403     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
10404 
10405     req = (struct hclge_config_max_frm_size_cmd *)desc.data;
10406     req->max_frm_size = cpu_to_le16(new_mps);
10407     req->min_frm_size = HCLGE_MAC_MIN_FRAME;
10408 
10409     return hclge_cmd_send(&hdev->hw, &desc, 1);
10410 }
10411 
10412 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
10413 {
10414     struct hclge_vport *vport = hclge_get_vport(handle);
10415 
10416     return hclge_set_vport_mtu(vport, new_mtu);
10417 }
10418 
10419 int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
10420 {
10421     struct hclge_dev *hdev = vport->back;
10422     int i, max_frm_size, ret;
10423 
10424     /* HW supports 2 layers of vlan */
10425     max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
10426     if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
10427         max_frm_size > hdev->ae_dev->dev_specs.max_frm_size)
10428         return -EINVAL;
10429 
10430     max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
10431     mutex_lock(&hdev->vport_lock);
10432     /* VF's mps must fit within hdev->mps */
10433     if (vport->vport_id && max_frm_size > hdev->mps) {
10434         mutex_unlock(&hdev->vport_lock);
10435         return -EINVAL;
10436     } else if (vport->vport_id) {
10437         vport->mps = max_frm_size;
10438         mutex_unlock(&hdev->vport_lock);
10439         return 0;
10440     }
10441 
10442     /* PF's mps must be no less than the mps of any VF */
10443     for (i = 1; i < hdev->num_alloc_vport; i++)
10444         if (max_frm_size < hdev->vport[i].mps) {
10445             dev_err(&hdev->pdev->dev,
10446                 "failed to set pf mtu for less than vport %d, mps = %u.\n",
10447                 i, hdev->vport[i].mps);
10448             mutex_unlock(&hdev->vport_lock);
10449             return -EINVAL;
10450         }
10451 
10452     hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
10453 
10454     ret = hclge_set_mac_mtu(hdev, max_frm_size);
10455     if (ret) {
10456         dev_err(&hdev->pdev->dev,
10457             "Change mtu fail, ret =%d\n", ret);
10458         goto out;
10459     }
10460 
10461     hdev->mps = max_frm_size;
10462     vport->mps = max_frm_size;
10463 
10464     ret = hclge_buffer_alloc(hdev);
10465     if (ret)
10466         dev_err(&hdev->pdev->dev,
10467             "Allocate buffer fail, ret =%d\n", ret);
10468 
10469 out:
10470     hclge_notify_client(hdev, HNAE3_UP_CLIENT);
10471     mutex_unlock(&hdev->vport_lock);
10472     return ret;
10473 }
10474 
10475 static int hclge_reset_tqp_cmd_send(struct hclge_dev *hdev, u16 queue_id,
10476                     bool enable)
10477 {
10478     struct hclge_reset_tqp_queue_cmd *req;
10479     struct hclge_desc desc;
10480     int ret;
10481 
10482     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
10483 
10484     req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10485     req->tqp_id = cpu_to_le16(queue_id);
10486     if (enable)
10487         hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
10488 
10489     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10490     if (ret) {
10491         dev_err(&hdev->pdev->dev,
10492             "Send tqp reset cmd error, status =%d\n", ret);
10493         return ret;
10494     }
10495 
10496     return 0;
10497 }
10498 
10499 static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id,
10500                   u8 *reset_status)
10501 {
10502     struct hclge_reset_tqp_queue_cmd *req;
10503     struct hclge_desc desc;
10504     int ret;
10505 
10506     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
10507 
10508     req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
10509     req->tqp_id = cpu_to_le16(queue_id);
10510 
10511     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10512     if (ret) {
10513         dev_err(&hdev->pdev->dev,
10514             "Get reset status error, status =%d\n", ret);
10515         return ret;
10516     }
10517 
10518     *reset_status = hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
10519 
10520     return 0;
10521 }
10522 
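/* Convert a queue id that is local to the handle into the global TQP index
 * used by the queue reset commands.
 */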
10523 u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
10524 {
10525     struct hclge_comm_tqp *tqp;
10526     struct hnae3_queue *queue;
10527 
10528     queue = handle->kinfo.tqp[queue_id];
10529     tqp = container_of(queue, struct hclge_comm_tqp, q);
10530 
10531     return tqp->index;
10532 }
10533 
10534 static int hclge_reset_tqp_cmd(struct hnae3_handle *handle)
10535 {
10536     struct hclge_vport *vport = hclge_get_vport(handle);
10537     struct hclge_dev *hdev = vport->back;
10538     u16 reset_try_times = 0;
10539     u8 reset_status;
10540     u16 queue_gid;
10541     int ret;
10542     u16 i;
10543 
10544     for (i = 0; i < handle->kinfo.num_tqps; i++) {
10545         queue_gid = hclge_covert_handle_qid_global(handle, i);
10546         ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, true);
10547         if (ret) {
10548             dev_err(&hdev->pdev->dev,
10549                 "failed to send reset tqp cmd, ret = %d\n",
10550                 ret);
10551             return ret;
10552         }
10553 
10554         while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
10555             ret = hclge_get_reset_status(hdev, queue_gid,
10556                              &reset_status);
10557             if (ret)
10558                 return ret;
10559 
10560             if (reset_status)
10561                 break;
10562 
10563             /* Wait for tqp hw reset */
10564             usleep_range(1000, 1200);
10565         }
10566 
10567         if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
10568             dev_err(&hdev->pdev->dev,
10569                 "wait for tqp hw reset timeout\n");
10570             return -ETIME;
10571         }
10572 
10573         ret = hclge_reset_tqp_cmd_send(hdev, queue_gid, false);
10574         if (ret) {
10575             dev_err(&hdev->pdev->dev,
10576                 "failed to deassert soft reset, ret = %d\n",
10577                 ret);
10578             return ret;
10579         }
10580         reset_try_times = 0;
10581     }
10582     return 0;
10583 }
10584 
10585 static int hclge_reset_rcb(struct hnae3_handle *handle)
10586 {
10587 #define HCLGE_RESET_RCB_NOT_SUPPORT 0U
10588 #define HCLGE_RESET_RCB_SUCCESS     1U
10589 
10590     struct hclge_vport *vport = hclge_get_vport(handle);
10591     struct hclge_dev *hdev = vport->back;
10592     struct hclge_reset_cmd *req;
10593     struct hclge_desc desc;
10594     u8 return_status;
10595     u16 queue_gid;
10596     int ret;
10597 
10598     queue_gid = hclge_covert_handle_qid_global(handle, 0);
10599 
10600     req = (struct hclge_reset_cmd *)desc.data;
10601     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
10602     hnae3_set_bit(req->fun_reset_rcb, HCLGE_CFG_RESET_RCB_B, 1);
10603     req->fun_reset_rcb_vqid_start = cpu_to_le16(queue_gid);
10604     req->fun_reset_rcb_vqid_num = cpu_to_le16(handle->kinfo.num_tqps);
10605 
10606     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
10607     if (ret) {
10608         dev_err(&hdev->pdev->dev,
10609             "failed to send rcb reset cmd, ret = %d\n", ret);
10610         return ret;
10611     }
10612 
10613     return_status = req->fun_reset_rcb_return_status;
10614     if (return_status == HCLGE_RESET_RCB_SUCCESS)
10615         return 0;
10616 
10617     if (return_status != HCLGE_RESET_RCB_NOT_SUPPORT) {
10618         dev_err(&hdev->pdev->dev, "failed to reset rcb, ret = %u\n",
10619             return_status);
10620         return -EIO;
10621     }
10622 
10623     /* if reset rcb cmd is unsupported, we need to send reset tqp cmd
10624      * again to reset all tqps
10625      */
10626     return hclge_reset_tqp_cmd(handle);
10627 }
10628 
10629 int hclge_reset_tqp(struct hnae3_handle *handle)
10630 {
10631     struct hclge_vport *vport = hclge_get_vport(handle);
10632     struct hclge_dev *hdev = vport->back;
10633     int ret;
10634 
10635     /* only need to disable PF's tqp */
10636     if (!vport->vport_id) {
10637         ret = hclge_tqp_enable(handle, false);
10638         if (ret) {
10639             dev_err(&hdev->pdev->dev,
10640                 "failed to disable tqp, ret = %d\n", ret);
10641             return ret;
10642         }
10643     }
10644 
10645     return hclge_reset_rcb(handle);
10646 }
10647 
10648 static u32 hclge_get_fw_version(struct hnae3_handle *handle)
10649 {
10650     struct hclge_vport *vport = hclge_get_vport(handle);
10651     struct hclge_dev *hdev = vport->back;
10652 
10653     return hdev->fw_version;
10654 }
10655 
10656 static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10657 {
10658     struct phy_device *phydev = hdev->hw.mac.phydev;
10659 
10660     if (!phydev)
10661         return;
10662 
10663     phy_set_asym_pause(phydev, rx_en, tx_en);
10664 }
10665 
10666 static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
10667 {
10668     int ret;
10669 
10670     if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
10671         return 0;
10672 
10673     ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
10674     if (ret)
10675         dev_err(&hdev->pdev->dev,
10676             "configure pauseparam error, ret = %d.\n", ret);
10677 
10678     return ret;
10679 }
10680 
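/* Re-resolve flow control after PHY autoneg: combine the local and link
 * partner pause advertisements via mii_resolve_flowctrl_fdx() and program
 * the resulting rx/tx pause setting (forced off in half duplex).
 */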
10681 int hclge_cfg_flowctrl(struct hclge_dev *hdev)
10682 {
10683     struct phy_device *phydev = hdev->hw.mac.phydev;
10684     u16 remote_advertising = 0;
10685     u16 local_advertising;
10686     u32 rx_pause, tx_pause;
10687     u8 flowctl;
10688 
10689     if (!phydev->link || !phydev->autoneg)
10690         return 0;
10691 
10692     local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
10693 
10694     if (phydev->pause)
10695         remote_advertising = LPA_PAUSE_CAP;
10696 
10697     if (phydev->asym_pause)
10698         remote_advertising |= LPA_PAUSE_ASYM;
10699 
10700     flowctl = mii_resolve_flowctrl_fdx(local_advertising,
10701                        remote_advertising);
10702     tx_pause = flowctl & FLOW_CTRL_TX;
10703     rx_pause = flowctl & FLOW_CTRL_RX;
10704 
10705     if (phydev->duplex == HCLGE_MAC_HALF) {
10706         tx_pause = 0;
10707         rx_pause = 0;
10708     }
10709 
10710     return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
10711 }
10712 
10713 static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
10714                  u32 *rx_en, u32 *tx_en)
10715 {
10716     struct hclge_vport *vport = hclge_get_vport(handle);
10717     struct hclge_dev *hdev = vport->back;
10718     u8 media_type = hdev->hw.mac.media_type;
10719 
10720     *auto_neg = (media_type == HNAE3_MEDIA_TYPE_COPPER) ?
10721             hclge_get_autoneg(handle) : 0;
10722 
10723     if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10724         *rx_en = 0;
10725         *tx_en = 0;
10726         return;
10727     }
10728 
10729     if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
10730         *rx_en = 1;
10731         *tx_en = 0;
10732     } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
10733         *tx_en = 1;
10734         *rx_en = 0;
10735     } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
10736         *rx_en = 1;
10737         *tx_en = 1;
10738     } else {
10739         *rx_en = 0;
10740         *tx_en = 0;
10741     }
10742 }
10743 
10744 static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
10745                      u32 rx_en, u32 tx_en)
10746 {
10747     if (rx_en && tx_en)
10748         hdev->fc_mode_last_time = HCLGE_FC_FULL;
10749     else if (rx_en && !tx_en)
10750         hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
10751     else if (!rx_en && tx_en)
10752         hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
10753     else
10754         hdev->fc_mode_last_time = HCLGE_FC_NONE;
10755 
10756     hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
10757 }
10758 
10759 static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
10760                 u32 rx_en, u32 tx_en)
10761 {
10762     struct hclge_vport *vport = hclge_get_vport(handle);
10763     struct hclge_dev *hdev = vport->back;
10764     struct phy_device *phydev = hdev->hw.mac.phydev;
10765     u32 fc_autoneg;
10766 
10767     if (phydev || hnae3_dev_phy_imp_supported(hdev)) {
10768         fc_autoneg = hclge_get_autoneg(handle);
10769         if (auto_neg != fc_autoneg) {
10770             dev_info(&hdev->pdev->dev,
10771                  "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
10772             return -EOPNOTSUPP;
10773         }
10774     }
10775 
10776     if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
10777         dev_info(&hdev->pdev->dev,
10778              "Priority flow control enabled. Cannot set link flow control.\n");
10779         return -EOPNOTSUPP;
10780     }
10781 
10782     hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
10783 
10784     hclge_record_user_pauseparam(hdev, rx_en, tx_en);
10785 
10786     if (!auto_neg || hnae3_dev_phy_imp_supported(hdev))
10787         return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
10788 
10789     if (phydev)
10790         return phy_start_aneg(phydev);
10791 
10792     return -EOPNOTSUPP;
10793 }
10794 
10795 static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
10796                       u8 *auto_neg, u32 *speed, u8 *duplex)
10797 {
10798     struct hclge_vport *vport = hclge_get_vport(handle);
10799     struct hclge_dev *hdev = vport->back;
10800 
10801     if (speed)
10802         *speed = hdev->hw.mac.speed;
10803     if (duplex)
10804         *duplex = hdev->hw.mac.duplex;
10805     if (auto_neg)
10806         *auto_neg = hdev->hw.mac.autoneg;
10807 }
10808 
10809 static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
10810                  u8 *module_type)
10811 {
10812     struct hclge_vport *vport = hclge_get_vport(handle);
10813     struct hclge_dev *hdev = vport->back;
10814 
10815     /* When the nic is down, the service task is not running and doesn't
10816      * update the port information every second. Query the port information
10817      * before returning the media type to ensure it is correct.
10818      */
10819     hclge_update_port_info(hdev);
10820 
10821     if (media_type)
10822         *media_type = hdev->hw.mac.media_type;
10823 
10824     if (module_type)
10825         *module_type = hdev->hw.mac.module_type;
10826 }
10827 
10828 static void hclge_get_mdix_mode(struct hnae3_handle *handle,
10829                 u8 *tp_mdix_ctrl, u8 *tp_mdix)
10830 {
10831     struct hclge_vport *vport = hclge_get_vport(handle);
10832     struct hclge_dev *hdev = vport->back;
10833     struct phy_device *phydev = hdev->hw.mac.phydev;
10834     int mdix_ctrl, mdix, is_resolved;
10835     unsigned int retval;
10836 
10837     if (!phydev) {
10838         *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10839         *tp_mdix = ETH_TP_MDI_INVALID;
10840         return;
10841     }
10842 
10843     phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
10844 
10845     retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
10846     mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
10847                     HCLGE_PHY_MDIX_CTRL_S);
10848 
10849     retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
10850     mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
10851     is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
10852 
10853     phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
10854 
10855     switch (mdix_ctrl) {
10856     case 0x0:
10857         *tp_mdix_ctrl = ETH_TP_MDI;
10858         break;
10859     case 0x1:
10860         *tp_mdix_ctrl = ETH_TP_MDI_X;
10861         break;
10862     case 0x3:
10863         *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
10864         break;
10865     default:
10866         *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
10867         break;
10868     }
10869 
10870     if (!is_resolved)
10871         *tp_mdix = ETH_TP_MDI_INVALID;
10872     else if (mdix)
10873         *tp_mdix = ETH_TP_MDI_X;
10874     else
10875         *tp_mdix = ETH_TP_MDI;
10876 }
10877 
10878 static void hclge_info_show(struct hclge_dev *hdev)
10879 {
10880     struct device *dev = &hdev->pdev->dev;
10881 
10882     dev_info(dev, "PF info begin:\n");
10883 
10884     dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
10885     dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
10886     dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
10887     dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
10888     dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
10889     dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
10890     dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
10891     dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
10892     dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
10893     dev_info(dev, "This is %s PF\n",
10894          hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
10895     dev_info(dev, "DCB %s\n",
10896          hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
10897     dev_info(dev, "MQPRIO %s\n",
10898          hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
10899     dev_info(dev, "Default tx spare buffer size: %u\n",
10900          hdev->tx_spare_buf_size);
10901 
10902     dev_info(dev, "PF info end.\n");
10903 }
10904 
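/* Bring up the NIC client instance on the PF vport. The reset counter is
 * sampled before the client's init_instance() call and compared again after
 * it returns, so a reset racing with the initialization is detected and the
 * instance is unwound again with -EBUSY.
 */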
10905 static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
10906                       struct hclge_vport *vport)
10907 {
10908     struct hnae3_client *client = vport->nic.client;
10909     struct hclge_dev *hdev = ae_dev->priv;
10910     int rst_cnt = hdev->rst_stats.reset_cnt;
10911     int ret;
10912 
10913     ret = client->ops->init_instance(&vport->nic);
10914     if (ret)
10915         return ret;
10916 
10917     set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10918     if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10919         rst_cnt != hdev->rst_stats.reset_cnt) {
10920         ret = -EBUSY;
10921         goto init_nic_err;
10922     }
10923 
10924     /* Enable nic hw error interrupts */
10925     ret = hclge_config_nic_hw_error(hdev, true);
10926     if (ret) {
10927         dev_err(&ae_dev->pdev->dev,
10928             "fail(%d) to enable hw error interrupts\n", ret);
10929         goto init_nic_err;
10930     }
10931 
10932     hnae3_set_client_init_flag(client, ae_dev, 1);
10933 
10934     if (netif_msg_drv(&hdev->vport->nic))
10935         hclge_info_show(hdev);
10936 
10937     return ret;
10938 
10939 init_nic_err:
10940     clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
10941     while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10942         msleep(HCLGE_WAIT_RESET_DONE);
10943 
10944     client->ops->uninit_instance(&vport->nic, 0);
10945 
10946     return ret;
10947 }
10948 
10949 static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
10950                        struct hclge_vport *vport)
10951 {
10952     struct hclge_dev *hdev = ae_dev->priv;
10953     struct hnae3_client *client;
10954     int rst_cnt;
10955     int ret;
10956 
10957     if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
10958         !hdev->nic_client)
10959         return 0;
10960 
10961     client = hdev->roce_client;
10962     ret = hclge_init_roce_base_info(vport);
10963     if (ret)
10964         return ret;
10965 
10966     rst_cnt = hdev->rst_stats.reset_cnt;
10967     ret = client->ops->init_instance(&vport->roce);
10968     if (ret)
10969         return ret;
10970 
10971     set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10972     if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
10973         rst_cnt != hdev->rst_stats.reset_cnt) {
10974         ret = -EBUSY;
10975         goto init_roce_err;
10976     }
10977 
10978     /* Enable roce ras interrupts */
10979     ret = hclge_config_rocee_ras_interrupt(hdev, true);
10980     if (ret) {
10981         dev_err(&ae_dev->pdev->dev,
10982             "fail(%d) to enable roce ras interrupts\n", ret);
10983         goto init_roce_err;
10984     }
10985 
10986     hnae3_set_client_init_flag(client, ae_dev, 1);
10987 
10988     return 0;
10989 
10990 init_roce_err:
10991     clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
10992     while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
10993         msleep(HCLGE_WAIT_RESET_DONE);
10994 
10995     hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
10996 
10997     return ret;
10998 }
10999 
11000 static int hclge_init_client_instance(struct hnae3_client *client,
11001                       struct hnae3_ae_dev *ae_dev)
11002 {
11003     struct hclge_dev *hdev = ae_dev->priv;
11004     struct hclge_vport *vport = &hdev->vport[0];
11005     int ret;
11006 
11007     switch (client->type) {
11008     case HNAE3_CLIENT_KNIC:
11009         hdev->nic_client = client;
11010         vport->nic.client = client;
11011         ret = hclge_init_nic_client_instance(ae_dev, vport);
11012         if (ret)
11013             goto clear_nic;
11014 
11015         ret = hclge_init_roce_client_instance(ae_dev, vport);
11016         if (ret)
11017             goto clear_roce;
11018 
11019         break;
11020     case HNAE3_CLIENT_ROCE:
11021         if (hnae3_dev_roce_supported(hdev)) {
11022             hdev->roce_client = client;
11023             vport->roce.client = client;
11024         }
11025 
11026         ret = hclge_init_roce_client_instance(ae_dev, vport);
11027         if (ret)
11028             goto clear_roce;
11029 
11030         break;
11031     default:
11032         return -EINVAL;
11033     }
11034 
11035     return 0;
11036 
11037 clear_nic:
11038     hdev->nic_client = NULL;
11039     vport->nic.client = NULL;
11040     return ret;
11041 clear_roce:
11042     hdev->roce_client = NULL;
11043     vport->roce.client = NULL;
11044     return ret;
11045 }
11046 
11047 static void hclge_uninit_client_instance(struct hnae3_client *client,
11048                      struct hnae3_ae_dev *ae_dev)
11049 {
11050     struct hclge_dev *hdev = ae_dev->priv;
11051     struct hclge_vport *vport = &hdev->vport[0];
11052 
11053     if (hdev->roce_client) {
11054         clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
11055         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11056             msleep(HCLGE_WAIT_RESET_DONE);
11057 
11058         hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
11059         hdev->roce_client = NULL;
11060         vport->roce.client = NULL;
11061     }
11062     if (client->type == HNAE3_CLIENT_ROCE)
11063         return;
11064     if (hdev->nic_client && client->ops->uninit_instance) {
11065         clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
11066         while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
11067             msleep(HCLGE_WAIT_RESET_DONE);
11068 
11069         client->ops->uninit_instance(&vport->nic, 0);
11070         hdev->nic_client = NULL;
11071         vport->nic.client = NULL;
11072     }
11073 }
11074 
11075 static int hclge_dev_mem_map(struct hclge_dev *hdev)
11076 {
11077     struct pci_dev *pdev = hdev->pdev;
11078     struct hclge_hw *hw = &hdev->hw;
11079 
11080     /* if the device does not have device memory, return directly */
11081     if (!(pci_select_bars(pdev, IORESOURCE_MEM) & BIT(HCLGE_MEM_BAR)))
11082         return 0;
11083 
11084     hw->hw.mem_base =
11085         devm_ioremap_wc(&pdev->dev,
11086                 pci_resource_start(pdev, HCLGE_MEM_BAR),
11087                 pci_resource_len(pdev, HCLGE_MEM_BAR));
11088     if (!hw->hw.mem_base) {
11089         dev_err(&pdev->dev, "failed to map device memory\n");
11090         return -EFAULT;
11091     }
11092 
11093     return 0;
11094 }
11095 
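/* PCI bring-up for the PF: enable the device, prefer a 64-bit DMA mask with a
 * fallback to 32 bits, request the BARs, map BAR 2 as the configuration
 * register space and map the optional device memory BAR (write-combined) when
 * it is present.
 */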
11096 static int hclge_pci_init(struct hclge_dev *hdev)
11097 {
11098     struct pci_dev *pdev = hdev->pdev;
11099     struct hclge_hw *hw;
11100     int ret;
11101 
11102     ret = pci_enable_device(pdev);
11103     if (ret) {
11104         dev_err(&pdev->dev, "failed to enable PCI device\n");
11105         return ret;
11106     }
11107 
11108     ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
11109     if (ret) {
11110         ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
11111         if (ret) {
11112             dev_err(&pdev->dev,
11113                 "can't set consistent PCI DMA\n");
11114             goto err_disable_device;
11115         }
11116         dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
11117     }
11118 
11119     ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
11120     if (ret) {
11121         dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
11122         goto err_disable_device;
11123     }
11124 
11125     pci_set_master(pdev);
11126     hw = &hdev->hw;
11127     hw->hw.io_base = pcim_iomap(pdev, 2, 0);
11128     if (!hw->hw.io_base) {
11129         dev_err(&pdev->dev, "Can't map configuration register space\n");
11130         ret = -ENOMEM;
11131         goto err_clr_master;
11132     }
11133 
11134     ret = hclge_dev_mem_map(hdev);
11135     if (ret)
11136         goto err_unmap_io_base;
11137 
11138     hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
11139 
11140     return 0;
11141 
11142 err_unmap_io_base:
11143     pcim_iounmap(pdev, hdev->hw.hw.io_base);
11144 err_clr_master:
11145     pci_clear_master(pdev);
11146     pci_release_regions(pdev);
11147 err_disable_device:
11148     pci_disable_device(pdev);
11149 
11150     return ret;
11151 }
11152 
11153 static void hclge_pci_uninit(struct hclge_dev *hdev)
11154 {
11155     struct pci_dev *pdev = hdev->pdev;
11156 
11157     if (hdev->hw.hw.mem_base)
11158         devm_iounmap(&pdev->dev, hdev->hw.hw.mem_base);
11159 
11160     pcim_iounmap(pdev, hdev->hw.hw.io_base);
11161     pci_free_irq_vectors(pdev);
11162     pci_clear_master(pdev);
11163     pci_release_mem_regions(pdev);
11164     pci_disable_device(pdev);
11165 }
11166 
11167 static void hclge_state_init(struct hclge_dev *hdev)
11168 {
11169     set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
11170     set_bit(HCLGE_STATE_DOWN, &hdev->state);
11171     clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
11172     clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11173     clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
11174     clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
11175     clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
11176 }
11177 
11178 static void hclge_state_uninit(struct hclge_dev *hdev)
11179 {
11180     set_bit(HCLGE_STATE_DOWN, &hdev->state);
11181     set_bit(HCLGE_STATE_REMOVING, &hdev->state);
11182 
11183     if (hdev->reset_timer.function)
11184         del_timer_sync(&hdev->reset_timer);
11185     if (hdev->service_task.work.func)
11186         cancel_delayed_work_sync(&hdev->service_task);
11187 }
11188 
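/* Prepare the hardware for an externally driven reset. The reset semaphore is
 * taken and hclge_reset_prepare() is retried up to HCLGE_RESET_RETRY_CNT times
 * with a HCLGE_RESET_RETRY_WAIT_MS pause between attempts; the misc vector and
 * command queue are then disabled until hclge_reset_done() rebuilds and
 * re-enables them.
 */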
11189 static void hclge_reset_prepare_general(struct hnae3_ae_dev *ae_dev,
11190                     enum hnae3_reset_type rst_type)
11191 {
11192 #define HCLGE_RESET_RETRY_WAIT_MS   500
11193 #define HCLGE_RESET_RETRY_CNT   5
11194 
11195     struct hclge_dev *hdev = ae_dev->priv;
11196     int retry_cnt = 0;
11197     int ret;
11198 
11199     while (retry_cnt++ < HCLGE_RESET_RETRY_CNT) {
11200         down(&hdev->reset_sem);
11201         set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11202         hdev->reset_type = rst_type;
11203         ret = hclge_reset_prepare(hdev);
11204         if (!ret && !hdev->reset_pending)
11205             break;
11206 
11207         dev_err(&hdev->pdev->dev,
11208             "failed to prepare to reset, ret=%d, reset_pending:0x%lx, retry_cnt:%d\n",
11209             ret, hdev->reset_pending, retry_cnt);
11210         clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11211         up(&hdev->reset_sem);
11212         msleep(HCLGE_RESET_RETRY_WAIT_MS);
11213     }
11214 
11215     /* disable misc vector before reset done */
11216     hclge_enable_vector(&hdev->misc_vector, false);
11217     set_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state);
11218 
11219     if (hdev->reset_type == HNAE3_FLR_RESET)
11220         hdev->rst_stats.flr_rst_cnt++;
11221 }
11222 
11223 static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
11224 {
11225     struct hclge_dev *hdev = ae_dev->priv;
11226     int ret;
11227 
11228     hclge_enable_vector(&hdev->misc_vector, true);
11229 
11230     ret = hclge_reset_rebuild(hdev);
11231     if (ret)
11232         dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
11233 
11234     hdev->reset_type = HNAE3_NONE_RESET;
11235     clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
11236     up(&hdev->reset_sem);
11237 }
11238 
11239 static void hclge_clear_resetting_state(struct hclge_dev *hdev)
11240 {
11241     u16 i;
11242 
11243     for (i = 0; i < hdev->num_alloc_vport; i++) {
11244         struct hclge_vport *vport = &hdev->vport[i];
11245         int ret;
11246 
11247          /* Send cmd to clear vport's FUNC_RST_ING */
11248         ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
11249         if (ret)
11250             dev_warn(&hdev->pdev->dev,
11251                  "clear vport(%u) rst failed %d!\n",
11252                  vport->vport_id, ret);
11253     }
11254 }
11255 
11256 static int hclge_clear_hw_resource(struct hclge_dev *hdev)
11257 {
11258     struct hclge_desc desc;
11259     int ret;
11260 
11261     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
11262 
11263     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
11264     /* This new command is only supported by new firmware; it will
11265      * fail with older firmware. The error value -EOPNOTSUPP can only be
11266      * returned by older firmware running this command, so to keep the
11267      * code backward compatible we override this value and return
11268      * success.
11269      */
11270     if (ret && ret != -EOPNOTSUPP) {
11271         dev_err(&hdev->pdev->dev,
11272             "failed to clear hw resource, ret = %d\n", ret);
11273         return ret;
11274     }
11275     return 0;
11276 }
11277 
11278 static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev)
11279 {
11280     if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11281         hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 1);
11282 }
11283 
11284 static void hclge_uninit_rxd_adv_layout(struct hclge_dev *hdev)
11285 {
11286     if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev))
11287         hclge_write_dev(&hdev->hw, HCLGE_RXD_ADV_LAYOUT_EN_REG, 0);
11288 }
11289 
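/* Main PF initialization path: map the PCI resources, bring up the firmware
 * command queue, query capabilities and device specifications, then configure
 * MSI/MSI-X, TQPs, vports, MAC/MDIO, VLAN, TM scheduling, RSS and the flow
 * director before starting the service task.
 */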
11290 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
11291 {
11292     struct pci_dev *pdev = ae_dev->pdev;
11293     struct hclge_dev *hdev;
11294     int ret;
11295 
11296     hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
11297     if (!hdev)
11298         return -ENOMEM;
11299 
11300     hdev->pdev = pdev;
11301     hdev->ae_dev = ae_dev;
11302     hdev->reset_type = HNAE3_NONE_RESET;
11303     hdev->reset_level = HNAE3_FUNC_RESET;
11304     ae_dev->priv = hdev;
11305 
11306     /* HW supports 2 layers of VLAN tags */
11307     hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
11308 
11309     mutex_init(&hdev->vport_lock);
11310     spin_lock_init(&hdev->fd_rule_lock);
11311     sema_init(&hdev->reset_sem, 1);
11312 
11313     ret = hclge_pci_init(hdev);
11314     if (ret)
11315         goto out;
11316 
11317     ret = hclge_devlink_init(hdev);
11318     if (ret)
11319         goto err_pci_uninit;
11320 
11321     /* Firmware command queue initialization */
11322     ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
11323     if (ret)
11324         goto err_devlink_uninit;
11325 
11326     /* Firmware command initialization */
11327     ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11328                   true, hdev->reset_pending);
11329     if (ret)
11330         goto err_cmd_uninit;
11331 
11332     ret = hclge_clear_hw_resource(hdev);
11333     if (ret)
11334         goto err_cmd_uninit;
11335 
11336     ret = hclge_get_cap(hdev);
11337     if (ret)
11338         goto err_cmd_uninit;
11339 
11340     ret = hclge_query_dev_specs(hdev);
11341     if (ret) {
11342         dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
11343             ret);
11344         goto err_cmd_uninit;
11345     }
11346 
11347     ret = hclge_configure(hdev);
11348     if (ret) {
11349         dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
11350         goto err_cmd_uninit;
11351     }
11352 
11353     ret = hclge_init_msi(hdev);
11354     if (ret) {
11355         dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
11356         goto err_cmd_uninit;
11357     }
11358 
11359     ret = hclge_misc_irq_init(hdev);
11360     if (ret)
11361         goto err_msi_uninit;
11362 
11363     ret = hclge_alloc_tqps(hdev);
11364     if (ret) {
11365         dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
11366         goto err_msi_irq_uninit;
11367     }
11368 
11369     ret = hclge_alloc_vport(hdev);
11370     if (ret)
11371         goto err_msi_irq_uninit;
11372 
11373     ret = hclge_map_tqp(hdev);
11374     if (ret)
11375         goto err_msi_irq_uninit;
11376 
11377     if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER &&
11378         !hnae3_dev_phy_imp_supported(hdev)) {
11379         ret = hclge_mac_mdio_config(hdev);
11380         if (ret)
11381             goto err_msi_irq_uninit;
11382     }
11383 
11384     ret = hclge_init_umv_space(hdev);
11385     if (ret)
11386         goto err_mdiobus_unreg;
11387 
11388     ret = hclge_mac_init(hdev);
11389     if (ret) {
11390         dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11391         goto err_mdiobus_unreg;
11392     }
11393 
11394     ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11395     if (ret) {
11396         dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11397         goto err_mdiobus_unreg;
11398     }
11399 
11400     ret = hclge_config_gro(hdev);
11401     if (ret)
11402         goto err_mdiobus_unreg;
11403 
11404     ret = hclge_init_vlan_config(hdev);
11405     if (ret) {
11406         dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11407         goto err_mdiobus_unreg;
11408     }
11409 
11410     ret = hclge_tm_schd_init(hdev);
11411     if (ret) {
11412         dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
11413         goto err_mdiobus_unreg;
11414     }
11415 
11416     ret = hclge_comm_rss_init_cfg(&hdev->vport->nic, hdev->ae_dev,
11417                       &hdev->rss_cfg);
11418     if (ret) {
11419         dev_err(&pdev->dev, "failed to init rss cfg, ret = %d\n", ret);
11420         goto err_mdiobus_unreg;
11421     }
11422 
11423     ret = hclge_rss_init_hw(hdev);
11424     if (ret) {
11425         dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11426         goto err_mdiobus_unreg;
11427     }
11428 
11429     ret = init_mgr_tbl(hdev);
11430     if (ret) {
11431         dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
11432         goto err_mdiobus_unreg;
11433     }
11434 
11435     ret = hclge_init_fd_config(hdev);
11436     if (ret) {
11437         dev_err(&pdev->dev,
11438             "fd table init fail, ret=%d\n", ret);
11439         goto err_mdiobus_unreg;
11440     }
11441 
11442     ret = hclge_ptp_init(hdev);
11443     if (ret)
11444         goto err_mdiobus_unreg;
11445 
11446     INIT_KFIFO(hdev->mac_tnl_log);
11447 
11448     hclge_dcb_ops_set(hdev);
11449 
11450     timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
11451     INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
11452 
11453     hclge_clear_all_event_cause(hdev);
11454     hclge_clear_resetting_state(hdev);
11455 
11456     /* Log and clear the hw errors that have already occurred */
11457     if (hnae3_dev_ras_imp_supported(hdev))
11458         hclge_handle_occurred_error(hdev);
11459     else
11460         hclge_handle_all_hns_hw_errors(ae_dev);
11461 
11462     /* request a delayed reset for the error recovery because an immediate
11463      * global reset on a PF may affect the pending initialization of other PFs
11464      */
11465     if (ae_dev->hw_err_reset_req) {
11466         enum hnae3_reset_type reset_level;
11467 
11468         reset_level = hclge_get_reset_level(ae_dev,
11469                             &ae_dev->hw_err_reset_req);
11470         hclge_set_def_reset_request(ae_dev, reset_level);
11471         mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
11472     }
11473 
11474     hclge_init_rxd_adv_layout(hdev);
11475 
11476     /* Enable MISC vector(vector0) */
11477     hclge_enable_vector(&hdev->misc_vector, true);
11478 
11479     hclge_state_init(hdev);
11480     hdev->last_reset_time = jiffies;
11481 
11482     dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
11483          HCLGE_DRIVER_NAME);
11484 
11485     hclge_task_schedule(hdev, round_jiffies_relative(HZ));
11486 
11487     return 0;
11488 
11489 err_mdiobus_unreg:
11490     if (hdev->hw.mac.phydev)
11491         mdiobus_unregister(hdev->hw.mac.mdio_bus);
11492 err_msi_irq_uninit:
11493     hclge_misc_irq_uninit(hdev);
11494 err_msi_uninit:
11495     pci_free_irq_vectors(pdev);
11496 err_cmd_uninit:
11497     hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11498 err_devlink_uninit:
11499     hclge_devlink_uninit(hdev);
11500 err_pci_uninit:
11501     pcim_iounmap(pdev, hdev->hw.hw.io_base);
11502     pci_clear_master(pdev);
11503     pci_release_regions(pdev);
11504     pci_disable_device(pdev);
11505 out:
11506     mutex_destroy(&hdev->vport_lock);
11507     return ret;
11508 }
11509 
11510 static void hclge_stats_clear(struct hclge_dev *hdev)
11511 {
11512     memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
11513 }
11514 
11515 static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11516 {
11517     return hclge_config_switch_param(hdev, vf, enable,
11518                      HCLGE_SWITCH_ANTI_SPOOF_MASK);
11519 }
11520 
11521 static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
11522 {
11523     return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
11524                       HCLGE_FILTER_FE_NIC_INGRESS_B,
11525                       enable, vf);
11526 }
11527 
11528 static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
11529 {
11530     int ret;
11531 
11532     ret = hclge_set_mac_spoofchk(hdev, vf, enable);
11533     if (ret) {
11534         dev_err(&hdev->pdev->dev,
11535             "Set vf %d mac spoof check %s failed, ret=%d\n",
11536             vf, enable ? "on" : "off", ret);
11537         return ret;
11538     }
11539 
11540     ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
11541     if (ret)
11542         dev_err(&hdev->pdev->dev,
11543             "Set vf %d vlan spoof check %s failed, ret=%d\n",
11544             vf, enable ? "on" : "off", ret);
11545 
11546     return ret;
11547 }
11548 
11549 static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
11550                  bool enable)
11551 {
11552     struct hclge_vport *vport = hclge_get_vport(handle);
11553     struct hclge_dev *hdev = vport->back;
11554     u32 new_spoofchk = enable ? 1 : 0;
11555     int ret;
11556 
11557     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11558         return -EOPNOTSUPP;
11559 
11560     vport = hclge_get_vf_vport(hdev, vf);
11561     if (!vport)
11562         return -EINVAL;
11563 
11564     if (vport->vf_info.spoofchk == new_spoofchk)
11565         return 0;
11566 
11567     if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
11568         dev_warn(&hdev->pdev->dev,
11569              "vf %d vlan table is full, enable spoof check may cause its packet send fail\n",
11570              vf);
11571     else if (enable && hclge_is_umv_space_full(vport, true))
11572         dev_warn(&hdev->pdev->dev,
11573              "vf %d mac table is full, enable spoof check may cause its packet send fail\n",
11574              vf);
11575 
11576     ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
11577     if (ret)
11578         return ret;
11579 
11580     vport->vf_info.spoofchk = new_spoofchk;
11581     return 0;
11582 }
11583 
11584 static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
11585 {
11586     struct hclge_vport *vport = hdev->vport;
11587     int ret;
11588     int i;
11589 
11590     if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
11591         return 0;
11592 
11593     /* resume the vf spoof check state after reset */
11594     for (i = 0; i < hdev->num_alloc_vport; i++) {
11595         ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
11596                            vport->vf_info.spoofchk);
11597         if (ret)
11598             return ret;
11599 
11600         vport++;
11601     }
11602 
11603     return 0;
11604 }
11605 
11606 static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
11607 {
11608     struct hclge_vport *vport = hclge_get_vport(handle);
11609     struct hclge_dev *hdev = vport->back;
11610     u32 new_trusted = enable ? 1 : 0;
11611 
11612     vport = hclge_get_vf_vport(hdev, vf);
11613     if (!vport)
11614         return -EINVAL;
11615 
11616     if (vport->vf_info.trusted == new_trusted)
11617         return 0;
11618 
11619     vport->vf_info.trusted = new_trusted;
11620     set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
11621     hclge_task_schedule(hdev, 0);
11622 
11623     return 0;
11624 }
11625 
11626 static void hclge_reset_vf_rate(struct hclge_dev *hdev)
11627 {
11628     int ret;
11629     int vf;
11630 
11631     /* reset vf rate to default value */
11632     for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
11633         struct hclge_vport *vport = &hdev->vport[vf];
11634 
11635         vport->vf_info.max_tx_rate = 0;
11636         ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
11637         if (ret)
11638             dev_err(&hdev->pdev->dev,
11639                 "vf%d failed to reset to default, ret=%d\n",
11640                 vf - HCLGE_VF_VPORT_START_NUM, ret);
11641     }
11642 }
11643 
11644 static int hclge_vf_rate_param_check(struct hclge_dev *hdev,
11645                      int min_tx_rate, int max_tx_rate)
11646 {
11647     if (min_tx_rate != 0 ||
11648         max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
11649         dev_err(&hdev->pdev->dev,
11650             "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
11651             min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
11652         return -EINVAL;
11653     }
11654 
11655     return 0;
11656 }
11657 
11658 static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
11659                  int min_tx_rate, int max_tx_rate, bool force)
11660 {
11661     struct hclge_vport *vport = hclge_get_vport(handle);
11662     struct hclge_dev *hdev = vport->back;
11663     int ret;
11664 
11665     ret = hclge_vf_rate_param_check(hdev, min_tx_rate, max_tx_rate);
11666     if (ret)
11667         return ret;
11668 
11669     vport = hclge_get_vf_vport(hdev, vf);
11670     if (!vport)
11671         return -EINVAL;
11672 
11673     if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
11674         return 0;
11675 
11676     ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
11677     if (ret)
11678         return ret;
11679 
11680     vport->vf_info.max_tx_rate = max_tx_rate;
11681 
11682     return 0;
11683 }
11684 
11685 static int hclge_resume_vf_rate(struct hclge_dev *hdev)
11686 {
11687     struct hnae3_handle *handle = &hdev->vport->nic;
11688     struct hclge_vport *vport;
11689     int ret;
11690     int vf;
11691 
11692     /* resume the vf max_tx_rate after reset */
11693     for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
11694         vport = hclge_get_vf_vport(hdev, vf);
11695         if (!vport)
11696             return -EINVAL;
11697 
11698         /* zero means max rate; after reset the firmware has already set
11699          * it to max rate, so just continue.
11700          */
11701         if (!vport->vf_info.max_tx_rate)
11702             continue;
11703 
11704         ret = hclge_set_vf_rate(handle, vf, 0,
11705                     vport->vf_info.max_tx_rate, true);
11706         if (ret) {
11707             dev_err(&hdev->pdev->dev,
11708                 "vf%d failed to resume tx_rate:%u, ret=%d\n",
11709                 vf, vport->vf_info.max_tx_rate, ret);
11710             return ret;
11711         }
11712     }
11713 
11714     return 0;
11715 }
11716 
11717 static void hclge_reset_vport_state(struct hclge_dev *hdev)
11718 {
11719     struct hclge_vport *vport = hdev->vport;
11720     int i;
11721 
11722     for (i = 0; i < hdev->num_alloc_vport; i++) {
11723         hclge_vport_stop(vport);
11724         vport++;
11725     }
11726 }
11727 
11728 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
11729 {
11730     struct hclge_dev *hdev = ae_dev->priv;
11731     struct pci_dev *pdev = ae_dev->pdev;
11732     int ret;
11733 
11734     set_bit(HCLGE_STATE_DOWN, &hdev->state);
11735 
11736     hclge_stats_clear(hdev);
11737     /* NOTE: a pf reset does not need to clear or restore the pf and vf
11738      * table entries, so do not clean the tables in memory here.
11739      */
11740     if (hdev->reset_type == HNAE3_IMP_RESET ||
11741         hdev->reset_type == HNAE3_GLOBAL_RESET) {
11742         memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
11743         memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
11744         bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
11745         hclge_reset_umv_space(hdev);
11746     }
11747 
11748     ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
11749                   true, hdev->reset_pending);
11750     if (ret) {
11751         dev_err(&pdev->dev, "Cmd queue init failed\n");
11752         return ret;
11753     }
11754 
11755     ret = hclge_map_tqp(hdev);
11756     if (ret) {
11757         dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
11758         return ret;
11759     }
11760 
11761     ret = hclge_mac_init(hdev);
11762     if (ret) {
11763         dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
11764         return ret;
11765     }
11766 
11767     ret = hclge_tp_port_init(hdev);
11768     if (ret) {
11769         dev_err(&pdev->dev, "failed to init tp port, ret = %d\n",
11770             ret);
11771         return ret;
11772     }
11773 
11774     ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
11775     if (ret) {
11776         dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
11777         return ret;
11778     }
11779 
11780     ret = hclge_config_gro(hdev);
11781     if (ret)
11782         return ret;
11783 
11784     ret = hclge_init_vlan_config(hdev);
11785     if (ret) {
11786         dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
11787         return ret;
11788     }
11789 
11790     ret = hclge_tm_init_hw(hdev, true);
11791     if (ret) {
11792         dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
11793         return ret;
11794     }
11795 
11796     ret = hclge_rss_init_hw(hdev);
11797     if (ret) {
11798         dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
11799         return ret;
11800     }
11801 
11802     ret = init_mgr_tbl(hdev);
11803     if (ret) {
11804         dev_err(&pdev->dev,
11805             "failed to reinit manager table, ret = %d\n", ret);
11806         return ret;
11807     }
11808 
11809     ret = hclge_init_fd_config(hdev);
11810     if (ret) {
11811         dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
11812         return ret;
11813     }
11814 
11815     ret = hclge_ptp_init(hdev);
11816     if (ret)
11817         return ret;
11818 
11819     /* Log and clear the hw errors that have already occurred */
11820     if (hnae3_dev_ras_imp_supported(hdev))
11821         hclge_handle_occurred_error(hdev);
11822     else
11823         hclge_handle_all_hns_hw_errors(ae_dev);
11824 
11825     /* Re-enable the hw error interrupts because
11826      * the interrupts are disabled during a global reset.
11827      */
11828     ret = hclge_config_nic_hw_error(hdev, true);
11829     if (ret) {
11830         dev_err(&pdev->dev,
11831             "fail(%d) to re-enable NIC hw error interrupts\n",
11832             ret);
11833         return ret;
11834     }
11835 
11836     if (hdev->roce_client) {
11837         ret = hclge_config_rocee_ras_interrupt(hdev, true);
11838         if (ret) {
11839             dev_err(&pdev->dev,
11840                 "fail(%d) to re-enable roce ras interrupts\n",
11841                 ret);
11842             return ret;
11843         }
11844     }
11845 
11846     hclge_reset_vport_state(hdev);
11847     ret = hclge_reset_vport_spoofchk(hdev);
11848     if (ret)
11849         return ret;
11850 
11851     ret = hclge_resume_vf_rate(hdev);
11852     if (ret)
11853         return ret;
11854 
11855     hclge_init_rxd_adv_layout(hdev);
11856 
11857     dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
11858          HCLGE_DRIVER_NAME);
11859 
11860     return 0;
11861 }
11862 
11863 static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
11864 {
11865     struct hclge_dev *hdev = ae_dev->priv;
11866     struct hclge_mac *mac = &hdev->hw.mac;
11867 
11868     hclge_reset_vf_rate(hdev);
11869     hclge_clear_vf_vlan(hdev);
11870     hclge_state_uninit(hdev);
11871     hclge_ptp_uninit(hdev);
11872     hclge_uninit_rxd_adv_layout(hdev);
11873     hclge_uninit_mac_table(hdev);
11874     hclge_del_all_fd_entries(hdev);
11875 
11876     if (mac->phydev)
11877         mdiobus_unregister(mac->mdio_bus);
11878 
11879     /* Disable MISC vector(vector0) */
11880     hclge_enable_vector(&hdev->misc_vector, false);
11881     synchronize_irq(hdev->misc_vector.vector_irq);
11882 
11883     /* Disable all hw interrupts */
11884     hclge_config_mac_tnl_int(hdev, false);
11885     hclge_config_nic_hw_error(hdev, false);
11886     hclge_config_rocee_ras_interrupt(hdev, false);
11887 
11888     hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
11889     hclge_misc_irq_uninit(hdev);
11890     hclge_devlink_uninit(hdev);
11891     hclge_pci_uninit(hdev);
11892     hclge_uninit_vport_vlan_table(hdev);
11893     mutex_destroy(&hdev->vport_lock);
11894     ae_dev->priv = NULL;
11895 }
11896 
11897 static u32 hclge_get_max_channels(struct hnae3_handle *handle)
11898 {
11899     struct hclge_vport *vport = hclge_get_vport(handle);
11900     struct hclge_dev *hdev = vport->back;
11901 
11902     return min_t(u32, hdev->pf_rss_size_max, vport->alloc_tqps);
11903 }
11904 
11905 static void hclge_get_channels(struct hnae3_handle *handle,
11906                    struct ethtool_channels *ch)
11907 {
11908     ch->max_combined = hclge_get_max_channels(handle);
11909     ch->other_count = 1;
11910     ch->max_other = 1;
11911     ch->combined_count = handle->kinfo.rss_size;
11912 }
11913 
11914 static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
11915                     u16 *alloc_tqps, u16 *max_rss_size)
11916 {
11917     struct hclge_vport *vport = hclge_get_vport(handle);
11918     struct hclge_dev *hdev = vport->back;
11919 
11920     *alloc_tqps = vport->alloc_tqps;
11921     *max_rss_size = hdev->pf_rss_size_max;
11922 }
11923 
11924 static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
11925 {
11926     struct hclge_vport *vport = hclge_get_vport(handle);
11927     u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
11928     struct hclge_dev *hdev = vport->back;
11929     u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
11930     u16 tc_valid[HCLGE_MAX_TC_NUM];
11931     u16 roundup_size;
11932     unsigned int i;
11933 
11934     roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
11935     roundup_size = ilog2(roundup_size);
11936     /* Set the RSS TC mode according to the new RSS size */
11937     for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
11938         tc_valid[i] = 0;
11939 
11940         if (!(hdev->hw_tc_map & BIT(i)))
11941             continue;
11942 
11943         tc_valid[i] = 1;
11944         tc_size[i] = roundup_size;
11945         tc_offset[i] = vport->nic.kinfo.rss_size * i;
11946     }
11947 
11948     return hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, tc_valid,
11949                       tc_size);
11950 }
11951 
11952 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
11953                   bool rxfh_configured)
11954 {
11955     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
11956     struct hclge_vport *vport = hclge_get_vport(handle);
11957     struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
11958     struct hclge_dev *hdev = vport->back;
11959     u16 cur_rss_size = kinfo->rss_size;
11960     u16 cur_tqps = kinfo->num_tqps;
11961     u32 *rss_indir;
11962     unsigned int i;
11963     int ret;
11964 
11965     kinfo->req_rss_size = new_tqps_num;
11966 
11967     ret = hclge_tm_vport_map_update(hdev);
11968     if (ret) {
11969         dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
11970         return ret;
11971     }
11972 
11973     ret = hclge_set_rss_tc_mode_cfg(handle);
11974     if (ret)
11975         return ret;
11976 
11977     /* RSS indirection table has been configured by user */
11978     if (rxfh_configured)
11979         goto out;
11980 
11981     /* Reinitialize the rss indirection table according to the new RSS size */
11982     rss_indir = kcalloc(ae_dev->dev_specs.rss_ind_tbl_size, sizeof(u32),
11983                 GFP_KERNEL);
11984     if (!rss_indir)
11985         return -ENOMEM;
11986 
11987     for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
11988         rss_indir[i] = i % kinfo->rss_size;
11989 
11990     ret = hclge_set_rss(handle, rss_indir, NULL, 0);
11991     if (ret)
11992         dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
11993             ret);
11994 
11995     kfree(rss_indir);
11996 
11997 out:
11998     if (!ret)
11999         dev_info(&hdev->pdev->dev,
12000              "Channels changed, rss_size from %u to %u, tqps from %u to %u\n",
12001              cur_rss_size, kinfo->rss_size,
12002              cur_tqps, kinfo->rss_size * kinfo->tc_info.num_tc);
12003 
12004     return ret;
12005 }
12006 
12007 static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
12008                   u32 *regs_num_64_bit)
12009 {
12010     struct hclge_desc desc;
12011     u32 total_num;
12012     int ret;
12013 
12014     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
12015     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12016     if (ret) {
12017         dev_err(&hdev->pdev->dev,
12018             "Query register number cmd failed, ret = %d.\n", ret);
12019         return ret;
12020     }
12021 
12022     *regs_num_32_bit = le32_to_cpu(desc.data[0]);
12023     *regs_num_64_bit = le32_to_cpu(desc.data[1]);
12024 
12025     total_num = *regs_num_32_bit + *regs_num_64_bit;
12026     if (!total_num)
12027         return -EINVAL;
12028 
12029     return 0;
12030 }
12031 
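/* Read back the 32-bit register dump over the command queue. Only the first
 * descriptor loses HCLGE_32_BIT_DESC_NODATA_LEN words to the request header,
 * so, for example, reading 100 registers needs
 * DIV_ROUND_UP(100 + 2, 8) = 13 descriptors.
 */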
12032 static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12033                  void *data)
12034 {
12035 #define HCLGE_32_BIT_REG_RTN_DATANUM 8
12036 #define HCLGE_32_BIT_DESC_NODATA_LEN 2
12037 
12038     struct hclge_desc *desc;
12039     u32 *reg_val = data;
12040     __le32 *desc_data;
12041     int nodata_num;
12042     int cmd_num;
12043     int i, k, n;
12044     int ret;
12045 
12046     if (regs_num == 0)
12047         return 0;
12048 
12049     nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
12050     cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
12051                    HCLGE_32_BIT_REG_RTN_DATANUM);
12052     desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12053     if (!desc)
12054         return -ENOMEM;
12055 
12056     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
12057     ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12058     if (ret) {
12059         dev_err(&hdev->pdev->dev,
12060             "Query 32 bit register cmd failed, ret = %d.\n", ret);
12061         kfree(desc);
12062         return ret;
12063     }
12064 
12065     for (i = 0; i < cmd_num; i++) {
12066         if (i == 0) {
12067             desc_data = (__le32 *)(&desc[i].data[0]);
12068             n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
12069         } else {
12070             desc_data = (__le32 *)(&desc[i]);
12071             n = HCLGE_32_BIT_REG_RTN_DATANUM;
12072         }
12073         for (k = 0; k < n; k++) {
12074             *reg_val++ = le32_to_cpu(*desc_data++);
12075 
12076             regs_num--;
12077             if (!regs_num)
12078                 break;
12079         }
12080     }
12081 
12082     kfree(desc);
12083     return 0;
12084 }
12085 
12086 static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
12087                  void *data)
12088 {
12089 #define HCLGE_64_BIT_REG_RTN_DATANUM 4
12090 #define HCLGE_64_BIT_DESC_NODATA_LEN 1
12091 
12092     struct hclge_desc *desc;
12093     u64 *reg_val = data;
12094     __le64 *desc_data;
12095     int nodata_len;
12096     int cmd_num;
12097     int i, k, n;
12098     int ret;
12099 
12100     if (regs_num == 0)
12101         return 0;
12102 
12103     nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
12104     cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
12105                    HCLGE_64_BIT_REG_RTN_DATANUM);
12106     desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
12107     if (!desc)
12108         return -ENOMEM;
12109 
12110     hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
12111     ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
12112     if (ret) {
12113         dev_err(&hdev->pdev->dev,
12114             "Query 64 bit register cmd failed, ret = %d.\n", ret);
12115         kfree(desc);
12116         return ret;
12117     }
12118 
12119     for (i = 0; i < cmd_num; i++) {
12120         if (i == 0) {
12121             desc_data = (__le64 *)(&desc[i].data[0]);
12122             n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
12123         } else {
12124             desc_data = (__le64 *)(&desc[i]);
12125             n = HCLGE_64_BIT_REG_RTN_DATANUM;
12126         }
12127         for (k = 0; k < n; k++) {
12128             *reg_val++ = le64_to_cpu(*desc_data++);
12129 
12130             regs_num--;
12131             if (!regs_num)
12132                 break;
12133         }
12134     }
12135 
12136     kfree(desc);
12137     return 0;
12138 }
12139 
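/* The register dump below is laid out in lines of REG_NUM_PER_LINE u32 values;
 * each register block is padded with SEPARATOR_VALUE words up to the next
 * line boundary (a full separator line when the block is already aligned).
 */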
12140 #define MAX_SEPARATE_NUM    4
12141 #define SEPARATOR_VALUE     0xFDFCFBFA
12142 #define REG_NUM_PER_LINE    4
12143 #define REG_LEN_PER_LINE    (REG_NUM_PER_LINE * sizeof(u32))
12144 #define REG_SEPARATOR_LINE  1
12145 #define REG_NUM_REMAIN_MASK 3
12146 
12147 int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
12148 {
12149     int i;
12150 
12151     /* initialize command BDs except the last one */
12152     for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
12153         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
12154                        true);
12155         desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12156     }
12157 
12158     /* initialize the last command BD */
12159     hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
12160 
12161     return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
12162 }
12163 
12164 static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
12165                     int *bd_num_list,
12166                     u32 type_num)
12167 {
12168     u32 entries_per_desc, desc_index, index, offset, i;
12169     struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
12170     int ret;
12171 
12172     ret = hclge_query_bd_num_cmd_send(hdev, desc);
12173     if (ret) {
12174         dev_err(&hdev->pdev->dev,
12175             "Get dfx bd num fail, status is %d.\n", ret);
12176         return ret;
12177     }
12178 
12179     entries_per_desc = ARRAY_SIZE(desc[0].data);
12180     for (i = 0; i < type_num; i++) {
12181         offset = hclge_dfx_bd_offset_list[i];
12182         index = offset % entries_per_desc;
12183         desc_index = offset / entries_per_desc;
12184         bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
12185     }
12186 
12187     return ret;
12188 }
12189 
12190 static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
12191                   struct hclge_desc *desc_src, int bd_num,
12192                   enum hclge_opcode_type cmd)
12193 {
12194     struct hclge_desc *desc = desc_src;
12195     int i, ret;
12196 
12197     hclge_cmd_setup_basic_desc(desc, cmd, true);
12198     for (i = 0; i < bd_num - 1; i++) {
12199         desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12200         desc++;
12201         hclge_cmd_setup_basic_desc(desc, cmd, true);
12202     }
12203 
12204     desc = desc_src;
12205     ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
12206     if (ret)
12207         dev_err(&hdev->pdev->dev,
12208             "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
12209             cmd, ret);
12210 
12211     return ret;
12212 }
12213 
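/* Copy the DFX register values out of the completed descriptors and pad the
 * output with SEPARATOR_VALUE words to the dump's line size; returns the
 * number of u32 entries written.
 */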
12214 static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
12215                     void *data)
12216 {
12217     int entries_per_desc, reg_num, separator_num, desc_index, index, i;
12218     struct hclge_desc *desc = desc_src;
12219     u32 *reg = data;
12220 
12221     entries_per_desc = ARRAY_SIZE(desc->data);
12222     reg_num = entries_per_desc * bd_num;
12223     separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
12224     for (i = 0; i < reg_num; i++) {
12225         index = i % entries_per_desc;
12226         desc_index = i / entries_per_desc;
12227         *reg++ = le32_to_cpu(desc[desc_index].data[index]);
12228     }
12229     for (i = 0; i < separator_num; i++)
12230         *reg++ = SEPARATOR_VALUE;
12231 
12232     return reg_num + separator_num;
12233 }
12234 
12235 static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
12236 {
12237     u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12238     int data_len_per_desc, bd_num, i;
12239     int *bd_num_list;
12240     u32 data_len;
12241     int ret;
12242 
12243     bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12244     if (!bd_num_list)
12245         return -ENOMEM;
12246 
12247     ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12248     if (ret) {
12249         dev_err(&hdev->pdev->dev,
12250             "Get dfx reg bd num fail, status is %d.\n", ret);
12251         goto out;
12252     }
12253 
12254     data_len_per_desc = sizeof_field(struct hclge_desc, data);
12255     *len = 0;
12256     for (i = 0; i < dfx_reg_type_num; i++) {
12257         bd_num = bd_num_list[i];
12258         data_len = data_len_per_desc * bd_num;
12259         *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
12260     }
12261 
12262 out:
12263     kfree(bd_num_list);
12264     return ret;
12265 }
12266 
12267 static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
12268 {
12269     u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
12270     int bd_num, bd_num_max, buf_len, i;
12271     struct hclge_desc *desc_src;
12272     int *bd_num_list;
12273     u32 *reg = data;
12274     int ret;
12275 
12276     bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
12277     if (!bd_num_list)
12278         return -ENOMEM;
12279 
12280     ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
12281     if (ret) {
12282         dev_err(&hdev->pdev->dev,
12283             "Get dfx reg bd num fail, status is %d.\n", ret);
12284         goto out;
12285     }
12286 
12287     bd_num_max = bd_num_list[0];
12288     for (i = 1; i < dfx_reg_type_num; i++)
12289         bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
12290 
12291     buf_len = sizeof(*desc_src) * bd_num_max;
12292     desc_src = kzalloc(buf_len, GFP_KERNEL);
12293     if (!desc_src) {
12294         ret = -ENOMEM;
12295         goto out;
12296     }
12297 
12298     for (i = 0; i < dfx_reg_type_num; i++) {
12299         bd_num = bd_num_list[i];
12300         ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
12301                          hclge_dfx_reg_opcode_list[i]);
12302         if (ret) {
12303             dev_err(&hdev->pdev->dev,
12304                 "Get dfx reg fail, status is %d.\n", ret);
12305             break;
12306         }
12307 
12308         reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
12309     }
12310 
12311     kfree(desc_src);
12312 out:
12313     kfree(bd_num_list);
12314     return ret;
12315 }
12316 
12317 static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
12318                   struct hnae3_knic_private_info *kinfo)
12319 {
12320 #define HCLGE_RING_REG_OFFSET       0x200
12321 #define HCLGE_RING_INT_REG_OFFSET   0x4
12322 
12323     int i, j, reg_num, separator_num;
12324     int data_num_sum;
12325     u32 *reg = data;
12326 
12327     /* fetch per-PF register values from the PF PCIe register space */
12328     reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
12329     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12330     for (i = 0; i < reg_num; i++)
12331         *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
12332     for (i = 0; i < separator_num; i++)
12333         *reg++ = SEPARATOR_VALUE;
12334     data_num_sum = reg_num + separator_num;
12335 
12336     reg_num = ARRAY_SIZE(common_reg_addr_list);
12337     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12338     for (i = 0; i < reg_num; i++)
12339         *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
12340     for (i = 0; i < separator_num; i++)
12341         *reg++ = SEPARATOR_VALUE;
12342     data_num_sum += reg_num + separator_num;
12343 
12344     reg_num = ARRAY_SIZE(ring_reg_addr_list);
12345     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12346     for (j = 0; j < kinfo->num_tqps; j++) {
12347         for (i = 0; i < reg_num; i++)
12348             *reg++ = hclge_read_dev(&hdev->hw,
12349                         ring_reg_addr_list[i] +
12350                         HCLGE_RING_REG_OFFSET * j);
12351         for (i = 0; i < separator_num; i++)
12352             *reg++ = SEPARATOR_VALUE;
12353     }
12354     data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
12355 
12356     reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
12357     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12358     for (j = 0; j < hdev->num_msi_used - 1; j++) {
12359         for (i = 0; i < reg_num; i++)
12360             *reg++ = hclge_read_dev(&hdev->hw,
12361                         tqp_intr_reg_addr_list[i] +
12362                         HCLGE_RING_INT_REG_OFFSET * j);
12363         for (i = 0; i < separator_num; i++)
12364             *reg++ = SEPARATOR_VALUE;
12365     }
12366     data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
12367 
12368     return data_num_sum;
12369 }
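/* Size the register dump: each register group (command queue, common,
 * per-ring, per-TQP-interrupt, 32-bit and 64-bit query groups) is counted in
 * whole REG_LEN_PER_LINE lines plus one separator line, and the DFX register
 * length reported by firmware is added on top.
 */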
12370 
12371 static int hclge_get_regs_len(struct hnae3_handle *handle)
12372 {
12373     int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
12374     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12375     struct hclge_vport *vport = hclge_get_vport(handle);
12376     struct hclge_dev *hdev = vport->back;
12377     int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
12378     int regs_lines_32_bit, regs_lines_64_bit;
12379     int ret;
12380 
12381     ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12382     if (ret) {
12383         dev_err(&hdev->pdev->dev,
12384             "Get register number failed, ret = %d.\n", ret);
12385         return ret;
12386     }
12387 
12388     ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
12389     if (ret) {
12390         dev_err(&hdev->pdev->dev,
12391             "Get dfx reg len failed, ret = %d.\n", ret);
12392         return ret;
12393     }
12394 
12395     cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
12396         REG_SEPARATOR_LINE;
12397     common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
12398         REG_SEPARATOR_LINE;
12399     ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
12400         REG_SEPARATOR_LINE;
12401     tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
12402         REG_SEPARATOR_LINE;
12403     regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
12404         REG_SEPARATOR_LINE;
12405     regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
12406         REG_SEPARATOR_LINE;
12407 
12408     return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
12409         tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
12410         regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
12411 }
12412 
12413 static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
12414                void *data)
12415 {
12416     struct hnae3_knic_private_info *kinfo = &handle->kinfo;
12417     struct hclge_vport *vport = hclge_get_vport(handle);
12418     struct hclge_dev *hdev = vport->back;
12419     u32 regs_num_32_bit, regs_num_64_bit;
12420     int i, reg_num, separator_num, ret;
12421     u32 *reg = data;
12422 
12423     *version = hdev->fw_version;
12424 
12425     ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
12426     if (ret) {
12427         dev_err(&hdev->pdev->dev,
12428             "Get register number failed, ret = %d.\n", ret);
12429         return;
12430     }
12431 
12432     reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
12433 
12434     ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
12435     if (ret) {
12436         dev_err(&hdev->pdev->dev,
12437             "Get 32 bit register failed, ret = %d.\n", ret);
12438         return;
12439     }
12440     reg_num = regs_num_32_bit;
12441     reg += reg_num;
12442     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12443     for (i = 0; i < separator_num; i++)
12444         *reg++ = SEPARATOR_VALUE;
12445 
12446     ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
12447     if (ret) {
12448         dev_err(&hdev->pdev->dev,
12449             "Get 64 bit register failed, ret = %d.\n", ret);
12450         return;
12451     }
12452     reg_num = regs_num_64_bit * 2;
12453     reg += reg_num;
12454     separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
12455     for (i = 0; i < separator_num; i++)
12456         *reg++ = SEPARATOR_VALUE;
12457 
12458     ret = hclge_get_dfx_reg(hdev, reg);
12459     if (ret)
12460         dev_err(&hdev->pdev->dev,
12461             "Get dfx register failed, ret = %d.\n", ret);
12462 }
12463 
12464 static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
12465 {
12466     struct hclge_set_led_state_cmd *req;
12467     struct hclge_desc desc;
12468     int ret;
12469 
12470     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
12471 
12472     req = (struct hclge_set_led_state_cmd *)desc.data;
12473     hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
12474             HCLGE_LED_LOCATE_STATE_S, locate_led_status);
12475 
12476     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12477     if (ret)
12478         dev_err(&hdev->pdev->dev,
12479             "Send set led state cmd error, ret =%d\n", ret);
12480 
12481     return ret;
12482 }
12483 
12484 enum hclge_led_status {
12485     HCLGE_LED_OFF,
12486     HCLGE_LED_ON,
12487     HCLGE_LED_NO_CHANGE = 0xFF,
12488 };
12489 
12490 static int hclge_set_led_id(struct hnae3_handle *handle,
12491                 enum ethtool_phys_id_state status)
12492 {
12493     struct hclge_vport *vport = hclge_get_vport(handle);
12494     struct hclge_dev *hdev = vport->back;
12495 
12496     switch (status) {
12497     case ETHTOOL_ID_ACTIVE:
12498         return hclge_set_led_status(hdev, HCLGE_LED_ON);
12499     case ETHTOOL_ID_INACTIVE:
12500         return hclge_set_led_status(hdev, HCLGE_LED_OFF);
12501     default:
12502         return -EINVAL;
12503     }
12504 }
12505 
12506 static void hclge_get_link_mode(struct hnae3_handle *handle,
12507                 unsigned long *supported,
12508                 unsigned long *advertising)
12509 {
12510     unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
12511     struct hclge_vport *vport = hclge_get_vport(handle);
12512     struct hclge_dev *hdev = vport->back;
12513     unsigned int idx = 0;
12514 
12515     for (; idx < size; idx++) {
12516         supported[idx] = hdev->hw.mac.supported[idx];
12517         advertising[idx] = hdev->hw.mac.advertising[idx];
12518     }
12519 }
12520 
12521 static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
12522 {
12523     struct hclge_vport *vport = hclge_get_vport(handle);
12524     struct hclge_dev *hdev = vport->back;
12525     bool gro_en_old = hdev->gro_en;
12526     int ret;
12527 
12528     hdev->gro_en = enable;
12529     ret = hclge_config_gro(hdev);
12530     if (ret)
12531         hdev->gro_en = gro_en_old;
12532 
12533     return ret;
12534 }
12535 
12536 static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
12537 {
12538     struct hclge_vport *vport = &hdev->vport[0];
12539     struct hnae3_handle *handle = &vport->nic;
12540     u8 tmp_flags;
12541     int ret;
12542     u16 i;
12543 
12544     if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
12545         set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
12546         vport->last_promisc_flags = vport->overflow_promisc_flags;
12547     }
12548 
12549     if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
12550         tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
12551         ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
12552                          tmp_flags & HNAE3_MPE);
12553         if (!ret) {
12554             clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12555                   &vport->state);
12556             set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
12557                 &vport->state);
12558         }
12559     }
12560 
12561     for (i = 1; i < hdev->num_alloc_vport; i++) {
12562         bool uc_en = false;
12563         bool mc_en = false;
12564         bool bc_en;
12565 
12566         vport = &hdev->vport[i];
12567 
12568         if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12569                     &vport->state))
12570             continue;
12571 
12572         if (vport->vf_info.trusted) {
12573             uc_en = vport->vf_info.request_uc_en > 0 ||
12574                 vport->overflow_promisc_flags &
12575                 HNAE3_OVERFLOW_UPE;
12576             mc_en = vport->vf_info.request_mc_en > 0 ||
12577                 vport->overflow_promisc_flags &
12578                 HNAE3_OVERFLOW_MPE;
12579         }
12580         bc_en = vport->vf_info.request_bc_en > 0;
12581 
12582         ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
12583                          mc_en, bc_en);
12584         if (ret) {
12585             set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
12586                 &vport->state);
12587             return;
12588         }
12589         hclge_set_vport_vlan_fltr_change(vport);
12590     }
12591 }
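/* Editorial note (not part of the original source): callers do not program
 * promiscuous mode directly; they flag a change via
 * HCLGE_VPORT_STATE_PROMISC_CHANGE and this sync routine, typically invoked
 * from the driver's deferred service work, batches the hardware updates.
 * For VF vports the effective policy above is roughly:
 *
 *	uc_en = trusted && (vf_requested_uc || uc_mac_table_overflowed)
 *	mc_en = trusted && (vf_requested_mc || mc_mac_table_overflowed)
 *	bc_en = vf_requested_bc
 *
 * so an untrusted VF can never enable unicast/multicast promiscuity for
 * itself, and MAC-table overflow forces promiscuity only for trusted VFs.
 */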
12592 
12593 static bool hclge_module_existed(struct hclge_dev *hdev)
12594 {
12595     struct hclge_desc desc;
12596     u32 existed;
12597     int ret;
12598 
12599     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
12600     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12601     if (ret) {
12602         dev_err(&hdev->pdev->dev,
12603             "failed to get SFP exist state, ret = %d\n", ret);
12604         return false;
12605     }
12606 
12607     existed = le32_to_cpu(desc.data[0]);
12608 
12609     return existed != 0;
12610 }
12611 
12612 /* One read needs 6 BDs (140 bytes in total).
12613  * Returns the number of bytes actually read; 0 means the read failed.
12614  */
12615 static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
12616                      u32 len, u8 *data)
12617 {
12618     struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
12619     struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
12620     u16 read_len;
12621     u16 copy_len;
12622     int ret;
12623     int i;
12624 
12625     /* setup all 6 bds to read module eeprom info. */
12626     for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12627         hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
12628                        true);
12629 
12630         /* bd0~bd4 need next flag */
12631         if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
12632             desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
12633     }
12634 
12635     /* set up BD0; this BD carries the read offset and length. */
12636     sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
12637     sfp_info_bd0->offset = cpu_to_le16((u16)offset);
12638     read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
12639     sfp_info_bd0->read_len = cpu_to_le16(read_len);
12640 
12641     ret = hclge_cmd_send(&hdev->hw, desc, i);
12642     if (ret) {
12643         dev_err(&hdev->pdev->dev,
12644             "failed to get SFP eeprom info, ret = %d\n", ret);
12645         return 0;
12646     }
12647 
12648     /* copy sfp info from bd0 to out buffer. */
12649     copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
12650     memcpy(data, sfp_info_bd0->data, copy_len);
12651     read_len = copy_len;
12652 
12653     /* copy sfp info from bd1~bd5 to out buffer if needed. */
12654     for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
12655         if (read_len >= len)
12656             return read_len;
12657 
12658         copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
12659         memcpy(data + read_len, desc[i].data, copy_len);
12660         read_len += copy_len;
12661     }
12662 
12663     return read_len;
12664 }
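/* Editorial note (not part of the original source): the "140 bytes" mentioned
 * above is one BD0 payload plus five BDX payloads.  Assuming the payload
 * sizes defined for this command set are 20 bytes for BD0
 * (HCLGE_SFP_INFO_BD0_LEN) and 24 bytes for each of BD1..BD5
 * (HCLGE_SFP_INFO_BDX_LEN):
 *
 *	20 + 5 * 24 = 140 = HCLGE_SFP_INFO_MAX_LEN
 *
 * so a single firmware command returns at most 140 EEPROM bytes, and the
 * caller below loops until the requested length has been read.
 */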
12665 
12666 static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
12667                    u32 len, u8 *data)
12668 {
12669     struct hclge_vport *vport = hclge_get_vport(handle);
12670     struct hclge_dev *hdev = vport->back;
12671     u32 read_len = 0;
12672     u16 data_len;
12673 
12674     if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
12675         return -EOPNOTSUPP;
12676 
12677     if (!hclge_module_existed(hdev))
12678         return -ENXIO;
12679 
12680     while (read_len < len) {
12681         data_len = hclge_get_sfp_eeprom_info(hdev,
12682                              offset + read_len,
12683                              len - read_len,
12684                              data + read_len);
12685         if (!data_len)
12686             return -EIO;
12687 
12688         read_len += data_len;
12689     }
12690 
12691     return 0;
12692 }
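/* Editorial note (not part of the original source): this is the backend for
 * ethtool's module EEPROM dump on fibre ports, e.g. (illustrative command):
 *
 *	ethtool -m eth0
 *
 * Non-fibre media report -EOPNOTSUPP and an empty cage reports -ENXIO, which
 * lets userspace distinguish "not supported" from "no module plugged in".
 */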
12693 
12694 static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle,
12695                      u32 *status_code)
12696 {
12697     struct hclge_vport *vport = hclge_get_vport(handle);
12698     struct hclge_dev *hdev = vport->back;
12699     struct hclge_desc desc;
12700     int ret;
12701 
12702     if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)
12703         return -EOPNOTSUPP;
12704 
12705     hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true);
12706     ret = hclge_cmd_send(&hdev->hw, &desc, 1);
12707     if (ret) {
12708         dev_err(&hdev->pdev->dev,
12709             "failed to query link diagnosis info, ret = %d\n", ret);
12710         return ret;
12711     }
12712 
12713     *status_code = le32_to_cpu(desc.data[0]);
12714     return 0;
12715 }
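/* Editorial note (not part of the original source): link diagnosis is only
 * offered on devices newer than HNAE3_DEVICE_VERSION_V2.  The firmware hands
 * back a raw status code in desc.data[0]; interpreting it (for example as
 * ethtool extended link-state information) is left to the hnae3 client that
 * calls .get_link_diagnosis_info.
 */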
12716 
12717 /* After SR-IOV is disabled, the VF still holds configuration and state
12718  * that were set up by the PF and need to be cleaned.
12719  */
12720 static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
12721 {
12722     struct hclge_dev *hdev = vport->back;
12723     struct hclge_vlan_info vlan_info;
12724     int ret;
12725 
12726     /* after SR-IOV is disabled, clean the VF rate limit configured by the PF */
12727     ret = hclge_tm_qs_shaper_cfg(vport, 0);
12728     if (ret)
12729         dev_err(&hdev->pdev->dev,
12730             "failed to clean vf%d rate config, ret = %d\n",
12731             vfid, ret);
12732 
12733     vlan_info.vlan_tag = 0;
12734     vlan_info.qos = 0;
12735     vlan_info.vlan_proto = ETH_P_8021Q;
12736     ret = hclge_update_port_base_vlan_cfg(vport,
12737                           HNAE3_PORT_BASE_VLAN_DISABLE,
12738                           &vlan_info);
12739     if (ret)
12740         dev_err(&hdev->pdev->dev,
12741             "failed to clean vf%d port base vlan, ret = %d\n",
12742             vfid, ret);
12743 
12744     ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, false);
12745     if (ret)
12746         dev_err(&hdev->pdev->dev,
12747             "failed to clean vf%d spoof config, ret = %d\n",
12748             vfid, ret);
12749 
12750     memset(&vport->vf_info, 0, sizeof(vport->vf_info));
12751 }
12752 
12753 static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
12754 {
12755     struct hclge_dev *hdev = ae_dev->priv;
12756     struct hclge_vport *vport;
12757     int i;
12758 
12759     for (i = 0; i < num_vfs; i++) {
12760         vport = &hdev->vport[i + HCLGE_VF_VPORT_START_NUM];
12761 
12762         hclge_clear_vport_vf_info(vport, i);
12763     }
12764 }
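/* Editorial note (not part of the original source): hclge_clean_vport_config()
 * is exported as .clean_vf_config below.  When SR-IOV is disabled it walks the
 * first num_vfs VF vports (which start at HCLGE_VF_VPORT_START_NUM, after the
 * PF's own vport) and clears the rate limit, port-based VLAN and spoof-check
 * state the PF had programmed for each VF.
 */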
12765 
12766 static const struct hnae3_ae_ops hclge_ops = {
12767     .init_ae_dev = hclge_init_ae_dev,
12768     .uninit_ae_dev = hclge_uninit_ae_dev,
12769     .reset_prepare = hclge_reset_prepare_general,
12770     .reset_done = hclge_reset_done,
12771     .init_client_instance = hclge_init_client_instance,
12772     .uninit_client_instance = hclge_uninit_client_instance,
12773     .map_ring_to_vector = hclge_map_ring_to_vector,
12774     .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
12775     .get_vector = hclge_get_vector,
12776     .put_vector = hclge_put_vector,
12777     .set_promisc_mode = hclge_set_promisc_mode,
12778     .request_update_promisc_mode = hclge_request_update_promisc_mode,
12779     .set_loopback = hclge_set_loopback,
12780     .start = hclge_ae_start,
12781     .stop = hclge_ae_stop,
12782     .client_start = hclge_client_start,
12783     .client_stop = hclge_client_stop,
12784     .get_status = hclge_get_status,
12785     .get_ksettings_an_result = hclge_get_ksettings_an_result,
12786     .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
12787     .get_media_type = hclge_get_media_type,
12788     .check_port_speed = hclge_check_port_speed,
12789     .get_fec = hclge_get_fec,
12790     .set_fec = hclge_set_fec,
12791     .get_rss_key_size = hclge_comm_get_rss_key_size,
12792     .get_rss = hclge_get_rss,
12793     .set_rss = hclge_set_rss,
12794     .set_rss_tuple = hclge_set_rss_tuple,
12795     .get_rss_tuple = hclge_get_rss_tuple,
12796     .get_tc_size = hclge_get_tc_size,
12797     .get_mac_addr = hclge_get_mac_addr,
12798     .set_mac_addr = hclge_set_mac_addr,
12799     .do_ioctl = hclge_do_ioctl,
12800     .add_uc_addr = hclge_add_uc_addr,
12801     .rm_uc_addr = hclge_rm_uc_addr,
12802     .add_mc_addr = hclge_add_mc_addr,
12803     .rm_mc_addr = hclge_rm_mc_addr,
12804     .set_autoneg = hclge_set_autoneg,
12805     .get_autoneg = hclge_get_autoneg,
12806     .restart_autoneg = hclge_restart_autoneg,
12807     .halt_autoneg = hclge_halt_autoneg,
12808     .get_pauseparam = hclge_get_pauseparam,
12809     .set_pauseparam = hclge_set_pauseparam,
12810     .set_mtu = hclge_set_mtu,
12811     .reset_queue = hclge_reset_tqp,
12812     .get_stats = hclge_get_stats,
12813     .get_mac_stats = hclge_get_mac_stat,
12814     .update_stats = hclge_update_stats,
12815     .get_strings = hclge_get_strings,
12816     .get_sset_count = hclge_get_sset_count,
12817     .get_fw_version = hclge_get_fw_version,
12818     .get_mdix_mode = hclge_get_mdix_mode,
12819     .enable_vlan_filter = hclge_enable_vlan_filter,
12820     .set_vlan_filter = hclge_set_vlan_filter,
12821     .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
12822     .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
12823     .reset_event = hclge_reset_event,
12824     .get_reset_level = hclge_get_reset_level,
12825     .set_default_reset_request = hclge_set_def_reset_request,
12826     .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
12827     .set_channels = hclge_set_channels,
12828     .get_channels = hclge_get_channels,
12829     .get_regs_len = hclge_get_regs_len,
12830     .get_regs = hclge_get_regs,
12831     .set_led_id = hclge_set_led_id,
12832     .get_link_mode = hclge_get_link_mode,
12833     .add_fd_entry = hclge_add_fd_entry,
12834     .del_fd_entry = hclge_del_fd_entry,
12835     .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
12836     .get_fd_rule_info = hclge_get_fd_rule_info,
12837     .get_fd_all_rules = hclge_get_all_rules,
12838     .enable_fd = hclge_enable_fd,
12839     .add_arfs_entry = hclge_add_fd_entry_by_arfs,
12840     .dbg_read_cmd = hclge_dbg_read_cmd,
12841     .handle_hw_ras_error = hclge_handle_hw_ras_error,
12842     .get_hw_reset_stat = hclge_get_hw_reset_stat,
12843     .ae_dev_resetting = hclge_ae_dev_resetting,
12844     .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
12845     .set_gro_en = hclge_gro_en,
12846     .get_global_queue_id = hclge_covert_handle_qid_global,
12847     .set_timer_task = hclge_set_timer_task,
12848     .mac_connect_phy = hclge_mac_connect_phy,
12849     .mac_disconnect_phy = hclge_mac_disconnect_phy,
12850     .get_vf_config = hclge_get_vf_config,
12851     .set_vf_link_state = hclge_set_vf_link_state,
12852     .set_vf_spoofchk = hclge_set_vf_spoofchk,
12853     .set_vf_trust = hclge_set_vf_trust,
12854     .set_vf_rate = hclge_set_vf_rate,
12855     .set_vf_mac = hclge_set_vf_mac,
12856     .get_module_eeprom = hclge_get_module_eeprom,
12857     .get_cmdq_stat = hclge_get_cmdq_stat,
12858     .add_cls_flower = hclge_add_cls_flower,
12859     .del_cls_flower = hclge_del_cls_flower,
12860     .cls_flower_active = hclge_is_cls_flower_active,
12861     .get_phy_link_ksettings = hclge_get_phy_link_ksettings,
12862     .set_phy_link_ksettings = hclge_set_phy_link_ksettings,
12863     .set_tx_hwts_info = hclge_ptp_set_tx_info,
12864     .get_rx_hwts = hclge_ptp_get_rx_hwts,
12865     .get_ts_info = hclge_ptp_get_ts_info,
12866     .get_link_diagnosis_info = hclge_get_link_diagnosis_info,
12867     .clean_vf_config = hclge_clean_vport_config,
12868 };
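/* Editorial note (not part of the original source): hclge does not register a
 * PCI driver of its own.  It publishes this ops table via the hnae3_ae_algo
 * below; the hnae3 framework matches the algo against the IDs in
 * ae_algo_pci_tbl and then drives the PF through these callbacks on behalf of
 * the ENET/RoCE client drivers.
 */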
12869 
12870 static struct hnae3_ae_algo ae_algo = {
12871     .ops = &hclge_ops,
12872     .pdev_id_table = ae_algo_pci_tbl,
12873 };
12874 
12875 static int hclge_init(void)
12876 {
12877     pr_info("%s is initializing\n", HCLGE_NAME);
12878 
12879     hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
12880     if (!hclge_wq) {
12881         pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
12882         return -ENOMEM;
12883     }
12884 
12885     hnae3_register_ae_algo(&ae_algo);
12886 
12887     return 0;
12888 }
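/* Editorial note (not part of the original source): the unbound workqueue
 * created in hclge_init() backs the driver's deferred service/reset work, so
 * it is allocated before hnae3_register_ae_algo() to guarantee it exists by
 * the time any device starts queueing work onto it.
 */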
12889 
12890 static void hclge_exit(void)
12891 {
12892     hnae3_unregister_ae_algo_prepare(&ae_algo);
12893     hnae3_unregister_ae_algo(&ae_algo);
12894     destroy_workqueue(hclge_wq);
12895 }
12896 module_init(hclge_init);
12897 module_exit(hclge_exit);
12898 
12899 MODULE_LICENSE("GPL");
12900 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
12901 MODULE_DESCRIPTION("HCLGE Driver");
12902 MODULE_VERSION(HCLGE_MOD_VERSION);