0001 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
0002 /* QLogic qede NIC Driver
0003  * Copyright (c) 2015-2017  QLogic Corporation
0004  * Copyright (c) 2019-2020 Marvell International Ltd.
0005  */
0006 
0007 #include <linux/crash_dump.h>
0008 #include <linux/module.h>
0009 #include <linux/pci.h>
0010 #include <linux/device.h>
0011 #include <linux/netdevice.h>
0012 #include <linux/etherdevice.h>
0013 #include <linux/skbuff.h>
0014 #include <linux/errno.h>
0015 #include <linux/list.h>
0016 #include <linux/string.h>
0017 #include <linux/dma-mapping.h>
0018 #include <linux/interrupt.h>
0019 #include <asm/byteorder.h>
0020 #include <asm/param.h>
0021 #include <linux/io.h>
0022 #include <linux/netdev_features.h>
0023 #include <linux/udp.h>
0024 #include <linux/tcp.h>
0025 #include <net/udp_tunnel.h>
0026 #include <linux/ip.h>
0027 #include <net/ipv6.h>
0028 #include <net/tcp.h>
0029 #include <linux/if_ether.h>
0030 #include <linux/if_vlan.h>
0031 #include <linux/pkt_sched.h>
0032 #include <linux/ethtool.h>
0033 #include <linux/in.h>
0034 #include <linux/random.h>
0035 #include <net/ip6_checksum.h>
0036 #include <linux/bitops.h>
0037 #include <linux/vmalloc.h>
0038 #include <linux/aer.h>
0039 #include "qede.h"
0040 #include "qede_ptp.h"
0041 
0042 MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver");
0043 MODULE_LICENSE("GPL");
0044 
0045 static uint debug;
0046 module_param(debug, uint, 0);
0047 MODULE_PARM_DESC(debug, " Default debug msglevel");
0048 
0049 static const struct qed_eth_ops *qed_ops;
0050 
0051 #define CHIP_NUM_57980S_40      0x1634
0052 #define CHIP_NUM_57980S_10      0x1666
0053 #define CHIP_NUM_57980S_MF      0x1636
0054 #define CHIP_NUM_57980S_100     0x1644
0055 #define CHIP_NUM_57980S_50      0x1654
0056 #define CHIP_NUM_57980S_25      0x1656
0057 #define CHIP_NUM_57980S_IOV     0x1664
0058 #define CHIP_NUM_AH         0x8070
0059 #define CHIP_NUM_AH_IOV         0x8090
0060 
0061 #ifndef PCI_DEVICE_ID_NX2_57980E
0062 #define PCI_DEVICE_ID_57980S_40     CHIP_NUM_57980S_40
0063 #define PCI_DEVICE_ID_57980S_10     CHIP_NUM_57980S_10
0064 #define PCI_DEVICE_ID_57980S_MF     CHIP_NUM_57980S_MF
0065 #define PCI_DEVICE_ID_57980S_100    CHIP_NUM_57980S_100
0066 #define PCI_DEVICE_ID_57980S_50     CHIP_NUM_57980S_50
0067 #define PCI_DEVICE_ID_57980S_25     CHIP_NUM_57980S_25
0068 #define PCI_DEVICE_ID_57980S_IOV    CHIP_NUM_57980S_IOV
0069 #define PCI_DEVICE_ID_AH        CHIP_NUM_AH
0070 #define PCI_DEVICE_ID_AH_IOV        CHIP_NUM_AH_IOV
0071 
0072 #endif
0073 
0074 enum qede_pci_private {
0075     QEDE_PRIVATE_PF,
0076     QEDE_PRIVATE_VF
0077 };
0078 
0079 static const struct pci_device_id qede_pci_tbl[] = {
0080     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
0081     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
0082     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
0083     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
0084     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
0085     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
0086 #ifdef CONFIG_QED_SRIOV
0087     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
0088 #endif
0089     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH), QEDE_PRIVATE_PF},
0090 #ifdef CONFIG_QED_SRIOV
0091     {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_AH_IOV), QEDE_PRIVATE_VF},
0092 #endif
0093     { 0 }
0094 };
0095 
0096 MODULE_DEVICE_TABLE(pci, qede_pci_tbl);
0097 
0098 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id);
0099 static pci_ers_result_t
0100 qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state);
0101 
0102 #define TX_TIMEOUT      (5 * HZ)
0103 
0104 /* Utilize last protocol index for XDP */
0105 #define XDP_PI  11
0106 
0107 static void qede_remove(struct pci_dev *pdev);
0108 static void qede_shutdown(struct pci_dev *pdev);
0109 static void qede_link_update(void *dev, struct qed_link_output *link);
0110 static void qede_schedule_recovery_handler(void *dev);
0111 static void qede_recovery_handler(struct qede_dev *edev);
0112 static void qede_schedule_hw_err_handler(void *dev,
0113                      enum qed_hw_err_type err_type);
0114 static void qede_get_eth_tlv_data(void *edev, void *data);
0115 static void qede_get_generic_tlv_data(void *edev,
0116                       struct qed_generic_tlvs *data);
0117 static void qede_generic_hw_err_handler(struct qede_dev *edev);
0118 #ifdef CONFIG_QED_SRIOV
0119 static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos,
0120                 __be16 vlan_proto)
0121 {
0122     struct qede_dev *edev = netdev_priv(ndev);
0123 
0124     if (vlan > 4095) {
0125         DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
0126         return -EINVAL;
0127     }
0128 
0129     if (vlan_proto != htons(ETH_P_8021Q))
0130         return -EPROTONOSUPPORT;
0131 
0132     DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
0133            vlan, vf);
0134 
0135     return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
0136 }
0137 
0138 static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
0139 {
0140     struct qede_dev *edev = netdev_priv(ndev);
0141 
0142     DP_VERBOSE(edev, QED_MSG_IOV, "Setting MAC %pM to VF [%d]\n", mac, vfidx);
0143 
0144     if (!is_valid_ether_addr(mac)) {
0145         DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
0146         return -EINVAL;
0147     }
0148 
0149     return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
0150 }
0151 
0152 static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
0153 {
0154     struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
0155     struct qed_dev_info *qed_info = &edev->dev_info.common;
0156     struct qed_update_vport_params *vport_params;
0157     int rc;
0158 
0159     vport_params = vzalloc(sizeof(*vport_params));
0160     if (!vport_params)
0161         return -ENOMEM;
0162     DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
0163 
0164     rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
0165 
0166     /* Enable/Disable Tx switching for PF */
0167     if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
0168         !qed_info->b_inter_pf_switch && qed_info->tx_switching) {
0169         vport_params->vport_id = 0;
0170         vport_params->update_tx_switching_flg = 1;
0171         vport_params->tx_switching_flg = num_vfs_param ? 1 : 0;
0172         edev->ops->vport_update(edev->cdev, vport_params);
0173     }
0174 
0175     vfree(vport_params);
0176     return rc;
0177 }
0178 #endif
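/* Usage sketch (not part of the original source): .sriov_configure is invoked
 * by the PCI core when userspace writes to the sriov_numvfs sysfs attribute,
 * e.g.:
 *
 *   echo 2 > /sys/bus/pci/devices/<BDF>/sriov_numvfs    # create two VFs
 *   echo 0 > /sys/bus/pci/devices/<BDF>/sriov_numvfs    # tear them down
 *
 * <BDF> is a placeholder for the PF's PCI address.
 */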
0179 
0180 static const struct pci_error_handlers qede_err_handler = {
0181     .error_detected = qede_io_error_detected,
0182 };
0183 
0184 static struct pci_driver qede_pci_driver = {
0185     .name = "qede",
0186     .id_table = qede_pci_tbl,
0187     .probe = qede_probe,
0188     .remove = qede_remove,
0189     .shutdown = qede_shutdown,
0190 #ifdef CONFIG_QED_SRIOV
0191     .sriov_configure = qede_sriov_configure,
0192 #endif
0193     .err_handler = &qede_err_handler,
0194 };
0195 
0196 static struct qed_eth_cb_ops qede_ll_ops = {
0197     {
0198 #ifdef CONFIG_RFS_ACCEL
0199         .arfs_filter_op = qede_arfs_filter_op,
0200 #endif
0201         .link_update = qede_link_update,
0202         .schedule_recovery_handler = qede_schedule_recovery_handler,
0203         .schedule_hw_err_handler = qede_schedule_hw_err_handler,
0204         .get_generic_tlv_data = qede_get_generic_tlv_data,
0205         .get_protocol_tlv_data = qede_get_eth_tlv_data,
0206     },
0207     .force_mac = qede_force_mac,
0208     .ports_update = qede_udp_ports_update,
0209 };
0210 
0211 static int qede_netdev_event(struct notifier_block *this, unsigned long event,
0212                  void *ptr)
0213 {
0214     struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
0215     struct ethtool_drvinfo drvinfo;
0216     struct qede_dev *edev;
0217 
0218     if (event != NETDEV_CHANGENAME && event != NETDEV_CHANGEADDR)
0219         goto done;
0220 
0221     /* Check whether this is a qede device */
0222     if (!ndev || !ndev->ethtool_ops || !ndev->ethtool_ops->get_drvinfo)
0223         goto done;
0224 
0225     memset(&drvinfo, 0, sizeof(drvinfo));
0226     ndev->ethtool_ops->get_drvinfo(ndev, &drvinfo);
0227     if (strcmp(drvinfo.driver, "qede"))
0228         goto done;
0229     edev = netdev_priv(ndev);
0230 
0231     switch (event) {
0232     case NETDEV_CHANGENAME:
0233         /* Notify qed of the name change */
0234         if (!edev->ops || !edev->ops->common)
0235             goto done;
0236         edev->ops->common->set_name(edev->cdev, edev->ndev->name);
0237         break;
0238     case NETDEV_CHANGEADDR:
0239         edev = netdev_priv(ndev);
0240         qede_rdma_event_changeaddr(edev);
0241         break;
0242     }
0243 
0244 done:
0245     return NOTIFY_DONE;
0246 }
0247 
0248 static struct notifier_block qede_netdev_notifier = {
0249     .notifier_call = qede_netdev_event,
0250 };
0251 
0252 static
0253 int __init qede_init(void)
0254 {
0255     int ret;
0256 
0257     pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n");
0258 
0259     qede_forced_speed_maps_init();
0260 
0261     qed_ops = qed_get_eth_ops();
0262     if (!qed_ops) {
0263         pr_notice("Failed to get qed ethtool operations\n");
0264         return -EINVAL;
0265     }
0266 
0267     /* Must register notifier before pci ops, since we might miss
0268      * interface rename after pci probe and netdev registration.
0269      */
0270     ret = register_netdevice_notifier(&qede_netdev_notifier);
0271     if (ret) {
0272         pr_notice("Failed to register netdevice_notifier\n");
0273         qed_put_eth_ops();
0274         return -EINVAL;
0275     }
0276 
0277     ret = pci_register_driver(&qede_pci_driver);
0278     if (ret) {
0279         pr_notice("Failed to register driver\n");
0280         unregister_netdevice_notifier(&qede_netdev_notifier);
0281         qed_put_eth_ops();
0282         return -EINVAL;
0283     }
0284 
0285     return 0;
0286 }
0287 
0288 static void __exit qede_cleanup(void)
0289 {
0290     if (debug & QED_LOG_INFO_MASK)
0291         pr_info("qede_cleanup called\n");
0292 
0293     unregister_netdevice_notifier(&qede_netdev_notifier);
0294     pci_unregister_driver(&qede_pci_driver);
0295     qed_put_eth_ops();
0296 }
0297 
0298 module_init(qede_init);
0299 module_exit(qede_cleanup);
0300 
0301 static int qede_open(struct net_device *ndev);
0302 static int qede_close(struct net_device *ndev);
0303 
0304 void qede_fill_by_demand_stats(struct qede_dev *edev)
0305 {
0306     struct qede_stats_common *p_common = &edev->stats.common;
0307     struct qed_eth_stats stats;
0308 
0309     edev->ops->get_vport_stats(edev->cdev, &stats);
0310 
0311     p_common->no_buff_discards = stats.common.no_buff_discards;
0312     p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
0313     p_common->ttl0_discard = stats.common.ttl0_discard;
0314     p_common->rx_ucast_bytes = stats.common.rx_ucast_bytes;
0315     p_common->rx_mcast_bytes = stats.common.rx_mcast_bytes;
0316     p_common->rx_bcast_bytes = stats.common.rx_bcast_bytes;
0317     p_common->rx_ucast_pkts = stats.common.rx_ucast_pkts;
0318     p_common->rx_mcast_pkts = stats.common.rx_mcast_pkts;
0319     p_common->rx_bcast_pkts = stats.common.rx_bcast_pkts;
0320     p_common->mftag_filter_discards = stats.common.mftag_filter_discards;
0321     p_common->mac_filter_discards = stats.common.mac_filter_discards;
0322     p_common->gft_filter_drop = stats.common.gft_filter_drop;
0323 
0324     p_common->tx_ucast_bytes = stats.common.tx_ucast_bytes;
0325     p_common->tx_mcast_bytes = stats.common.tx_mcast_bytes;
0326     p_common->tx_bcast_bytes = stats.common.tx_bcast_bytes;
0327     p_common->tx_ucast_pkts = stats.common.tx_ucast_pkts;
0328     p_common->tx_mcast_pkts = stats.common.tx_mcast_pkts;
0329     p_common->tx_bcast_pkts = stats.common.tx_bcast_pkts;
0330     p_common->tx_err_drop_pkts = stats.common.tx_err_drop_pkts;
0331     p_common->coalesced_pkts = stats.common.tpa_coalesced_pkts;
0332     p_common->coalesced_events = stats.common.tpa_coalesced_events;
0333     p_common->coalesced_aborts_num = stats.common.tpa_aborts_num;
0334     p_common->non_coalesced_pkts = stats.common.tpa_not_coalesced_pkts;
0335     p_common->coalesced_bytes = stats.common.tpa_coalesced_bytes;
0336 
0337     p_common->rx_64_byte_packets = stats.common.rx_64_byte_packets;
0338     p_common->rx_65_to_127_byte_packets =
0339         stats.common.rx_65_to_127_byte_packets;
0340     p_common->rx_128_to_255_byte_packets =
0341         stats.common.rx_128_to_255_byte_packets;
0342     p_common->rx_256_to_511_byte_packets =
0343         stats.common.rx_256_to_511_byte_packets;
0344     p_common->rx_512_to_1023_byte_packets =
0345         stats.common.rx_512_to_1023_byte_packets;
0346     p_common->rx_1024_to_1518_byte_packets =
0347         stats.common.rx_1024_to_1518_byte_packets;
0348     p_common->rx_crc_errors = stats.common.rx_crc_errors;
0349     p_common->rx_mac_crtl_frames = stats.common.rx_mac_crtl_frames;
0350     p_common->rx_pause_frames = stats.common.rx_pause_frames;
0351     p_common->rx_pfc_frames = stats.common.rx_pfc_frames;
0352     p_common->rx_align_errors = stats.common.rx_align_errors;
0353     p_common->rx_carrier_errors = stats.common.rx_carrier_errors;
0354     p_common->rx_oversize_packets = stats.common.rx_oversize_packets;
0355     p_common->rx_jabbers = stats.common.rx_jabbers;
0356     p_common->rx_undersize_packets = stats.common.rx_undersize_packets;
0357     p_common->rx_fragments = stats.common.rx_fragments;
0358     p_common->tx_64_byte_packets = stats.common.tx_64_byte_packets;
0359     p_common->tx_65_to_127_byte_packets =
0360         stats.common.tx_65_to_127_byte_packets;
0361     p_common->tx_128_to_255_byte_packets =
0362         stats.common.tx_128_to_255_byte_packets;
0363     p_common->tx_256_to_511_byte_packets =
0364         stats.common.tx_256_to_511_byte_packets;
0365     p_common->tx_512_to_1023_byte_packets =
0366         stats.common.tx_512_to_1023_byte_packets;
0367     p_common->tx_1024_to_1518_byte_packets =
0368         stats.common.tx_1024_to_1518_byte_packets;
0369     p_common->tx_pause_frames = stats.common.tx_pause_frames;
0370     p_common->tx_pfc_frames = stats.common.tx_pfc_frames;
0371     p_common->brb_truncates = stats.common.brb_truncates;
0372     p_common->brb_discards = stats.common.brb_discards;
0373     p_common->tx_mac_ctrl_frames = stats.common.tx_mac_ctrl_frames;
0374     p_common->link_change_count = stats.common.link_change_count;
0375     p_common->ptp_skip_txts = edev->ptp_skip_txts;
0376 
0377     if (QEDE_IS_BB(edev)) {
0378         struct qede_stats_bb *p_bb = &edev->stats.bb;
0379 
0380         p_bb->rx_1519_to_1522_byte_packets =
0381             stats.bb.rx_1519_to_1522_byte_packets;
0382         p_bb->rx_1519_to_2047_byte_packets =
0383             stats.bb.rx_1519_to_2047_byte_packets;
0384         p_bb->rx_2048_to_4095_byte_packets =
0385             stats.bb.rx_2048_to_4095_byte_packets;
0386         p_bb->rx_4096_to_9216_byte_packets =
0387             stats.bb.rx_4096_to_9216_byte_packets;
0388         p_bb->rx_9217_to_16383_byte_packets =
0389             stats.bb.rx_9217_to_16383_byte_packets;
0390         p_bb->tx_1519_to_2047_byte_packets =
0391             stats.bb.tx_1519_to_2047_byte_packets;
0392         p_bb->tx_2048_to_4095_byte_packets =
0393             stats.bb.tx_2048_to_4095_byte_packets;
0394         p_bb->tx_4096_to_9216_byte_packets =
0395             stats.bb.tx_4096_to_9216_byte_packets;
0396         p_bb->tx_9217_to_16383_byte_packets =
0397             stats.bb.tx_9217_to_16383_byte_packets;
0398         p_bb->tx_lpi_entry_count = stats.bb.tx_lpi_entry_count;
0399         p_bb->tx_total_collisions = stats.bb.tx_total_collisions;
0400     } else {
0401         struct qede_stats_ah *p_ah = &edev->stats.ah;
0402 
0403         p_ah->rx_1519_to_max_byte_packets =
0404             stats.ah.rx_1519_to_max_byte_packets;
0405         p_ah->tx_1519_to_max_byte_packets =
0406             stats.ah.tx_1519_to_max_byte_packets;
0407     }
0408 }
0409 
0410 static void qede_get_stats64(struct net_device *dev,
0411                  struct rtnl_link_stats64 *stats)
0412 {
0413     struct qede_dev *edev = netdev_priv(dev);
0414     struct qede_stats_common *p_common;
0415 
0416     qede_fill_by_demand_stats(edev);
0417     p_common = &edev->stats.common;
0418 
0419     stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
0420                 p_common->rx_bcast_pkts;
0421     stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
0422                 p_common->tx_bcast_pkts;
0423 
0424     stats->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
0425               p_common->rx_bcast_bytes;
0426     stats->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
0427               p_common->tx_bcast_bytes;
0428 
0429     stats->tx_errors = p_common->tx_err_drop_pkts;
0430     stats->multicast = p_common->rx_mcast_pkts + p_common->rx_bcast_pkts;
0431 
0432     stats->rx_fifo_errors = p_common->no_buff_discards;
0433 
0434     if (QEDE_IS_BB(edev))
0435         stats->collisions = edev->stats.bb.tx_total_collisions;
0436     stats->rx_crc_errors = p_common->rx_crc_errors;
0437     stats->rx_frame_errors = p_common->rx_align_errors;
0438 }
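/* Illustrative note (not part of the original source): the rtnl_link_stats64
 * filled above is what "ip -s link show <ifname>" and /proc/net/dev report;
 * the full per-queue and firmware counters remain visible via "ethtool -S".
 */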
0439 
0440 #ifdef CONFIG_QED_SRIOV
0441 static int qede_get_vf_config(struct net_device *dev, int vfidx,
0442                   struct ifla_vf_info *ivi)
0443 {
0444     struct qede_dev *edev = netdev_priv(dev);
0445 
0446     if (!edev->ops)
0447         return -EINVAL;
0448 
0449     return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
0450 }
0451 
0452 static int qede_set_vf_rate(struct net_device *dev, int vfidx,
0453                 int min_tx_rate, int max_tx_rate)
0454 {
0455     struct qede_dev *edev = netdev_priv(dev);
0456 
0457     return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
0458                     max_tx_rate);
0459 }
0460 
0461 static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
0462 {
0463     struct qede_dev *edev = netdev_priv(dev);
0464 
0465     if (!edev->ops)
0466         return -EINVAL;
0467 
0468     return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
0469 }
0470 
0471 static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
0472                   int link_state)
0473 {
0474     struct qede_dev *edev = netdev_priv(dev);
0475 
0476     if (!edev->ops)
0477         return -EINVAL;
0478 
0479     return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
0480 }
0481 
0482 static int qede_set_vf_trust(struct net_device *dev, int vfidx, bool setting)
0483 {
0484     struct qede_dev *edev = netdev_priv(dev);
0485 
0486     if (!edev->ops)
0487         return -EINVAL;
0488 
0489     return edev->ops->iov->set_trust(edev->cdev, vfidx, setting);
0490 }
0491 #endif
0492 
0493 static int qede_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
0494 {
0495     struct qede_dev *edev = netdev_priv(dev);
0496 
0497     if (!netif_running(dev))
0498         return -EAGAIN;
0499 
0500     switch (cmd) {
0501     case SIOCSHWTSTAMP:
0502         return qede_ptp_hw_ts(edev, ifr);
0503     default:
0504         DP_VERBOSE(edev, QED_MSG_DEBUG,
0505                "default IOCTL cmd 0x%x\n", cmd);
0506         return -EOPNOTSUPP;
0507     }
0508 
0509     return 0;
0510 }
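/* Illustrative note (not part of the original source): SIOCSHWTSTAMP is what
 * PTP userspace issues to enable hardware timestamping, e.g. linuxptp's
 * "hwstamp_ctl -i <ifname> -t 1 -r 1" or ptp4l itself; qede_ptp_hw_ts() then
 * applies the requested Tx/Rx timestamping configuration to the device.
 */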
0511 
0512 static void qede_fp_sb_dump(struct qede_dev *edev, struct qede_fastpath *fp)
0513 {
0514     char *p_sb = (char *)fp->sb_info->sb_virt;
0515     u32 sb_size, i;
0516 
0517     sb_size = sizeof(struct status_block);
0518 
0519     for (i = 0; i < sb_size; i += 8)
0520         DP_NOTICE(edev,
0521               "%02hhX %02hhX %02hhX %02hhX  %02hhX %02hhX %02hhX %02hhX\n",
0522               p_sb[i], p_sb[i + 1], p_sb[i + 2], p_sb[i + 3],
0523               p_sb[i + 4], p_sb[i + 5], p_sb[i + 6], p_sb[i + 7]);
0524 }
0525 
0526 static void
0527 qede_txq_fp_log_metadata(struct qede_dev *edev,
0528              struct qede_fastpath *fp, struct qede_tx_queue *txq)
0529 {
0530     struct qed_chain *p_chain = &txq->tx_pbl;
0531 
0532     /* Dump txq/fp/sb ids etc. other metadata */
0533     DP_NOTICE(edev,
0534           "fpid 0x%x sbid 0x%x txqid [0x%x] ndev_qid [0x%x] cos [0x%x] p_chain %p cap %d size %d jiffies %lu HZ 0x%x\n",
0535           fp->id, fp->sb_info->igu_sb_id, txq->index, txq->ndev_txq_id, txq->cos,
0536           p_chain, p_chain->capacity, p_chain->size, jiffies, HZ);
0537 
0538     /* Dump all the relevant prod/cons indexes */
0539     DP_NOTICE(edev,
0540           "hw cons %04x sw_tx_prod=0x%x, sw_tx_cons=0x%x, bd_prod 0x%x bd_cons 0x%x\n",
0541           le16_to_cpu(*txq->hw_cons_ptr), txq->sw_tx_prod, txq->sw_tx_cons,
0542           qed_chain_get_prod_idx(p_chain), qed_chain_get_cons_idx(p_chain));
0543 }
0544 
0545 static void
0546 qede_tx_log_print(struct qede_dev *edev, struct qede_fastpath *fp, struct qede_tx_queue *txq)
0547 {
0548     struct qed_sb_info_dbg sb_dbg;
0549     int rc;
0550 
0551     /* sb info */
0552     qede_fp_sb_dump(edev, fp);
0553 
0554     memset(&sb_dbg, 0, sizeof(sb_dbg));
0555     rc = edev->ops->common->get_sb_info(edev->cdev, fp->sb_info, (u16)fp->id, &sb_dbg);
0556 
0557     DP_NOTICE(edev, "IGU: prod %08x cons %08x CAU Tx %04x\n",
0558           sb_dbg.igu_prod, sb_dbg.igu_cons, sb_dbg.pi[TX_PI(txq->cos)]);
0559 
0560     /* report to mfw */
0561     edev->ops->common->mfw_report(edev->cdev,
0562                       "Txq[%d]: FW cons [host] %04x, SW cons %04x, SW prod %04x [Jiffies %lu]\n",
0563                       txq->index, le16_to_cpu(*txq->hw_cons_ptr),
0564                       qed_chain_get_cons_idx(&txq->tx_pbl),
0565                       qed_chain_get_prod_idx(&txq->tx_pbl), jiffies);
0566     if (!rc)
0567         edev->ops->common->mfw_report(edev->cdev,
0568                           "Txq[%d]: SB[0x%04x] - IGU: prod %08x cons %08x CAU Tx %04x\n",
0569                           txq->index, fp->sb_info->igu_sb_id,
0570                           sb_dbg.igu_prod, sb_dbg.igu_cons,
0571                           sb_dbg.pi[TX_PI(txq->cos)]);
0572 }
0573 
0574 static void qede_tx_timeout(struct net_device *dev, unsigned int txqueue)
0575 {
0576     struct qede_dev *edev = netdev_priv(dev);
0577     int i;
0578 
0579     netif_carrier_off(dev);
0580     DP_NOTICE(edev, "TX timeout on queue %u!\n", txqueue);
0581 
0582     for_each_queue(i) {
0583         struct qede_tx_queue *txq;
0584         struct qede_fastpath *fp;
0585         int cos;
0586 
0587         fp = &edev->fp_array[i];
0588         if (!(fp->type & QEDE_FASTPATH_TX))
0589             continue;
0590 
0591         for_each_cos_in_txq(edev, cos) {
0592             txq = &fp->txq[cos];
0593 
0594             /* Dump basic metadata for all queues */
0595             qede_txq_fp_log_metadata(edev, fp, txq);
0596 
0597             if (qed_chain_get_cons_idx(&txq->tx_pbl) !=
0598                 qed_chain_get_prod_idx(&txq->tx_pbl))
0599                 qede_tx_log_print(edev, fp, txq);
0600         }
0601     }
0602 
0603     if (IS_VF(edev))
0604         return;
0605 
0606     if (test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
0607         edev->state == QEDE_STATE_RECOVERY) {
0608         DP_INFO(edev,
0609             "Avoid handling a Tx timeout while another HW error is being handled\n");
0610         return;
0611     }
0612 
0613     set_bit(QEDE_ERR_GET_DBG_INFO, &edev->err_flags);
0614     set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
0615     schedule_delayed_work(&edev->sp_task, 0);
0616 }
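/* Illustrative note (not part of the original source): the networking core
 * calls .ndo_tx_timeout once a Tx queue has been stopped for longer than
 * ndev->watchdog_timeo (TX_TIMEOUT, i.e. 5 * HZ here). The handler above only
 * dumps queue/SB state; the actual recovery is deferred to sp_task via the
 * QEDE_SP_HW_ERR flag.
 */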
0617 
0618 static int qede_setup_tc(struct net_device *ndev, u8 num_tc)
0619 {
0620     struct qede_dev *edev = netdev_priv(ndev);
0621     int cos, count, offset;
0622 
0623     if (num_tc > edev->dev_info.num_tc)
0624         return -EINVAL;
0625 
0626     netdev_reset_tc(ndev);
0627     netdev_set_num_tc(ndev, num_tc);
0628 
0629     for_each_cos_in_txq(edev, cos) {
0630         count = QEDE_TSS_COUNT(edev);
0631         offset = cos * QEDE_TSS_COUNT(edev);
0632         netdev_set_tc_queue(ndev, cos, count, offset);
0633     }
0634 
0635     return 0;
0636 }
0637 
0638 static int
0639 qede_set_flower(struct qede_dev *edev, struct flow_cls_offload *f,
0640         __be16 proto)
0641 {
0642     switch (f->command) {
0643     case FLOW_CLS_REPLACE:
0644         return qede_add_tc_flower_fltr(edev, proto, f);
0645     case FLOW_CLS_DESTROY:
0646         return qede_delete_flow_filter(edev, f->cookie);
0647     default:
0648         return -EOPNOTSUPP;
0649     }
0650 }
0651 
0652 static int qede_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
0653                   void *cb_priv)
0654 {
0655     struct flow_cls_offload *f;
0656     struct qede_dev *edev = cb_priv;
0657 
0658     if (!tc_cls_can_offload_and_chain0(edev->ndev, type_data))
0659         return -EOPNOTSUPP;
0660 
0661     switch (type) {
0662     case TC_SETUP_CLSFLOWER:
0663         f = type_data;
0664         return qede_set_flower(edev, f, f->common.protocol);
0665     default:
0666         return -EOPNOTSUPP;
0667     }
0668 }
0669 
0670 static LIST_HEAD(qede_block_cb_list);
0671 
0672 static int
0673 qede_setup_tc_offload(struct net_device *dev, enum tc_setup_type type,
0674               void *type_data)
0675 {
0676     struct qede_dev *edev = netdev_priv(dev);
0677     struct tc_mqprio_qopt *mqprio;
0678 
0679     switch (type) {
0680     case TC_SETUP_BLOCK:
0681         return flow_block_cb_setup_simple(type_data,
0682                           &qede_block_cb_list,
0683                           qede_setup_tc_block_cb,
0684                           edev, edev, true);
0685     case TC_SETUP_QDISC_MQPRIO:
0686         mqprio = type_data;
0687 
0688         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
0689         return qede_setup_tc(dev, mqprio->num_tc);
0690     default:
0691         return -EOPNOTSUPP;
0692     }
0693 }
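/* Usage sketch (not part of the original source; exact syntax depends on the
 * iproute2 version): TC_SETUP_QDISC_MQPRIO above is reached via a hardware
 * offloaded mqprio qdisc, roughly:
 *
 *   tc qdisc add dev <ifname> root mqprio num_tc 4 map 0 0 1 1 2 2 3 3 hw 1
 *
 * while TC_SETUP_BLOCK backs flower classifier offload ("tc filter ... flower
 * ... skip_sw"), handled by qede_set_flower().
 */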
0694 
0695 static const struct net_device_ops qede_netdev_ops = {
0696     .ndo_open       = qede_open,
0697     .ndo_stop       = qede_close,
0698     .ndo_start_xmit     = qede_start_xmit,
0699     .ndo_select_queue   = qede_select_queue,
0700     .ndo_set_rx_mode    = qede_set_rx_mode,
0701     .ndo_set_mac_address    = qede_set_mac_addr,
0702     .ndo_validate_addr  = eth_validate_addr,
0703     .ndo_change_mtu     = qede_change_mtu,
0704     .ndo_eth_ioctl      = qede_ioctl,
0705     .ndo_tx_timeout     = qede_tx_timeout,
0706 #ifdef CONFIG_QED_SRIOV
0707     .ndo_set_vf_mac     = qede_set_vf_mac,
0708     .ndo_set_vf_vlan    = qede_set_vf_vlan,
0709     .ndo_set_vf_trust   = qede_set_vf_trust,
0710 #endif
0711     .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
0712     .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
0713     .ndo_fix_features   = qede_fix_features,
0714     .ndo_set_features   = qede_set_features,
0715     .ndo_get_stats64    = qede_get_stats64,
0716 #ifdef CONFIG_QED_SRIOV
0717     .ndo_set_vf_link_state  = qede_set_vf_link_state,
0718     .ndo_set_vf_spoofchk    = qede_set_vf_spoofchk,
0719     .ndo_get_vf_config  = qede_get_vf_config,
0720     .ndo_set_vf_rate    = qede_set_vf_rate,
0721 #endif
0722     .ndo_features_check = qede_features_check,
0723     .ndo_bpf        = qede_xdp,
0724 #ifdef CONFIG_RFS_ACCEL
0725     .ndo_rx_flow_steer  = qede_rx_flow_steer,
0726 #endif
0727     .ndo_xdp_xmit       = qede_xdp_transmit,
0728     .ndo_setup_tc       = qede_setup_tc_offload,
0729 };
0730 
0731 static const struct net_device_ops qede_netdev_vf_ops = {
0732     .ndo_open       = qede_open,
0733     .ndo_stop       = qede_close,
0734     .ndo_start_xmit     = qede_start_xmit,
0735     .ndo_select_queue   = qede_select_queue,
0736     .ndo_set_rx_mode    = qede_set_rx_mode,
0737     .ndo_set_mac_address    = qede_set_mac_addr,
0738     .ndo_validate_addr  = eth_validate_addr,
0739     .ndo_change_mtu     = qede_change_mtu,
0740     .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
0741     .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
0742     .ndo_fix_features   = qede_fix_features,
0743     .ndo_set_features   = qede_set_features,
0744     .ndo_get_stats64    = qede_get_stats64,
0745     .ndo_features_check = qede_features_check,
0746 };
0747 
0748 static const struct net_device_ops qede_netdev_vf_xdp_ops = {
0749     .ndo_open       = qede_open,
0750     .ndo_stop       = qede_close,
0751     .ndo_start_xmit     = qede_start_xmit,
0752     .ndo_select_queue   = qede_select_queue,
0753     .ndo_set_rx_mode    = qede_set_rx_mode,
0754     .ndo_set_mac_address    = qede_set_mac_addr,
0755     .ndo_validate_addr  = eth_validate_addr,
0756     .ndo_change_mtu     = qede_change_mtu,
0757     .ndo_vlan_rx_add_vid    = qede_vlan_rx_add_vid,
0758     .ndo_vlan_rx_kill_vid   = qede_vlan_rx_kill_vid,
0759     .ndo_fix_features   = qede_fix_features,
0760     .ndo_set_features   = qede_set_features,
0761     .ndo_get_stats64    = qede_get_stats64,
0762     .ndo_features_check = qede_features_check,
0763     .ndo_bpf        = qede_xdp,
0764     .ndo_xdp_xmit       = qede_xdp_transmit,
0765 };
0766 
0767 /* -------------------------------------------------------------------------
0768  * START OF PROBE / REMOVE
0769  * -------------------------------------------------------------------------
0770  */
0771 
0772 static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
0773                         struct pci_dev *pdev,
0774                         struct qed_dev_eth_info *info,
0775                         u32 dp_module, u8 dp_level)
0776 {
0777     struct net_device *ndev;
0778     struct qede_dev *edev;
0779 
0780     ndev = alloc_etherdev_mqs(sizeof(*edev),
0781                   info->num_queues * info->num_tc,
0782                   info->num_queues);
0783     if (!ndev) {
0784         pr_err("etherdev allocation failed\n");
0785         return NULL;
0786     }
0787 
0788     edev = netdev_priv(ndev);
0789     edev->ndev = ndev;
0790     edev->cdev = cdev;
0791     edev->pdev = pdev;
0792     edev->dp_module = dp_module;
0793     edev->dp_level = dp_level;
0794     edev->ops = qed_ops;
0795 
0796     if (is_kdump_kernel()) {
0797         edev->q_num_rx_buffers = NUM_RX_BDS_KDUMP_MIN;
0798         edev->q_num_tx_buffers = NUM_TX_BDS_KDUMP_MIN;
0799     } else {
0800         edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
0801         edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
0802     }
0803 
0804     DP_INFO(edev, "Allocated netdev with %d tx queues and %d rx queues\n",
0805         info->num_queues, info->num_queues);
0806 
0807     SET_NETDEV_DEV(ndev, &pdev->dev);
0808 
0809     memset(&edev->stats, 0, sizeof(edev->stats));
0810     memcpy(&edev->dev_info, info, sizeof(*info));
0811 
0812     /* As ethtool doesn't have the ability to show WoL behavior as
0813      * 'default', declare it enabled if the device supports it.
0814      */
0815     if (edev->dev_info.common.wol_support)
0816         edev->wol_enabled = true;
0817 
0818     INIT_LIST_HEAD(&edev->vlan_list);
0819 
0820     return edev;
0821 }
0822 
0823 static void qede_init_ndev(struct qede_dev *edev)
0824 {
0825     struct net_device *ndev = edev->ndev;
0826     struct pci_dev *pdev = edev->pdev;
0827     bool udp_tunnel_enable = false;
0828     netdev_features_t hw_features;
0829 
0830     pci_set_drvdata(pdev, ndev);
0831 
0832     ndev->mem_start = edev->dev_info.common.pci_mem_start;
0833     ndev->base_addr = ndev->mem_start;
0834     ndev->mem_end = edev->dev_info.common.pci_mem_end;
0835     ndev->irq = edev->dev_info.common.pci_irq;
0836 
0837     ndev->watchdog_timeo = TX_TIMEOUT;
0838 
0839     if (IS_VF(edev)) {
0840         if (edev->dev_info.xdp_supported)
0841             ndev->netdev_ops = &qede_netdev_vf_xdp_ops;
0842         else
0843             ndev->netdev_ops = &qede_netdev_vf_ops;
0844     } else {
0845         ndev->netdev_ops = &qede_netdev_ops;
0846     }
0847 
0848     qede_set_ethtool_ops(ndev);
0849 
0850     ndev->priv_flags |= IFF_UNICAST_FLT;
0851 
0852     /* user-changeable features */
0853     hw_features = NETIF_F_GRO | NETIF_F_GRO_HW | NETIF_F_SG |
0854               NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
0855               NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC;
0856 
0857     if (edev->dev_info.common.b_arfs_capable)
0858         hw_features |= NETIF_F_NTUPLE;
0859 
0860     if (edev->dev_info.common.vxlan_enable ||
0861         edev->dev_info.common.geneve_enable)
0862         udp_tunnel_enable = true;
0863 
0864     if (udp_tunnel_enable || edev->dev_info.common.gre_enable) {
0865         hw_features |= NETIF_F_TSO_ECN;
0866         ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
0867                     NETIF_F_SG | NETIF_F_TSO |
0868                     NETIF_F_TSO_ECN | NETIF_F_TSO6 |
0869                     NETIF_F_RXCSUM;
0870     }
0871 
0872     if (udp_tunnel_enable) {
0873         hw_features |= (NETIF_F_GSO_UDP_TUNNEL |
0874                 NETIF_F_GSO_UDP_TUNNEL_CSUM);
0875         ndev->hw_enc_features |= (NETIF_F_GSO_UDP_TUNNEL |
0876                       NETIF_F_GSO_UDP_TUNNEL_CSUM);
0877 
0878         qede_set_udp_tunnels(edev);
0879     }
0880 
0881     if (edev->dev_info.common.gre_enable) {
0882         hw_features |= (NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM);
0883         ndev->hw_enc_features |= (NETIF_F_GSO_GRE |
0884                       NETIF_F_GSO_GRE_CSUM);
0885     }
0886 
0887     ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
0888                   NETIF_F_HIGHDMA;
0889     ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
0890              NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HIGHDMA |
0891              NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX;
0892 
0893     ndev->hw_features = hw_features;
0894 
0895     /* MTU range: 46 - 9600 */
0896     ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
0897     ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
0898 
0899     /* Set network device HW mac */
0900     eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
0901 
0902     ndev->mtu = edev->dev_info.common.mtu;
0903 }
0904 
0905 /* This function converts the 32b 'debug' param into two params: level and module.
0906  * Input 32b decoding:
0907  * b31 - enable all NOTICE prints. NOTICE prints are for deviations from the
0908  * 'happy' flow, e.g. a memory allocation failure.
0909  * b30 - enable all INFO prints. INFO prints are for major steps in the flow
0910  * and provide important parameters.
0911  * b29-b0 - per-module bitmap, where each bit enables VERBOSE prints of that
0912  * module. VERBOSE prints are for tracking a specific flow at a low level.
0913  *
0914  * Note that the level should be that of the lowest required logs.
0915  */
0916 void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level)
0917 {
0918     *p_dp_level = QED_LEVEL_NOTICE;
0919     *p_dp_module = 0;
0920 
0921     if (debug & QED_LOG_VERBOSE_MASK) {
0922         *p_dp_level = QED_LEVEL_VERBOSE;
0923         *p_dp_module = (debug & 0x3FFFFFFF);
0924     } else if (debug & QED_LOG_INFO_MASK) {
0925         *p_dp_level = QED_LEVEL_INFO;
0926     } else if (debug & QED_LOG_NOTICE_MASK) {
0927         *p_dp_level = QED_LEVEL_NOTICE;
0928     }
0929 }
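/* Illustrative note (not part of the original source), assuming the
 * QED_LOG_*_MASK values follow the b31/b30/b29-b0 split documented above:
 *
 *   modprobe qede debug=0x80000000   -> QED_LEVEL_NOTICE
 *   modprobe qede debug=0x40000000   -> QED_LEVEL_INFO
 *   modprobe qede debug=0x3          -> QED_LEVEL_VERBOSE, modules 0 and 1
 */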
0930 
0931 static void qede_free_fp_array(struct qede_dev *edev)
0932 {
0933     if (edev->fp_array) {
0934         struct qede_fastpath *fp;
0935         int i;
0936 
0937         for_each_queue(i) {
0938             fp = &edev->fp_array[i];
0939 
0940             kfree(fp->sb_info);
0941             /* Handle mem alloc failure case where qede_init_fp
0942              * didn't register xdp_rxq_info yet.
0943              * The (fp->type & QEDE_FASTPATH_RX) check is implicit here.
0944              */
0945             if (fp->rxq && xdp_rxq_info_is_reg(&fp->rxq->xdp_rxq))
0946                 xdp_rxq_info_unreg(&fp->rxq->xdp_rxq);
0947             kfree(fp->rxq);
0948             kfree(fp->xdp_tx);
0949             kfree(fp->txq);
0950         }
0951         kfree(edev->fp_array);
0952     }
0953 
0954     edev->num_queues = 0;
0955     edev->fp_num_tx = 0;
0956     edev->fp_num_rx = 0;
0957 }
0958 
0959 static int qede_alloc_fp_array(struct qede_dev *edev)
0960 {
0961     u8 fp_combined, fp_rx = edev->fp_num_rx;
0962     struct qede_fastpath *fp;
0963     void *mem;
0964     int i;
0965 
0966     edev->fp_array = kcalloc(QEDE_QUEUE_CNT(edev),
0967                  sizeof(*edev->fp_array), GFP_KERNEL);
0968     if (!edev->fp_array) {
0969         DP_NOTICE(edev, "fp array allocation failed\n");
0970         goto err;
0971     }
0972 
0973     mem = krealloc(edev->coal_entry, QEDE_QUEUE_CNT(edev) *
0974                sizeof(*edev->coal_entry), GFP_KERNEL);
0975     if (!mem) {
0976         DP_ERR(edev, "coalesce entry allocation failed\n");
0977         kfree(edev->coal_entry);
0978         goto err;
0979     }
0980     edev->coal_entry = mem;
0981 
0982     fp_combined = QEDE_QUEUE_CNT(edev) - fp_rx - edev->fp_num_tx;
0983 
0984     /* Allocate the FP elements for Rx queues followed by combined and then
0985      * the Tx. This ordering should be maintained so that the respective
0986      * queues (Rx or Tx) will be together in the fastpath array and the
0987      * associated ids will be sequential.
0988      */
0989     for_each_queue(i) {
0990         fp = &edev->fp_array[i];
0991 
0992         fp->sb_info = kzalloc(sizeof(*fp->sb_info), GFP_KERNEL);
0993         if (!fp->sb_info) {
0994             DP_NOTICE(edev, "sb info struct allocation failed\n");
0995             goto err;
0996         }
0997 
0998         if (fp_rx) {
0999             fp->type = QEDE_FASTPATH_RX;
1000             fp_rx--;
1001         } else if (fp_combined) {
1002             fp->type = QEDE_FASTPATH_COMBINED;
1003             fp_combined--;
1004         } else {
1005             fp->type = QEDE_FASTPATH_TX;
1006         }
1007 
1008         if (fp->type & QEDE_FASTPATH_TX) {
1009             fp->txq = kcalloc(edev->dev_info.num_tc,
1010                       sizeof(*fp->txq), GFP_KERNEL);
1011             if (!fp->txq)
1012                 goto err;
1013         }
1014 
1015         if (fp->type & QEDE_FASTPATH_RX) {
1016             fp->rxq = kzalloc(sizeof(*fp->rxq), GFP_KERNEL);
1017             if (!fp->rxq)
1018                 goto err;
1019 
1020             if (edev->xdp_prog) {
1021                 fp->xdp_tx = kzalloc(sizeof(*fp->xdp_tx),
1022                              GFP_KERNEL);
1023                 if (!fp->xdp_tx)
1024                     goto err;
1025                 fp->type |= QEDE_FASTPATH_XDP;
1026             }
1027         }
1028     }
1029 
1030     return 0;
1031 err:
1032     qede_free_fp_array(edev);
1033     return -ENOMEM;
1034 }
1035 
1036 /* The qede lock is used to protect driver state change and driver flows that
1037  * are not reentrant.
1038  */
1039 void __qede_lock(struct qede_dev *edev)
1040 {
1041     mutex_lock(&edev->qede_lock);
1042 }
1043 
1044 void __qede_unlock(struct qede_dev *edev)
1045 {
1046     mutex_unlock(&edev->qede_lock);
1047 }
1048 
1049 /* This version of the lock should be used when acquiring the RTNL lock is also
1050  * needed in addition to the internal qede lock.
1051  */
1052 static void qede_lock(struct qede_dev *edev)
1053 {
1054     rtnl_lock();
1055     __qede_lock(edev);
1056 }
1057 
1058 static void qede_unlock(struct qede_dev *edev)
1059 {
1060     __qede_unlock(edev);
1061     rtnl_unlock();
1062 }
1063 
1064 static void qede_sp_task(struct work_struct *work)
1065 {
1066     struct qede_dev *edev = container_of(work, struct qede_dev,
1067                          sp_task.work);
1068 
1069     /* Disable execution of this deferred work once
1070      * qede removal is in progress; this stops any future
1071      * scheduling of sp_task.
1072      */
1073     if (test_bit(QEDE_SP_DISABLE, &edev->sp_flags))
1074         return;
1075 
1076     /* The locking scheme depends on the specific flag:
1077      * In case of QEDE_SP_RECOVERY, acquiring the RTNL lock is required to
1078      * ensure that ongoing flows are ended and new ones are not started.
1079      * In other cases - only the internal qede lock should be acquired.
1080      */
1081 
1082     if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
1083 #ifdef CONFIG_QED_SRIOV
1084         /* SRIOV must be disabled outside the lock to avoid a deadlock.
1085          * The recovery of the active VFs is currently not supported.
1086          */
1087         if (pci_num_vf(edev->pdev))
1088             qede_sriov_configure(edev->pdev, 0);
1089 #endif
1090         qede_lock(edev);
1091         qede_recovery_handler(edev);
1092         qede_unlock(edev);
1093     }
1094 
1095     __qede_lock(edev);
1096 
1097     if (test_and_clear_bit(QEDE_SP_RX_MODE, &edev->sp_flags))
1098         if (edev->state == QEDE_STATE_OPEN)
1099             qede_config_rx_mode(edev->ndev);
1100 
1101 #ifdef CONFIG_RFS_ACCEL
1102     if (test_and_clear_bit(QEDE_SP_ARFS_CONFIG, &edev->sp_flags)) {
1103         if (edev->state == QEDE_STATE_OPEN)
1104             qede_process_arfs_filters(edev, false);
1105     }
1106 #endif
1107     if (test_and_clear_bit(QEDE_SP_HW_ERR, &edev->sp_flags))
1108         qede_generic_hw_err_handler(edev);
1109     __qede_unlock(edev);
1110 
1111     if (test_and_clear_bit(QEDE_SP_AER, &edev->sp_flags)) {
1112 #ifdef CONFIG_QED_SRIOV
1113         /* SRIOV must be disabled outside the lock to avoid a deadlock.
1114          * The recovery of the active VFs is currently not supported.
1115          */
1116         if (pci_num_vf(edev->pdev))
1117             qede_sriov_configure(edev->pdev, 0);
1118 #endif
1119         edev->ops->common->recovery_process(edev->cdev);
1120     }
1121 }
1122 
1123 static void qede_update_pf_params(struct qed_dev *cdev)
1124 {
1125     struct qed_pf_params pf_params;
1126     u16 num_cons;
1127 
1128     /* 64 rx + 64 tx + 64 XDP */
1129     memset(&pf_params, 0, sizeof(struct qed_pf_params));
1130 
1131     /* 1 rx + 1 xdp + max tx cos */
1132     num_cons = QED_MIN_L2_CONS;
1133 
1134     pf_params.eth_pf_params.num_cons = (MAX_SB_PER_PF_MIMD - 1) * num_cons;
1135 
1136     /* Same for VFs - make sure they'll have sufficient connections
1137      * to support XDP Tx queues.
1138      */
1139     pf_params.eth_pf_params.num_vf_cons = 48;
1140 
1141     pf_params.eth_pf_params.num_arfs_filters = QEDE_RFS_MAX_FLTR;
1142     qed_ops->common->update_pf_params(cdev, &pf_params);
1143 }
1144 
1145 #define QEDE_FW_VER_STR_SIZE    80
1146 
1147 static void qede_log_probe(struct qede_dev *edev)
1148 {
1149     struct qed_dev_info *p_dev_info = &edev->dev_info.common;
1150     u8 buf[QEDE_FW_VER_STR_SIZE];
1151     size_t left_size;
1152 
1153     snprintf(buf, QEDE_FW_VER_STR_SIZE,
1154          "Storm FW %d.%d.%d.%d, Management FW %d.%d.%d.%d",
1155          p_dev_info->fw_major, p_dev_info->fw_minor, p_dev_info->fw_rev,
1156          p_dev_info->fw_eng,
1157          (p_dev_info->mfw_rev & QED_MFW_VERSION_3_MASK) >>
1158          QED_MFW_VERSION_3_OFFSET,
1159          (p_dev_info->mfw_rev & QED_MFW_VERSION_2_MASK) >>
1160          QED_MFW_VERSION_2_OFFSET,
1161          (p_dev_info->mfw_rev & QED_MFW_VERSION_1_MASK) >>
1162          QED_MFW_VERSION_1_OFFSET,
1163          (p_dev_info->mfw_rev & QED_MFW_VERSION_0_MASK) >>
1164          QED_MFW_VERSION_0_OFFSET);
1165 
1166     left_size = QEDE_FW_VER_STR_SIZE - strlen(buf);
1167     if (p_dev_info->mbi_version && left_size)
1168         snprintf(buf + strlen(buf), left_size,
1169              " [MBI %d.%d.%d]",
1170              (p_dev_info->mbi_version & QED_MBI_VERSION_2_MASK) >>
1171              QED_MBI_VERSION_2_OFFSET,
1172              (p_dev_info->mbi_version & QED_MBI_VERSION_1_MASK) >>
1173              QED_MBI_VERSION_1_OFFSET,
1174              (p_dev_info->mbi_version & QED_MBI_VERSION_0_MASK) >>
1175              QED_MBI_VERSION_0_OFFSET);
1176 
1177     pr_info("qede %02x:%02x.%02x: %s [%s]\n", edev->pdev->bus->number,
1178         PCI_SLOT(edev->pdev->devfn), PCI_FUNC(edev->pdev->devfn),
1179         buf, edev->ndev->name);
1180 }
1181 
1182 enum qede_probe_mode {
1183     QEDE_PROBE_NORMAL,
1184     QEDE_PROBE_RECOVERY,
1185 };
1186 
1187 static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
1188             bool is_vf, enum qede_probe_mode mode)
1189 {
1190     struct qed_probe_params probe_params;
1191     struct qed_slowpath_params sp_params;
1192     struct qed_dev_eth_info dev_info;
1193     struct qede_dev *edev;
1194     struct qed_dev *cdev;
1195     int rc;
1196 
1197     if (unlikely(dp_level & QED_LEVEL_INFO))
1198         pr_notice("Starting qede probe\n");
1199 
1200     memset(&probe_params, 0, sizeof(probe_params));
1201     probe_params.protocol = QED_PROTOCOL_ETH;
1202     probe_params.dp_module = dp_module;
1203     probe_params.dp_level = dp_level;
1204     probe_params.is_vf = is_vf;
1205     probe_params.recov_in_prog = (mode == QEDE_PROBE_RECOVERY);
1206     cdev = qed_ops->common->probe(pdev, &probe_params);
1207     if (!cdev) {
1208         rc = -ENODEV;
1209         goto err0;
1210     }
1211 
1212     qede_update_pf_params(cdev);
1213 
1214     /* Start the Slowpath-process */
1215     memset(&sp_params, 0, sizeof(sp_params));
1216     sp_params.int_mode = QED_INT_MODE_MSIX;
1217     strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE);
1218     rc = qed_ops->common->slowpath_start(cdev, &sp_params);
1219     if (rc) {
1220         pr_notice("Cannot start slowpath\n");
1221         goto err1;
1222     }
1223 
1224     /* Learn information crucial for qede to progress */
1225     rc = qed_ops->fill_dev_info(cdev, &dev_info);
1226     if (rc)
1227         goto err2;
1228 
1229     if (mode != QEDE_PROBE_RECOVERY) {
1230         edev = qede_alloc_etherdev(cdev, pdev, &dev_info, dp_module,
1231                        dp_level);
1232         if (!edev) {
1233             rc = -ENOMEM;
1234             goto err2;
1235         }
1236 
1237         edev->devlink = qed_ops->common->devlink_register(cdev);
1238         if (IS_ERR(edev->devlink)) {
1239             DP_NOTICE(edev, "Cannot register devlink\n");
1240             rc = PTR_ERR(edev->devlink);
1241             edev->devlink = NULL;
1242             goto err3;
1243         }
1244     } else {
1245         struct net_device *ndev = pci_get_drvdata(pdev);
1246         struct qed_devlink *qdl;
1247 
1248         edev = netdev_priv(ndev);
1249         qdl = devlink_priv(edev->devlink);
1250         qdl->cdev = cdev;
1251         edev->cdev = cdev;
1252         memset(&edev->stats, 0, sizeof(edev->stats));
1253         memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
1254     }
1255 
1256     if (is_vf)
1257         set_bit(QEDE_FLAGS_IS_VF, &edev->flags);
1258 
1259     qede_init_ndev(edev);
1260 
1261     rc = qede_rdma_dev_add(edev, (mode == QEDE_PROBE_RECOVERY));
1262     if (rc)
1263         goto err3;
1264 
1265     if (mode != QEDE_PROBE_RECOVERY) {
1266         /* Prepare the lock prior to the registration of the netdev,
1267          * as once it's registered we might reach flows requiring it
1268          * [it's even possible to reach a flow needing it directly
1269          * from there, although it's unlikely].
1270          */
1271         INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
1272         mutex_init(&edev->qede_lock);
1273 
1274         rc = register_netdev(edev->ndev);
1275         if (rc) {
1276             DP_NOTICE(edev, "Cannot register net-device\n");
1277             goto err4;
1278         }
1279     }
1280 
1281     edev->ops->common->set_name(cdev, edev->ndev->name);
1282 
1283     /* PTP not supported on VFs */
1284     if (!is_vf)
1285         qede_ptp_enable(edev);
1286 
1287     edev->ops->register_ops(cdev, &qede_ll_ops, edev);
1288 
1289 #ifdef CONFIG_DCB
1290     if (!IS_VF(edev))
1291         qede_set_dcbnl_ops(edev->ndev);
1292 #endif
1293 
1294     edev->rx_copybreak = QEDE_RX_HDR_SIZE;
1295 
1296     qede_log_probe(edev);
1297     return 0;
1298 
1299 err4:
1300     qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
1301 err3:
1302     if (mode != QEDE_PROBE_RECOVERY)
1303         free_netdev(edev->ndev);
1304     else
1305         edev->cdev = NULL;
1306 err2:
1307     qed_ops->common->slowpath_stop(cdev);
1308 err1:
1309     qed_ops->common->remove(cdev);
1310 err0:
1311     return rc;
1312 }
1313 
1314 static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1315 {
1316     bool is_vf = false;
1317     u32 dp_module = 0;
1318     u8 dp_level = 0;
1319 
1320     switch ((enum qede_pci_private)id->driver_data) {
1321     case QEDE_PRIVATE_VF:
1322         if (debug & QED_LOG_VERBOSE_MASK)
1323             dev_err(&pdev->dev, "Probing a VF\n");
1324         is_vf = true;
1325         break;
1326     default:
1327         if (debug & QED_LOG_VERBOSE_MASK)
1328             dev_err(&pdev->dev, "Probing a PF\n");
1329     }
1330 
1331     qede_config_debug(debug, &dp_module, &dp_level);
1332 
1333     return __qede_probe(pdev, dp_module, dp_level, is_vf,
1334                 QEDE_PROBE_NORMAL);
1335 }
1336 
1337 enum qede_remove_mode {
1338     QEDE_REMOVE_NORMAL,
1339     QEDE_REMOVE_RECOVERY,
1340 };
1341 
1342 static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1343 {
1344     struct net_device *ndev = pci_get_drvdata(pdev);
1345     struct qede_dev *edev;
1346     struct qed_dev *cdev;
1347 
1348     if (!ndev) {
1349         dev_info(&pdev->dev, "Device has already been removed\n");
1350         return;
1351     }
1352 
1353     edev = netdev_priv(ndev);
1354     cdev = edev->cdev;
1355 
1356     DP_INFO(edev, "Starting qede_remove\n");
1357 
1358     qede_rdma_dev_remove(edev, (mode == QEDE_REMOVE_RECOVERY));
1359 
1360     if (mode != QEDE_REMOVE_RECOVERY) {
1361         set_bit(QEDE_SP_DISABLE, &edev->sp_flags);
1362         unregister_netdev(ndev);
1363 
1364         cancel_delayed_work_sync(&edev->sp_task);
1365 
1366         edev->ops->common->set_power_state(cdev, PCI_D0);
1367 
1368         pci_set_drvdata(pdev, NULL);
1369     }
1370 
1371     qede_ptp_disable(edev);
1372 
1373     /* Use global ops since we've freed edev */
1374     qed_ops->common->slowpath_stop(cdev);
1375     if (system_state == SYSTEM_POWER_OFF)
1376         return;
1377 
1378     if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
1379         qed_ops->common->devlink_unregister(edev->devlink);
1380         edev->devlink = NULL;
1381     }
1382     qed_ops->common->remove(cdev);
1383     edev->cdev = NULL;
1384 
1385     /* Since this can happen out-of-sync with other flows,
1386      * don't release the netdevice until after slowpath stop
1387      * has been called to guarantee various other contexts
1388      * [e.g., QED register callbacks] won't break anything when
1389      * accessing the netdevice.
1390      */
1391     if (mode != QEDE_REMOVE_RECOVERY) {
1392         kfree(edev->coal_entry);
1393         free_netdev(ndev);
1394     }
1395 
1396     dev_info(&pdev->dev, "Ending qede_remove successfully\n");
1397 }
1398 
1399 static void qede_remove(struct pci_dev *pdev)
1400 {
1401     __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1402 }
1403 
1404 static void qede_shutdown(struct pci_dev *pdev)
1405 {
1406     __qede_remove(pdev, QEDE_REMOVE_NORMAL);
1407 }
1408 
1409 /* -------------------------------------------------------------------------
1410  * START OF LOAD / UNLOAD
1411  * -------------------------------------------------------------------------
1412  */
1413 
1414 static int qede_set_num_queues(struct qede_dev *edev)
1415 {
1416     int rc;
1417     u16 rss_num;
1418 
1419     /* Setup queues according to possible resources */
1420     if (edev->req_queues)
1421         rss_num = edev->req_queues;
1422     else
1423         rss_num = netif_get_num_default_rss_queues() *
1424               edev->dev_info.common.num_hwfns;
1425 
1426     rss_num = min_t(u16, QEDE_MAX_RSS_CNT(edev), rss_num);
1427 
1428     rc = edev->ops->common->set_fp_int(edev->cdev, rss_num);
1429     if (rc > 0) {
1430         /* Managed to request interrupts for our queues */
1431         edev->num_queues = rc;
1432         DP_INFO(edev, "Managed %d [of %d] RSS queues\n",
1433             QEDE_QUEUE_CNT(edev), rss_num);
1434         rc = 0;
1435     }
1436 
1437     edev->fp_num_tx = edev->req_num_tx;
1438     edev->fp_num_rx = edev->req_num_rx;
1439 
1440     return rc;
1441 }
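/* Illustrative note (not part of the original source): edev->req_queues and
 * the req_num_tx/req_num_rx splits consumed above are typically set from
 * userspace through ethtool's set_channels hook, e.g.:
 *
 *   ethtool -L <ifname> combined 8
 *
 * When no explicit request was made, the default RSS count scaled by the
 * number of HW functions is used, capped by QEDE_MAX_RSS_CNT().
 */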
1442 
1443 static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
1444                  u16 sb_id)
1445 {
1446     if (sb_info->sb_virt) {
1447         edev->ops->common->sb_release(edev->cdev, sb_info, sb_id,
1448                           QED_SB_TYPE_L2_QUEUE);
1449         dma_free_coherent(&edev->pdev->dev, sizeof(*sb_info->sb_virt),
1450                   (void *)sb_info->sb_virt, sb_info->sb_phys);
1451         memset(sb_info, 0, sizeof(*sb_info));
1452     }
1453 }
1454 
1455 /* This function allocates fast-path status block memory */
1456 static int qede_alloc_mem_sb(struct qede_dev *edev,
1457                  struct qed_sb_info *sb_info, u16 sb_id)
1458 {
1459     struct status_block *sb_virt;
1460     dma_addr_t sb_phys;
1461     int rc;
1462 
1463     sb_virt = dma_alloc_coherent(&edev->pdev->dev,
1464                      sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
1465     if (!sb_virt) {
1466         DP_ERR(edev, "Status block allocation failed\n");
1467         return -ENOMEM;
1468     }
1469 
1470     rc = edev->ops->common->sb_init(edev->cdev, sb_info,
1471                     sb_virt, sb_phys, sb_id,
1472                     QED_SB_TYPE_L2_QUEUE);
1473     if (rc) {
1474         DP_ERR(edev, "Status block initialization failed\n");
1475         dma_free_coherent(&edev->pdev->dev, sizeof(*sb_virt),
1476                   sb_virt, sb_phys);
1477         return rc;
1478     }
1479 
1480     return 0;
1481 }
1482 
1483 static void qede_free_rx_buffers(struct qede_dev *edev,
1484                  struct qede_rx_queue *rxq)
1485 {
1486     u16 i;
1487 
1488     for (i = rxq->sw_rx_cons; i != rxq->sw_rx_prod; i++) {
1489         struct sw_rx_data *rx_buf;
1490         struct page *data;
1491 
1492         rx_buf = &rxq->sw_rx_ring[i & NUM_RX_BDS_MAX];
1493         data = rx_buf->data;
1494 
1495         dma_unmap_page(&edev->pdev->dev,
1496                    rx_buf->mapping, PAGE_SIZE, rxq->data_direction);
1497 
1498         rx_buf->data = NULL;
1499         __free_page(data);
1500     }
1501 }
1502 
1503 static void qede_free_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1504 {
1505     /* Free rx buffers */
1506     qede_free_rx_buffers(edev, rxq);
1507 
1508     /* Free the parallel SW ring */
1509     kfree(rxq->sw_rx_ring);
1510 
1511     /* Free the real RQ ring used by FW */
1512     edev->ops->common->chain_free(edev->cdev, &rxq->rx_bd_ring);
1513     edev->ops->common->chain_free(edev->cdev, &rxq->rx_comp_ring);
1514 }
1515 
1516 static void qede_set_tpa_param(struct qede_rx_queue *rxq)
1517 {
1518     int i;
1519 
1520     for (i = 0; i < ETH_TPA_MAX_AGGS_NUM; i++) {
1521         struct qede_agg_info *tpa_info = &rxq->tpa_info[i];
1522 
1523         tpa_info->state = QEDE_AGG_STATE_NONE;
1524     }
1525 }
1526 
1527 /* This function allocates all memory needed per Rx queue */
1528 static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
1529 {
1530     struct qed_chain_init_params params = {
1531         .cnt_type   = QED_CHAIN_CNT_TYPE_U16,
1532         .num_elems  = RX_RING_SIZE,
1533     };
1534     struct qed_dev *cdev = edev->cdev;
1535     int i, rc, size;
1536 
1537     rxq->num_rx_buffers = edev->q_num_rx_buffers;
1538 
1539     rxq->rx_buf_size = NET_IP_ALIGN + ETH_OVERHEAD + edev->ndev->mtu;
1540 
1541     rxq->rx_headroom = edev->xdp_prog ? XDP_PACKET_HEADROOM : NET_SKB_PAD;
1542     size = rxq->rx_headroom +
1543            SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1544 
1545     /* Make sure that the headroom and payload fit in a single page */
1546     if (rxq->rx_buf_size + size > PAGE_SIZE)
1547         rxq->rx_buf_size = PAGE_SIZE - size;
1548 
1549     /* Segment size to split a page into multiple equal parts,
1550      * unless XDP is used in which case we'd use the entire page.
1551      */
1552     if (!edev->xdp_prog) {
1553         size = size + rxq->rx_buf_size;
1554         rxq->rx_buf_seg_size = roundup_pow_of_two(size);
1555     } else {
1556         rxq->rx_buf_seg_size = PAGE_SIZE;
1557         edev->ndev->features &= ~NETIF_F_GRO_HW;
1558     }
1559 
1560     /* Allocate the parallel driver ring for Rx buffers */
1561     size = sizeof(*rxq->sw_rx_ring) * RX_RING_SIZE;
1562     rxq->sw_rx_ring = kzalloc(size, GFP_KERNEL);
1563     if (!rxq->sw_rx_ring) {
1564         DP_ERR(edev, "Rx buffers ring allocation failed\n");
1565         rc = -ENOMEM;
1566         goto err;
1567     }
1568 
1569     /* Allocate FW Rx ring */
1570     params.mode = QED_CHAIN_MODE_NEXT_PTR;
1571     params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
1572     params.elem_size = sizeof(struct eth_rx_bd);
1573 
1574     rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_bd_ring, &params);
1575     if (rc)
1576         goto err;
1577 
1578     /* Allocate FW completion ring */
1579     params.mode = QED_CHAIN_MODE_PBL;
1580     params.intended_use = QED_CHAIN_USE_TO_CONSUME;
1581     params.elem_size = sizeof(union eth_rx_cqe);
1582 
1583     rc = edev->ops->common->chain_alloc(cdev, &rxq->rx_comp_ring, &params);
1584     if (rc)
1585         goto err;
1586 
1587     /* Allocate buffers for the Rx ring */
1588     rxq->filled_buffers = 0;
1589     for (i = 0; i < rxq->num_rx_buffers; i++) {
1590         rc = qede_alloc_rx_buffer(rxq, false);
1591         if (rc) {
1592             DP_ERR(edev,
1593                    "Rx buffers allocation failed at index %d\n", i);
1594             goto err;
1595         }
1596     }
1597 
1598     edev->gro_disable = !(edev->ndev->features & NETIF_F_GRO_HW);
1599     if (!edev->gro_disable)
1600         qede_set_tpa_param(rxq);
1601 err:
1602     return rc;
1603 }
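
/* Rx buffer sizing example (illustrative, non-XDP case): with a 1500 byte MTU
 * and a 4K page, rx_buf_size comes out a little above 1500 bytes, and adding
 * the NET_SKB_PAD headroom plus the aligned skb_shared_info still keeps the
 * sum below 2K, so roundup_pow_of_two() yields a 2K rx_buf_seg_size and two
 * buffers are carved out of every page. When an XDP program is attached the
 * whole page is dedicated to one buffer and hardware GRO is turned off.
 */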
1604 
1605 static void qede_free_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1606 {
1607     /* Free the parallel SW ring */
1608     if (txq->is_xdp)
1609         kfree(txq->sw_tx_ring.xdp);
1610     else
1611         kfree(txq->sw_tx_ring.skbs);
1612 
1613     /* Free the real Tx ring used by FW */
1614     edev->ops->common->chain_free(edev->cdev, &txq->tx_pbl);
1615 }
1616 
1617 /* This function allocates all memory needed per Tx queue */
1618 static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
1619 {
1620     struct qed_chain_init_params params = {
1621         .mode       = QED_CHAIN_MODE_PBL,
1622         .intended_use   = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1623         .cnt_type   = QED_CHAIN_CNT_TYPE_U16,
1624         .num_elems  = edev->q_num_tx_buffers,
1625         .elem_size  = sizeof(union eth_tx_bd_types),
1626     };
1627     int size, rc;
1628 
1629     txq->num_tx_buffers = edev->q_num_tx_buffers;
1630 
1631     /* Allocate the parallel driver ring for Tx buffers */
1632     if (txq->is_xdp) {
1633         size = sizeof(*txq->sw_tx_ring.xdp) * txq->num_tx_buffers;
1634         txq->sw_tx_ring.xdp = kzalloc(size, GFP_KERNEL);
1635         if (!txq->sw_tx_ring.xdp)
1636             goto err;
1637     } else {
1638         size = sizeof(*txq->sw_tx_ring.skbs) * txq->num_tx_buffers;
1639         txq->sw_tx_ring.skbs = kzalloc(size, GFP_KERNEL);
1640         if (!txq->sw_tx_ring.skbs)
1641             goto err;
1642     }
1643 
1644     rc = edev->ops->common->chain_alloc(edev->cdev, &txq->tx_pbl, &params);
1645     if (rc)
1646         goto err;
1647 
1648     return 0;
1649 
1650 err:
1651     qede_free_mem_txq(edev, txq);
1652     return -ENOMEM;
1653 }
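
/* Every failure above funnels through qede_free_mem_txq(), so the error path
 * relies on that helper being safe to call on a partially-initialized queue
 * (kfree(NULL), for instance, is simply a no-op).
 */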
1654 
1655 /* This function frees all memory of a single fp */
1656 static void qede_free_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1657 {
1658     qede_free_mem_sb(edev, fp->sb_info, fp->id);
1659 
1660     if (fp->type & QEDE_FASTPATH_RX)
1661         qede_free_mem_rxq(edev, fp->rxq);
1662 
1663     if (fp->type & QEDE_FASTPATH_XDP)
1664         qede_free_mem_txq(edev, fp->xdp_tx);
1665 
1666     if (fp->type & QEDE_FASTPATH_TX) {
1667         int cos;
1668 
1669         for_each_cos_in_txq(edev, cos)
1670             qede_free_mem_txq(edev, &fp->txq[cos]);
1671     }
1672 }
1673 
1674 /* This function allocates all memory needed for a single fp (i.e. an entity
1675  * which contains a status block, one rx queue and/or multiple per-TC tx queues).
1676  */
1677 static int qede_alloc_mem_fp(struct qede_dev *edev, struct qede_fastpath *fp)
1678 {
1679     int rc = 0;
1680 
1681     rc = qede_alloc_mem_sb(edev, fp->sb_info, fp->id);
1682     if (rc)
1683         goto out;
1684 
1685     if (fp->type & QEDE_FASTPATH_RX) {
1686         rc = qede_alloc_mem_rxq(edev, fp->rxq);
1687         if (rc)
1688             goto out;
1689     }
1690 
1691     if (fp->type & QEDE_FASTPATH_XDP) {
1692         rc = qede_alloc_mem_txq(edev, fp->xdp_tx);
1693         if (rc)
1694             goto out;
1695     }
1696 
1697     if (fp->type & QEDE_FASTPATH_TX) {
1698         int cos;
1699 
1700         for_each_cos_in_txq(edev, cos) {
1701             rc = qede_alloc_mem_txq(edev, &fp->txq[cos]);
1702             if (rc)
1703                 goto out;
1704         }
1705     }
1706 
1707 out:
1708     return rc;
1709 }
1710 
1711 static void qede_free_mem_load(struct qede_dev *edev)
1712 {
1713     int i;
1714 
1715     for_each_queue(i) {
1716         struct qede_fastpath *fp = &edev->fp_array[i];
1717 
1718         qede_free_mem_fp(edev, fp);
1719     }
1720 }
1721 
1722 /* This function allocates all qede memory at NIC load. */
1723 static int qede_alloc_mem_load(struct qede_dev *edev)
1724 {
1725     int rc = 0, queue_id;
1726 
1727     for (queue_id = 0; queue_id < QEDE_QUEUE_CNT(edev); queue_id++) {
1728         struct qede_fastpath *fp = &edev->fp_array[queue_id];
1729 
1730         rc = qede_alloc_mem_fp(edev, fp);
1731         if (rc) {
1732             DP_ERR(edev,
1733                    "Failed to allocate memory for fastpath - rss id = %d\n",
1734                    queue_id);
1735             qede_free_mem_load(edev);
1736             return rc;
1737         }
1738     }
1739 
1740     return 0;
1741 }
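
/* Allocation is all-or-nothing: a failure on any fastpath releases everything
 * allocated so far through qede_free_mem_load() before the error is returned.
 */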
1742 
1743 static void qede_empty_tx_queue(struct qede_dev *edev,
1744                 struct qede_tx_queue *txq)
1745 {
1746     unsigned int pkts_compl = 0, bytes_compl = 0;
1747     struct netdev_queue *netdev_txq;
1748     int rc, len = 0;
1749 
1750     netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
1751 
1752     while (qed_chain_get_cons_idx(&txq->tx_pbl) !=
1753            qed_chain_get_prod_idx(&txq->tx_pbl)) {
1754         DP_VERBOSE(edev, NETIF_MSG_IFDOWN,
1755                "Freeing a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1756                txq->index, qed_chain_get_cons_idx(&txq->tx_pbl),
1757                qed_chain_get_prod_idx(&txq->tx_pbl));
1758 
1759         rc = qede_free_tx_pkt(edev, txq, &len);
1760         if (rc) {
1761             DP_NOTICE(edev,
1762                   "Failed to free a packet on tx queue[%d]: chain_cons 0x%x, chain_prod 0x%x\n",
1763                   txq->index,
1764                   qed_chain_get_cons_idx(&txq->tx_pbl),
1765                   qed_chain_get_prod_idx(&txq->tx_pbl));
1766             break;
1767         }
1768 
1769         bytes_compl += len;
1770         pkts_compl++;
1771         txq->sw_tx_cons++;
1772     }
1773 
1774     netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);
1775 }
1776 
1777 static void qede_empty_tx_queues(struct qede_dev *edev)
1778 {
1779     int i;
1780 
1781     for_each_queue(i)
1782         if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
1783             int cos;
1784 
1785             for_each_cos_in_txq(edev, cos) {
1786                 struct qede_fastpath *fp;
1787 
1788                 fp = &edev->fp_array[i];
1789                 qede_empty_tx_queue(edev,
1790                             &fp->txq[cos]);
1791             }
1792         }
1793 }
1794 
1795 /* This function inits fp content and resets the SB, RXQ and TXQ structures */
1796 static void qede_init_fp(struct qede_dev *edev)
1797 {
1798     int queue_id, rxq_index = 0, txq_index = 0;
1799     struct qede_fastpath *fp;
1800     bool init_xdp = false;
1801 
1802     for_each_queue(queue_id) {
1803         fp = &edev->fp_array[queue_id];
1804 
1805         fp->edev = edev;
1806         fp->id = queue_id;
1807 
1808         if (fp->type & QEDE_FASTPATH_XDP) {
1809             fp->xdp_tx->index = QEDE_TXQ_IDX_TO_XDP(edev,
1810                                 rxq_index);
1811             fp->xdp_tx->is_xdp = 1;
1812 
1813             spin_lock_init(&fp->xdp_tx->xdp_tx_lock);
1814             init_xdp = true;
1815         }
1816 
1817         if (fp->type & QEDE_FASTPATH_RX) {
1818             fp->rxq->rxq_id = rxq_index++;
1819 
1820             /* Determine how to map buffers for this queue */
1821             if (fp->type & QEDE_FASTPATH_XDP)
1822                 fp->rxq->data_direction = DMA_BIDIRECTIONAL;
1823             else
1824                 fp->rxq->data_direction = DMA_FROM_DEVICE;
1825             fp->rxq->dev = &edev->pdev->dev;
1826 
1827             /* Driver has no error path from here */
1828             WARN_ON(xdp_rxq_info_reg(&fp->rxq->xdp_rxq, edev->ndev,
1829                          fp->rxq->rxq_id, 0) < 0);
1830 
1831             if (xdp_rxq_info_reg_mem_model(&fp->rxq->xdp_rxq,
1832                                MEM_TYPE_PAGE_ORDER0,
1833                                NULL)) {
1834                 DP_NOTICE(edev,
1835                       "Failed to register XDP memory model\n");
1836             }
1837         }
1838 
1839         if (fp->type & QEDE_FASTPATH_TX) {
1840             int cos;
1841 
1842             for_each_cos_in_txq(edev, cos) {
1843                 struct qede_tx_queue *txq = &fp->txq[cos];
1844                 u16 ndev_tx_id;
1845 
1846                 txq->cos = cos;
1847                 txq->index = txq_index;
1848                 ndev_tx_id = QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq);
1849                 txq->ndev_txq_id = ndev_tx_id;
1850 
1851                 if (edev->dev_info.is_legacy)
1852                     txq->is_legacy = true;
1853                 txq->dev = &edev->pdev->dev;
1854             }
1855 
1856             txq_index++;
1857         }
1858 
1859         snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1860              edev->ndev->name, queue_id);
1861     }
1862 
1863     if (init_xdp) {
1864         edev->total_xdp_queues = QEDE_RSS_COUNT(edev);
1865         DP_INFO(edev, "Total XDP queues: %u\n", edev->total_xdp_queues);
1866     }
1867 }
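
/* The "%s-fp-%d" name assigned above is also what later identifies the
 * interrupt: qede_req_msix_irqs() passes edev->fp_array[i].name to
 * request_irq(), so each fastpath vector shows up under that name in
 * /proc/interrupts.
 */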
1868 
1869 static int qede_set_real_num_queues(struct qede_dev *edev)
1870 {
1871     int rc = 0;
1872 
1873     rc = netif_set_real_num_tx_queues(edev->ndev,
1874                       QEDE_TSS_COUNT(edev) *
1875                       edev->dev_info.num_tc);
1876     if (rc) {
1877         DP_NOTICE(edev, "Failed to set real number of Tx queues\n");
1878         return rc;
1879     }
1880 
1881     rc = netif_set_real_num_rx_queues(edev->ndev, QEDE_RSS_COUNT(edev));
1882     if (rc) {
1883         DP_NOTICE(edev, "Failed to set real number of Rx queues\n");
1884         return rc;
1885     }
1886 
1887     return 0;
1888 }
1889 
1890 static void qede_napi_disable_remove(struct qede_dev *edev)
1891 {
1892     int i;
1893 
1894     for_each_queue(i) {
1895         napi_disable(&edev->fp_array[i].napi);
1896 
1897         netif_napi_del(&edev->fp_array[i].napi);
1898     }
1899 }
1900 
1901 static void qede_napi_add_enable(struct qede_dev *edev)
1902 {
1903     int i;
1904 
1905     /* Add NAPI objects */
1906     for_each_queue(i) {
1907         netif_napi_add(edev->ndev, &edev->fp_array[i].napi,
1908                    qede_poll, NAPI_POLL_WEIGHT);
1909         napi_enable(&edev->fp_array[i].napi);
1910     }
1911 }
1912 
1913 static void qede_sync_free_irqs(struct qede_dev *edev)
1914 {
1915     int i;
1916 
1917     for (i = 0; i < edev->int_info.used_cnt; i++) {
1918         if (edev->int_info.msix_cnt) {
1919             free_irq(edev->int_info.msix[i].vector,
1920                  &edev->fp_array[i]);
1921         } else {
1922             edev->ops->common->simd_handler_clean(edev->cdev, i);
1923         }
1924     }
1925 
1926     edev->int_info.used_cnt = 0;
1927     edev->int_info.msix_cnt = 0;
1928 }
1929 
1930 static int qede_req_msix_irqs(struct qede_dev *edev)
1931 {
1932     int i, rc;
1933 
1934     /* Sanity-check that the number of MSI-X vectors covers all prepared RSS queues */
1935     if (QEDE_QUEUE_CNT(edev) > edev->int_info.msix_cnt) {
1936         DP_ERR(edev,
1937                "Interrupt mismatch: %d RSS queues > %d MSI-x vectors\n",
1938                QEDE_QUEUE_CNT(edev), edev->int_info.msix_cnt);
1939         return -EINVAL;
1940     }
1941 
1942     for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
1943 #ifdef CONFIG_RFS_ACCEL
1944         struct qede_fastpath *fp = &edev->fp_array[i];
1945 
1946         if (edev->ndev->rx_cpu_rmap && (fp->type & QEDE_FASTPATH_RX)) {
1947             rc = irq_cpu_rmap_add(edev->ndev->rx_cpu_rmap,
1948                           edev->int_info.msix[i].vector);
1949             if (rc) {
1950                 DP_ERR(edev, "Failed to add CPU rmap\n");
1951                 qede_free_arfs(edev);
1952             }
1953         }
1954 #endif
1955         rc = request_irq(edev->int_info.msix[i].vector,
1956                  qede_msix_fp_int, 0, edev->fp_array[i].name,
1957                  &edev->fp_array[i]);
1958         if (rc) {
1959             DP_ERR(edev, "Request fp %d irq failed\n", i);
1960 #ifdef CONFIG_RFS_ACCEL
1961             if (edev->ndev->rx_cpu_rmap)
1962                 free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
1963 
1964             edev->ndev->rx_cpu_rmap = NULL;
1965 #endif
1966             qede_sync_free_irqs(edev);
1967             return rc;
1968         }
1969         DP_VERBOSE(edev, NETIF_MSG_INTR,
1970                "Requested fp irq for %s [entry %d]. Cookie is at %p\n",
1971                edev->fp_array[i].name, i,
1972                &edev->fp_array[i]);
1973         edev->int_info.used_cnt++;
1974     }
1975 
1976     return 0;
1977 }
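
/* When CONFIG_RFS_ACCEL is set, each Rx-capable vector is also added to the
 * netdev's rx_cpu_rmap before being requested, so accelerated RFS can steer a
 * flow to the Rx queue whose vector runs on the CPU consuming that flow; on
 * request_irq() failure the rmap is released before the already-acquired
 * vectors are freed.
 */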
1978 
1979 static void qede_simd_fp_handler(void *cookie)
1980 {
1981     struct qede_fastpath *fp = (struct qede_fastpath *)cookie;
1982 
1983     napi_schedule_irqoff(&fp->napi);
1984 }
1985 
1986 static int qede_setup_irqs(struct qede_dev *edev)
1987 {
1988     int i, rc = 0;
1989 
1990     /* Learn Interrupt configuration */
1991     rc = edev->ops->common->get_fp_int(edev->cdev, &edev->int_info);
1992     if (rc)
1993         return rc;
1994 
1995     if (edev->int_info.msix_cnt) {
1996         rc = qede_req_msix_irqs(edev);
1997         if (rc)
1998             return rc;
1999         edev->ndev->irq = edev->int_info.msix[0].vector;
2000     } else {
2001         const struct qed_common_ops *ops;
2002 
2003         /* qed needs to learn the RSS ids and their callbacks */
2004         ops = edev->ops->common;
2005         for (i = 0; i < QEDE_QUEUE_CNT(edev); i++)
2006             ops->simd_handler_config(edev->cdev,
2007                          &edev->fp_array[i], i,
2008                          qede_simd_fp_handler);
2009         edev->int_info.used_cnt = QEDE_QUEUE_CNT(edev);
2010     }
2011     return 0;
2012 }
2013 
2014 static int qede_drain_txq(struct qede_dev *edev,
2015               struct qede_tx_queue *txq, bool allow_drain)
2016 {
2017     int rc, cnt = 1000;
2018 
2019     while (txq->sw_tx_cons != txq->sw_tx_prod) {
2020         if (!cnt) {
2021             if (allow_drain) {
2022                 DP_NOTICE(edev,
2023                       "Tx queue[%d] is stuck, requesting MCP to drain\n",
2024                       txq->index);
2025                 rc = edev->ops->common->drain(edev->cdev);
2026                 if (rc)
2027                     return rc;
2028                 return qede_drain_txq(edev, txq, false);
2029             }
2030             DP_NOTICE(edev,
2031                   "Timeout waiting for tx queue[%d]: PROD=%d, CONS=%d\n",
2032                   txq->index, txq->sw_tx_prod,
2033                   txq->sw_tx_cons);
2034             return -ENODEV;
2035         }
2036         cnt--;
2037         usleep_range(1000, 2000);
2038         barrier();
2039     }
2040 
2041     /* FW finished processing, wait for HW to transmit all tx packets */
2042     usleep_range(1000, 2000);
2043 
2044     return 0;
2045 }
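
/* With cnt starting at 1000 and a 1-2 ms sleep per iteration, the loop above
 * gives a queue roughly one to two seconds to drain on its own.  Only then is
 * the MCP asked to drain it, and the single retry runs with allow_drain set
 * to false, so a second timeout fails with -ENODEV.
 */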
2046 
2047 static int qede_stop_txq(struct qede_dev *edev,
2048              struct qede_tx_queue *txq, int rss_id)
2049 {
2050     /* delete doorbell from doorbell recovery mechanism */
2051     edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr,
2052                        &txq->tx_db);
2053 
2054     return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle);
2055 }
2056 
2057 static int qede_stop_queues(struct qede_dev *edev)
2058 {
2059     struct qed_update_vport_params *vport_update_params;
2060     struct qed_dev *cdev = edev->cdev;
2061     struct qede_fastpath *fp;
2062     int rc, i;
2063 
2064     /* Disable the vport */
2065     vport_update_params = vzalloc(sizeof(*vport_update_params));
2066     if (!vport_update_params)
2067         return -ENOMEM;
2068 
2069     vport_update_params->vport_id = 0;
2070     vport_update_params->update_vport_active_flg = 1;
2071     vport_update_params->vport_active_flg = 0;
2072     vport_update_params->update_rss_flg = 0;
2073 
2074     rc = edev->ops->vport_update(cdev, vport_update_params);
2075     vfree(vport_update_params);
2076 
2077     if (rc) {
2078         DP_ERR(edev, "Failed to update vport\n");
2079         return rc;
2080     }
2081 
2082     /* Flush Tx queues. If needed, request drain from MCP */
2083     for_each_queue(i) {
2084         fp = &edev->fp_array[i];
2085 
2086         if (fp->type & QEDE_FASTPATH_TX) {
2087             int cos;
2088 
2089             for_each_cos_in_txq(edev, cos) {
2090                 rc = qede_drain_txq(edev, &fp->txq[cos], true);
2091                 if (rc)
2092                     return rc;
2093             }
2094         }
2095 
2096         if (fp->type & QEDE_FASTPATH_XDP) {
2097             rc = qede_drain_txq(edev, fp->xdp_tx, true);
2098             if (rc)
2099                 return rc;
2100         }
2101     }
2102 
2103     /* Stop all Queues in reverse order */
2104     for (i = QEDE_QUEUE_CNT(edev) - 1; i >= 0; i--) {
2105         fp = &edev->fp_array[i];
2106 
2107         /* Stop the Tx Queue(s) */
2108         if (fp->type & QEDE_FASTPATH_TX) {
2109             int cos;
2110 
2111             for_each_cos_in_txq(edev, cos) {
2112                 rc = qede_stop_txq(edev, &fp->txq[cos], i);
2113                 if (rc)
2114                     return rc;
2115             }
2116         }
2117 
2118         /* Stop the Rx Queue */
2119         if (fp->type & QEDE_FASTPATH_RX) {
2120             rc = edev->ops->q_rx_stop(cdev, i, fp->rxq->handle);
2121             if (rc) {
2122                 DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
2123                 return rc;
2124             }
2125         }
2126 
2127         /* Stop the XDP forwarding queue */
2128         if (fp->type & QEDE_FASTPATH_XDP) {
2129             rc = qede_stop_txq(edev, fp->xdp_tx, i);
2130             if (rc)
2131                 return rc;
2132 
2133             bpf_prog_put(fp->rxq->xdp_prog);
2134         }
2135     }
2136 
2137     /* Stop the vport */
2138     rc = edev->ops->vport_stop(cdev, 0);
2139     if (rc)
2140         DP_ERR(edev, "Failed to stop VPORT\n");
2141 
2142     return rc;
2143 }
2144 
2145 static int qede_start_txq(struct qede_dev *edev,
2146               struct qede_fastpath *fp,
2147               struct qede_tx_queue *txq, u8 rss_id, u16 sb_idx)
2148 {
2149     dma_addr_t phys_table = qed_chain_get_pbl_phys(&txq->tx_pbl);
2150     u32 page_cnt = qed_chain_get_page_cnt(&txq->tx_pbl);
2151     struct qed_queue_start_common_params params;
2152     struct qed_txq_start_ret_params ret_params;
2153     int rc;
2154 
2155     memset(&params, 0, sizeof(params));
2156     memset(&ret_params, 0, sizeof(ret_params));
2157 
2158     /* Let the XDP queue share the queue-zone with one of the regular txqs.
2159      * We don't really care about its coalescing.
2160      */
2161     if (txq->is_xdp)
2162         params.queue_id = QEDE_TXQ_XDP_TO_IDX(edev, txq);
2163     else
2164         params.queue_id = txq->index;
2165 
2166     params.p_sb = fp->sb_info;
2167     params.sb_idx = sb_idx;
2168     params.tc = txq->cos;
2169 
2170     rc = edev->ops->q_tx_start(edev->cdev, rss_id, &params, phys_table,
2171                    page_cnt, &ret_params);
2172     if (rc) {
2173         DP_ERR(edev, "Start TXQ #%d failed %d\n", txq->index, rc);
2174         return rc;
2175     }
2176 
2177     txq->doorbell_addr = ret_params.p_doorbell;
2178     txq->handle = ret_params.p_handle;
2179 
2180     /* Determine the associated FW consumer address */
2181     txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[sb_idx];
2182 
2183     /* Prepare the doorbell parameters */
2184     SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST, DB_DEST_XCM);
2185     SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
2186     SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_VAL_SEL,
2187           DQ_XCM_ETH_TX_BD_PROD_CMD);
2188     txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
2189 
2190     /* register doorbell with doorbell recovery mechanism */
2191     rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr,
2192                         &txq->tx_db, DB_REC_WIDTH_32B,
2193                         DB_REC_KERNEL);
2194 
2195     return rc;
2196 }
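
/* The doorbell address returned by q_tx_start() is registered with the
 * doorbell recovery mechanism here; qede_stop_txq() deletes the same entry
 * again, keeping the db_recovery_add()/db_recovery_del() calls symmetric
 * across queue start and stop.
 */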
2197 
2198 static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
2199 {
2200     int vlan_removal_en = 1;
2201     struct qed_dev *cdev = edev->cdev;
2202     struct qed_dev_info *qed_info = &edev->dev_info.common;
2203     struct qed_update_vport_params *vport_update_params;
2204     struct qed_queue_start_common_params q_params;
2205     struct qed_start_vport_params start = {0};
2206     int rc, i;
2207 
2208     if (!edev->num_queues) {
2209         DP_ERR(edev,
2210                "Cannot activate V-PORT since there are no Rx queues\n");
2211         return -EINVAL;
2212     }
2213 
2214     vport_update_params = vzalloc(sizeof(*vport_update_params));
2215     if (!vport_update_params)
2216         return -ENOMEM;
2217 
2218     start.handle_ptp_pkts = !!(edev->ptp);
2219     start.gro_enable = !edev->gro_disable;
2220     start.mtu = edev->ndev->mtu;
2221     start.vport_id = 0;
2222     start.drop_ttl0 = true;
2223     start.remove_inner_vlan = vlan_removal_en;
2224     start.clear_stats = clear_stats;
2225 
2226     rc = edev->ops->vport_start(cdev, &start);
2227 
2228     if (rc) {
2229         DP_ERR(edev, "Start V-PORT failed %d\n", rc);
2230         goto out;
2231     }
2232 
2233     DP_VERBOSE(edev, NETIF_MSG_IFUP,
2234            "Start vport ramrod passed, vport_id = %d, MTU = %d, vlan_removal_en = %d\n",
2235            start.vport_id, edev->ndev->mtu + 0xe, vlan_removal_en);
2236 
2237     for_each_queue(i) {
2238         struct qede_fastpath *fp = &edev->fp_array[i];
2239         dma_addr_t p_phys_table;
2240         u32 page_cnt;
2241 
2242         if (fp->type & QEDE_FASTPATH_RX) {
2243             struct qed_rxq_start_ret_params ret_params;
2244             struct qede_rx_queue *rxq = fp->rxq;
2245             __le16 *val;
2246 
2247             memset(&ret_params, 0, sizeof(ret_params));
2248             memset(&q_params, 0, sizeof(q_params));
2249             q_params.queue_id = rxq->rxq_id;
2250             q_params.vport_id = 0;
2251             q_params.p_sb = fp->sb_info;
2252             q_params.sb_idx = RX_PI;
2253 
2254             p_phys_table =
2255                 qed_chain_get_pbl_phys(&rxq->rx_comp_ring);
2256             page_cnt = qed_chain_get_page_cnt(&rxq->rx_comp_ring);
2257 
2258             rc = edev->ops->q_rx_start(cdev, i, &q_params,
2259                            rxq->rx_buf_size,
2260                            rxq->rx_bd_ring.p_phys_addr,
2261                            p_phys_table,
2262                            page_cnt, &ret_params);
2263             if (rc) {
2264                 DP_ERR(edev, "Start RXQ #%d failed %d\n", i,
2265                        rc);
2266                 goto out;
2267             }
2268 
2269             /* Use the return parameters */
2270             rxq->hw_rxq_prod_addr = ret_params.p_prod;
2271             rxq->handle = ret_params.p_handle;
2272 
2273             val = &fp->sb_info->sb_virt->pi_array[RX_PI];
2274             rxq->hw_cons_ptr = val;
2275 
2276             qede_update_rx_prod(edev, rxq);
2277         }
2278 
2279         if (fp->type & QEDE_FASTPATH_XDP) {
2280             rc = qede_start_txq(edev, fp, fp->xdp_tx, i, XDP_PI);
2281             if (rc)
2282                 goto out;
2283 
2284             bpf_prog_add(edev->xdp_prog, 1);
2285             fp->rxq->xdp_prog = edev->xdp_prog;
2286         }
2287 
2288         if (fp->type & QEDE_FASTPATH_TX) {
2289             int cos;
2290 
2291             for_each_cos_in_txq(edev, cos) {
2292                 rc = qede_start_txq(edev, fp, &fp->txq[cos], i,
2293                             TX_PI(cos));
2294                 if (rc)
2295                     goto out;
2296             }
2297         }
2298     }
2299 
2300     /* Prepare and send the vport enable */
2301     vport_update_params->vport_id = start.vport_id;
2302     vport_update_params->update_vport_active_flg = 1;
2303     vport_update_params->vport_active_flg = 1;
2304 
2305     if ((qed_info->b_inter_pf_switch || pci_num_vf(edev->pdev)) &&
2306         qed_info->tx_switching) {
2307         vport_update_params->update_tx_switching_flg = 1;
2308         vport_update_params->tx_switching_flg = 1;
2309     }
2310 
2311     qede_fill_rss_params(edev, &vport_update_params->rss_params,
2312                  &vport_update_params->update_rss_flg);
2313 
2314     rc = edev->ops->vport_update(cdev, vport_update_params);
2315     if (rc)
2316         DP_ERR(edev, "Update V-PORT failed %d\n", rc);
2317 
2318 out:
2319     vfree(vport_update_params);
2320     return rc;
2321 }
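
/* Bring-up order above is the mirror image of qede_stop_queues(): the vport
 * is started first, then the per-fastpath Rx, XDP and per-TC Tx queues, and
 * only once every queue has started is the vport marked active and the RSS /
 * tx-switching configuration pushed via vport_update().
 */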
2322 
2323 enum qede_unload_mode {
2324     QEDE_UNLOAD_NORMAL,
2325     QEDE_UNLOAD_RECOVERY,
2326 };
2327 
2328 static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
2329             bool is_locked)
2330 {
2331     struct qed_link_params link_params;
2332     int rc;
2333 
2334     DP_INFO(edev, "Starting qede unload\n");
2335 
2336     if (!is_locked)
2337         __qede_lock(edev);
2338 
2339     clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2340 
2341     if (mode != QEDE_UNLOAD_RECOVERY)
2342         edev->state = QEDE_STATE_CLOSED;
2343 
2344     qede_rdma_dev_event_close(edev);
2345 
2346     /* Close OS Tx */
2347     netif_tx_disable(edev->ndev);
2348     netif_carrier_off(edev->ndev);
2349 
2350     if (mode != QEDE_UNLOAD_RECOVERY) {
2351         /* Reset the link */
2352         memset(&link_params, 0, sizeof(link_params));
2353         link_params.link_up = false;
2354         edev->ops->common->set_link(edev->cdev, &link_params);
2355 
2356         rc = qede_stop_queues(edev);
2357         if (rc) {
2358 #ifdef CONFIG_RFS_ACCEL
2359             if (edev->dev_info.common.b_arfs_capable) {
2360                 qede_poll_for_freeing_arfs_filters(edev);
2361                 if (edev->ndev->rx_cpu_rmap)
2362                     free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap);
2363 
2364                 edev->ndev->rx_cpu_rmap = NULL;
2365             }
2366 #endif
2367             qede_sync_free_irqs(edev);
2368             goto out;
2369         }
2370 
2371         DP_INFO(edev, "Stopped Queues\n");
2372     }
2373 
2374     qede_vlan_mark_nonconfigured(edev);
2375     edev->ops->fastpath_stop(edev->cdev);
2376 
2377     if (edev->dev_info.common.b_arfs_capable) {
2378         qede_poll_for_freeing_arfs_filters(edev);
2379         qede_free_arfs(edev);
2380     }
2381 
2382     /* Release the interrupts */
2383     qede_sync_free_irqs(edev);
2384     edev->ops->common->set_fp_int(edev->cdev, 0);
2385 
2386     qede_napi_disable_remove(edev);
2387 
2388     if (mode == QEDE_UNLOAD_RECOVERY)
2389         qede_empty_tx_queues(edev);
2390 
2391     qede_free_mem_load(edev);
2392     qede_free_fp_array(edev);
2393 
2394 out:
2395     if (!is_locked)
2396         __qede_unlock(edev);
2397 
2398     if (mode != QEDE_UNLOAD_RECOVERY)
2399         DP_NOTICE(edev, "Link is down\n");
2400 
2401     edev->ptp_skip_txts = 0;
2402 
2403     DP_INFO(edev, "Ending qede unload\n");
2404 }
2405 
2406 enum qede_load_mode {
2407     QEDE_LOAD_NORMAL,
2408     QEDE_LOAD_RELOAD,
2409     QEDE_LOAD_RECOVERY,
2410 };
2411 
2412 static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2413              bool is_locked)
2414 {
2415     struct qed_link_params link_params;
2416     struct ethtool_coalesce coal = {};
2417     u8 num_tc;
2418     int rc, i;
2419 
2420     DP_INFO(edev, "Starting qede load\n");
2421 
2422     if (!is_locked)
2423         __qede_lock(edev);
2424 
2425     rc = qede_set_num_queues(edev);
2426     if (rc)
2427         goto out;
2428 
2429     rc = qede_alloc_fp_array(edev);
2430     if (rc)
2431         goto out;
2432 
2433     qede_init_fp(edev);
2434 
2435     rc = qede_alloc_mem_load(edev);
2436     if (rc)
2437         goto err1;
2438     DP_INFO(edev, "Allocated %d Rx, %d Tx queues\n",
2439         QEDE_RSS_COUNT(edev), QEDE_TSS_COUNT(edev));
2440 
2441     rc = qede_set_real_num_queues(edev);
2442     if (rc)
2443         goto err2;
2444 
2445     if (qede_alloc_arfs(edev)) {
2446         edev->ndev->features &= ~NETIF_F_NTUPLE;
2447         edev->dev_info.common.b_arfs_capable = false;
2448     }
2449 
2450     qede_napi_add_enable(edev);
2451     DP_INFO(edev, "Napi added and enabled\n");
2452 
2453     rc = qede_setup_irqs(edev);
2454     if (rc)
2455         goto err3;
2456     DP_INFO(edev, "Setup IRQs succeeded\n");
2457 
2458     rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
2459     if (rc)
2460         goto err4;
2461     DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
2462 
2463     num_tc = netdev_get_num_tc(edev->ndev);
2464     num_tc = num_tc ? num_tc : edev->dev_info.num_tc;
2465     qede_setup_tc(edev->ndev, num_tc);
2466 
2467     /* Program un-configured VLANs */
2468     qede_configure_vlan_filters(edev);
2469 
2470     set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags);
2471 
2472     /* Ask for link-up using current configuration */
2473     memset(&link_params, 0, sizeof(link_params));
2474     link_params.link_up = true;
2475     edev->ops->common->set_link(edev->cdev, &link_params);
2476 
2477     edev->state = QEDE_STATE_OPEN;
2478 
2479     coal.rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
2480     coal.tx_coalesce_usecs = QED_DEFAULT_TX_USECS;
2481 
2482     for_each_queue(i) {
2483         if (edev->coal_entry[i].isvalid) {
2484             coal.rx_coalesce_usecs = edev->coal_entry[i].rxc;
2485             coal.tx_coalesce_usecs = edev->coal_entry[i].txc;
2486         }
2487         __qede_unlock(edev);
2488         qede_set_per_coalesce(edev->ndev, i, &coal);
2489         __qede_lock(edev);
2490     }
2491     DP_INFO(edev, "Ending qede load successfully\n");
2492 
2493     goto out;
2494 err4:
2495     qede_sync_free_irqs(edev);
2496 err3:
2497     qede_napi_disable_remove(edev);
2498 err2:
2499     qede_free_mem_load(edev);
2500 err1:
2501     edev->ops->common->set_fp_int(edev->cdev, 0);
2502     qede_free_fp_array(edev);
2503     edev->num_queues = 0;
2504     edev->fp_num_tx = 0;
2505     edev->fp_num_rx = 0;
2506 out:
2507     if (!is_locked)
2508         __qede_unlock(edev);
2509 
2510     return rc;
2511 }
2512 
2513 /* 'func' should be able to run between unload and reload assuming interface
2514  * is actually running, or afterwards in case it's currently DOWN.
2515  */
2516 void qede_reload(struct qede_dev *edev,
2517          struct qede_reload_args *args, bool is_locked)
2518 {
2519     if (!is_locked)
2520         __qede_lock(edev);
2521 
2522     /* Since qede_lock is held, the internal state won't change even
2523      * if the netdev state starts transitioning. Check whether the current
2524      * internal configuration indicates the device is up, then reload.
2525      */
2526     if (edev->state == QEDE_STATE_OPEN) {
2527         qede_unload(edev, QEDE_UNLOAD_NORMAL, true);
2528         if (args)
2529             args->func(edev, args);
2530         qede_load(edev, QEDE_LOAD_RELOAD, true);
2531 
2532         /* Since no one is going to do it for us, re-configure */
2533         qede_config_rx_mode(edev->ndev);
2534     } else if (args) {
2535         args->func(edev, args);
2536     }
2537 
2538     if (!is_locked)
2539         __qede_unlock(edev);
2540 }
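
/* Minimal sketch of a qede_reload() user; the callback name below is purely
 * illustrative and not an existing driver function:
 *
 *	static void example_reconfig(struct qede_dev *edev,
 *				     struct qede_reload_args *args)
 *	{
 *		... apply the queued change while the device is unloaded ...
 *	}
 *
 *	struct qede_reload_args args = { .func = example_reconfig };
 *
 *	qede_reload(edev, &args, false);
 *
 * args->func runs between unload and load (or immediately if the interface
 * is currently down), always under the qede lock.
 */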
2541 
2542 /* called with rtnl_lock */
2543 static int qede_open(struct net_device *ndev)
2544 {
2545     struct qede_dev *edev = netdev_priv(ndev);
2546     int rc;
2547 
2548     netif_carrier_off(ndev);
2549 
2550     edev->ops->common->set_power_state(edev->cdev, PCI_D0);
2551 
2552     rc = qede_load(edev, QEDE_LOAD_NORMAL, false);
2553     if (rc)
2554         return rc;
2555 
2556     udp_tunnel_nic_reset_ntf(ndev);
2557 
2558     edev->ops->common->update_drv_state(edev->cdev, true);
2559 
2560     return 0;
2561 }
2562 
2563 static int qede_close(struct net_device *ndev)
2564 {
2565     struct qede_dev *edev = netdev_priv(ndev);
2566 
2567     qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
2568 
2569     if (edev->cdev)
2570         edev->ops->common->update_drv_state(edev->cdev, false);
2571 
2572     return 0;
2573 }
2574 
2575 static void qede_link_update(void *dev, struct qed_link_output *link)
2576 {
2577     struct qede_dev *edev = dev;
2578 
2579     if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) {
2580         DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n");
2581         return;
2582     }
2583 
2584     if (link->link_up) {
2585         if (!netif_carrier_ok(edev->ndev)) {
2586             DP_NOTICE(edev, "Link is up\n");
2587             netif_tx_start_all_queues(edev->ndev);
2588             netif_carrier_on(edev->ndev);
2589             qede_rdma_dev_event_open(edev);
2590         }
2591     } else {
2592         if (netif_carrier_ok(edev->ndev)) {
2593             DP_NOTICE(edev, "Link is down\n");
2594             netif_tx_disable(edev->ndev);
2595             netif_carrier_off(edev->ndev);
2596             qede_rdma_dev_event_close(edev);
2597         }
2598     }
2599 }
2600 
2601 static void qede_schedule_recovery_handler(void *dev)
2602 {
2603     struct qede_dev *edev = dev;
2604 
2605     if (edev->state == QEDE_STATE_RECOVERY) {
2606         DP_NOTICE(edev,
2607               "Avoid scheduling recovery handling since already in recovery state\n");
2608         return;
2609     }
2610 
2611     set_bit(QEDE_SP_RECOVERY, &edev->sp_flags);
2612     schedule_delayed_work(&edev->sp_task, 0);
2613 
2614     DP_INFO(edev, "Scheduled a recovery handler\n");
2615 }
2616 
2617 static void qede_recovery_failed(struct qede_dev *edev)
2618 {
2619     netdev_err(edev->ndev, "Recovery handling has failed. Power cycle is needed.\n");
2620 
2621     netif_device_detach(edev->ndev);
2622 
2623     if (edev->cdev)
2624         edev->ops->common->set_power_state(edev->cdev, PCI_D3hot);
2625 }
2626 
2627 static void qede_recovery_handler(struct qede_dev *edev)
2628 {
2629     u32 curr_state = edev->state;
2630     int rc;
2631 
2632     DP_NOTICE(edev, "Starting a recovery process\n");
2633 
2634     /* No need to acquire the qede_lock first since this is done by qede_sp_task
2635      * before calling this function.
2636      */
2637     edev->state = QEDE_STATE_RECOVERY;
2638 
2639     edev->ops->common->recovery_prolog(edev->cdev);
2640 
2641     if (curr_state == QEDE_STATE_OPEN)
2642         qede_unload(edev, QEDE_UNLOAD_RECOVERY, true);
2643 
2644     __qede_remove(edev->pdev, QEDE_REMOVE_RECOVERY);
2645 
2646     rc = __qede_probe(edev->pdev, edev->dp_module, edev->dp_level,
2647               IS_VF(edev), QEDE_PROBE_RECOVERY);
2648     if (rc) {
2649         edev->cdev = NULL;
2650         goto err;
2651     }
2652 
2653     if (curr_state == QEDE_STATE_OPEN) {
2654         rc = qede_load(edev, QEDE_LOAD_RECOVERY, true);
2655         if (rc)
2656             goto err;
2657 
2658         qede_config_rx_mode(edev->ndev);
2659         udp_tunnel_nic_reset_ntf(edev->ndev);
2660     }
2661 
2662     edev->state = curr_state;
2663 
2664     DP_NOTICE(edev, "Recovery handling is done\n");
2665 
2666     return;
2667 
2668 err:
2669     qede_recovery_failed(edev);
2670 }
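
/* Recovery is effectively a full remove/probe cycle: the device is torn down
 * with __qede_remove(QEDE_REMOVE_RECOVERY), re-probed with
 * QEDE_PROBE_RECOVERY, and only if the interface was OPEN before the error is
 * the datapath reloaded and the Rx mode / UDP tunnel state re-applied.
 */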
2671 
2672 static void qede_atomic_hw_err_handler(struct qede_dev *edev)
2673 {
2674     struct qed_dev *cdev = edev->cdev;
2675 
2676     DP_NOTICE(edev,
2677           "Generic non-sleepable HW error handling started - err_flags 0x%lx\n",
2678           edev->err_flags);
2679 
2680     /* Get a call trace of the flow that led to the error */
2681     WARN_ON(test_bit(QEDE_ERR_WARN, &edev->err_flags));
2682 
2683     /* Prevent HW attentions from being reasserted */
2684     if (test_bit(QEDE_ERR_ATTN_CLR_EN, &edev->err_flags))
2685         edev->ops->common->attn_clr_enable(cdev, true);
2686 
2687     DP_NOTICE(edev, "Generic non-sleepable HW error handling is done\n");
2688 }
2689 
2690 static void qede_generic_hw_err_handler(struct qede_dev *edev)
2691 {
2692     DP_NOTICE(edev,
2693           "Generic sleepable HW error handling started - err_flags 0x%lx\n",
2694           edev->err_flags);
2695 
2696     if (edev->devlink) {
2697         DP_NOTICE(edev, "Reporting fatal error to devlink\n");
2698         edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
2699     }
2700 
2701     clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2702 
2703     DP_NOTICE(edev, "Generic sleepable HW error handling is done\n");
2704 }
2705 
2706 static void qede_set_hw_err_flags(struct qede_dev *edev,
2707                   enum qed_hw_err_type err_type)
2708 {
2709     unsigned long err_flags = 0;
2710 
2711     switch (err_type) {
2712     case QED_HW_ERR_DMAE_FAIL:
2713         set_bit(QEDE_ERR_WARN, &err_flags);
2714         fallthrough;
2715     case QED_HW_ERR_MFW_RESP_FAIL:
2716     case QED_HW_ERR_HW_ATTN:
2717     case QED_HW_ERR_RAMROD_FAIL:
2718     case QED_HW_ERR_FW_ASSERT:
2719         set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags);
2720         set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags);
2721         /* Mark this error as recoverable and start recovery */
2722         set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags);
2723         break;
2724 
2725     default:
2726         DP_NOTICE(edev, "Unexpected HW error [%d]\n", err_type);
2727         break;
2728     }
2729 
2730     edev->err_flags |= err_flags;
2731 }
2732 
2733 static void qede_schedule_hw_err_handler(void *dev,
2734                      enum qed_hw_err_type err_type)
2735 {
2736     struct qede_dev *edev = dev;
2737 
2738     /* Fan failure cannot be masked by handling of another HW error or by a
2739      * concurrent recovery process.
2740      */
2741     if ((test_and_set_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags) ||
2742          edev->state == QEDE_STATE_RECOVERY) &&
2743          err_type != QED_HW_ERR_FAN_FAIL) {
2744         DP_INFO(edev,
2745             "Avoid scheduling error handling while another HW error is being handled\n");
2746         return;
2747     }
2748 
2749     if (err_type >= QED_HW_ERR_LAST) {
2750         DP_NOTICE(edev, "Unknown HW error [%d]\n", err_type);
2751         clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
2752         return;
2753     }
2754 
2755     edev->last_err_type = err_type;
2756     qede_set_hw_err_flags(edev, err_type);
2757     qede_atomic_hw_err_handler(edev);
2758     set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
2759     schedule_delayed_work(&edev->sp_task, 0);
2760 
2761     DP_INFO(edev, "Scheduled an error handler [err_type %d]\n", err_type);
2762 }
2763 
2764 static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
2765 {
2766     struct netdev_queue *netdev_txq;
2767 
2768     netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);
2769     if (netif_xmit_stopped(netdev_txq))
2770         return true;
2771 
2772     return false;
2773 }
2774 
2775 static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
2776 {
2777     struct qede_dev *edev = dev;
2778     struct netdev_hw_addr *ha;
2779     int i;
2780 
2781     if (edev->ndev->features & NETIF_F_IP_CSUM)
2782         data->feat_flags |= QED_TLV_IP_CSUM;
2783     if (edev->ndev->features & NETIF_F_TSO)
2784         data->feat_flags |= QED_TLV_LSO;
2785 
2786     ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
2787     eth_zero_addr(data->mac[1]);
2788     eth_zero_addr(data->mac[2]);
2789     /* Copy the first two UC macs */
2790     netif_addr_lock_bh(edev->ndev);
2791     i = 1;
2792     netdev_for_each_uc_addr(ha, edev->ndev) {
2793         ether_addr_copy(data->mac[i++], ha->addr);
2794         if (i == QED_TLV_MAC_COUNT)
2795             break;
2796     }
2797 
2798     netif_addr_unlock_bh(edev->ndev);
2799 }
2800 
2801 static void qede_get_eth_tlv_data(void *dev, void *data)
2802 {
2803     struct qed_mfw_tlv_eth *etlv = data;
2804     struct qede_dev *edev = dev;
2805     struct qede_fastpath *fp;
2806     int i;
2807 
2808     etlv->lso_maxoff_size = 0XFFFF;
2809     etlv->lso_maxoff_size_set = true;
2810     etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
2811     etlv->lso_minseg_size_set = true;
2812     etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
2813     etlv->prom_mode_set = true;
2814     etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
2815     etlv->tx_descr_size_set = true;
2816     etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
2817     etlv->rx_descr_size_set = true;
2818     etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
2819     etlv->iov_offload_set = true;
2820 
2821     /* Fill information regarding queues; should be done under the qede
2822      * lock to guarantee those don't change beneath our feet.
2823      */
2824     etlv->txqs_empty = true;
2825     etlv->rxqs_empty = true;
2826     etlv->num_txqs_full = 0;
2827     etlv->num_rxqs_full = 0;
2828 
2829     __qede_lock(edev);
2830     for_each_queue(i) {
2831         fp = &edev->fp_array[i];
2832         if (fp->type & QEDE_FASTPATH_TX) {
2833             struct qede_tx_queue *txq = QEDE_FP_TC0_TXQ(fp);
2834 
2835             if (txq->sw_tx_cons != txq->sw_tx_prod)
2836                 etlv->txqs_empty = false;
2837             if (qede_is_txq_full(edev, txq))
2838                 etlv->num_txqs_full++;
2839         }
2840         if (fp->type & QEDE_FASTPATH_RX) {
2841             if (qede_has_rx_work(fp->rxq))
2842                 etlv->rxqs_empty = false;
2843 
2844             /* This one is a bit tricky; firmware might stop
2845              * placing packets even before the ring is completely full,
2846              * so give an approximation.
2847              */
2848             if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
2849                 qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
2850                 RX_RING_SIZE - 100)
2851                 etlv->num_rxqs_full++;
2852         }
2853     }
2854     __qede_unlock(edev);
2855 
2856     etlv->txqs_empty_set = true;
2857     etlv->rxqs_empty_set = true;
2858     etlv->num_txqs_full_set = true;
2859     etlv->num_rxqs_full_set = true;
2860 }
2861 
2862 /**
2863  * qede_io_error_detected(): Called when PCI error is detected
2864  *
2865  * @pdev: Pointer to PCI device
2866  * @state: The current pci connection state
2867  *
2868  * Return: pci_ers_result_t.
2869  *
2870  * This function is called after a PCI bus error affecting
2871  * this device has been detected.
2872  */
2873 static pci_ers_result_t
2874 qede_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
2875 {
2876     struct net_device *dev = pci_get_drvdata(pdev);
2877     struct qede_dev *edev = netdev_priv(dev);
2878 
2879     if (!edev)
2880         return PCI_ERS_RESULT_NONE;
2881 
2882     DP_NOTICE(edev, "IO error detected [%d]\n", state);
2883 
2884     __qede_lock(edev);
2885     if (edev->state == QEDE_STATE_RECOVERY) {
2886         DP_NOTICE(edev, "Device already in the recovery state\n");
2887         __qede_unlock(edev);
2888         return PCI_ERS_RESULT_NONE;
2889     }
2890 
2891     /* PF handles the recovery of its VFs */
2892     if (IS_VF(edev)) {
2893         DP_VERBOSE(edev, QED_MSG_IOV,
2894                "VF recovery is handled by its PF\n");
2895         __qede_unlock(edev);
2896         return PCI_ERS_RESULT_RECOVERED;
2897     }
2898 
2899     /* Close OS Tx */
2900     netif_tx_disable(edev->ndev);
2901     netif_carrier_off(edev->ndev);
2902 
2903     set_bit(QEDE_SP_AER, &edev->sp_flags);
2904     schedule_delayed_work(&edev->sp_task, 0);
2905 
2906     __qede_unlock(edev);
2907 
2908     return PCI_ERS_RESULT_CAN_RECOVER;
2909 }