0001 /* Broadcom NetXtreme-C/E network driver.
0002  *
0003  * Copyright (c) 2014-2016 Broadcom Corporation
0004  * Copyright (c) 2016-2019 Broadcom Limited
0005  *
0006  * This program is free software; you can redistribute it and/or modify
0007  * it under the terms of the GNU General Public License as published by
0008  * the Free Software Foundation.
0009  */
0010 
0011 #include <linux/module.h>
0012 
0013 #include <linux/stringify.h>
0014 #include <linux/kernel.h>
0015 #include <linux/timer.h>
0016 #include <linux/errno.h>
0017 #include <linux/ioport.h>
0018 #include <linux/slab.h>
0019 #include <linux/vmalloc.h>
0020 #include <linux/interrupt.h>
0021 #include <linux/pci.h>
0022 #include <linux/netdevice.h>
0023 #include <linux/etherdevice.h>
0024 #include <linux/skbuff.h>
0025 #include <linux/dma-mapping.h>
0026 #include <linux/bitops.h>
0027 #include <linux/io.h>
0028 #include <linux/irq.h>
0029 #include <linux/delay.h>
0030 #include <asm/byteorder.h>
0031 #include <asm/page.h>
0032 #include <linux/time.h>
0033 #include <linux/mii.h>
0034 #include <linux/mdio.h>
0035 #include <linux/if.h>
0036 #include <linux/if_vlan.h>
0037 #include <linux/if_bridge.h>
0038 #include <linux/rtc.h>
0039 #include <linux/bpf.h>
0040 #include <net/gro.h>
0041 #include <net/ip.h>
0042 #include <net/tcp.h>
0043 #include <net/udp.h>
0044 #include <net/checksum.h>
0045 #include <net/ip6_checksum.h>
0046 #include <net/udp_tunnel.h>
0047 #include <linux/workqueue.h>
0048 #include <linux/prefetch.h>
0049 #include <linux/cache.h>
0050 #include <linux/log2.h>
0051 #include <linux/aer.h>
0052 #include <linux/bitmap.h>
0053 #include <linux/cpu_rmap.h>
0054 #include <linux/cpumask.h>
0055 #include <net/pkt_cls.h>
0056 #include <linux/hwmon.h>
0057 #include <linux/hwmon-sysfs.h>
0058 #include <net/page_pool.h>
0059 #include <linux/align.h>
0060 
0061 #include "bnxt_hsi.h"
0062 #include "bnxt.h"
0063 #include "bnxt_hwrm.h"
0064 #include "bnxt_ulp.h"
0065 #include "bnxt_sriov.h"
0066 #include "bnxt_ethtool.h"
0067 #include "bnxt_dcb.h"
0068 #include "bnxt_xdp.h"
0069 #include "bnxt_ptp.h"
0070 #include "bnxt_vfr.h"
0071 #include "bnxt_tc.h"
0072 #include "bnxt_devlink.h"
0073 #include "bnxt_debugfs.h"
0074 
0075 #define BNXT_TX_TIMEOUT     (5 * HZ)
0076 #define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW | \
0077                  NETIF_MSG_TX_ERR)
0078 
0079 MODULE_LICENSE("GPL");
0080 MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
0081 
0082 #define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
0083 #define BNXT_RX_DMA_OFFSET NET_SKB_PAD
0084 #define BNXT_RX_COPY_THRESH 256
0085 
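     /* Maximum packet size eligible for the TX "push" fast path, where the
      * BDs and packet data are written straight through the doorbell BAR
      * instead of being fetched by DMA (see bnxt_start_xmit()).
      */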
0086 #define BNXT_TX_PUSH_THRESH 164
0087 
0088 /* indexed by enum board_idx */
0089 static const struct {
0090     char *name;
0091 } board_info[] = {
0092     [BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
0093     [BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
0094     [BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
0095     [BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
0096     [BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
0097     [BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
0098     [BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
0099     [BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
0100     [BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
0101     [BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
0102     [BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
0103     [BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
0104     [BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
0105     [BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
0106     [BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
0107     [BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
0108     [BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
0109     [BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
0110     [BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
0111     [BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
0112     [BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
0113     [BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
0114     [BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
0115     [BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
0116     [BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
0117     [BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
0118     [BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
0119     [BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
0120     [BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
0121     [BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
0122     [BCM57504] = { "Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
0123     [BCM57502] = { "Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb Ethernet" },
0124     [BCM57508_NPAR] = { "Broadcom BCM57508 NetXtreme-E Ethernet Partition" },
0125     [BCM57504_NPAR] = { "Broadcom BCM57504 NetXtreme-E Ethernet Partition" },
0126     [BCM57502_NPAR] = { "Broadcom BCM57502 NetXtreme-E Ethernet Partition" },
0127     [BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
0128     [BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
0129     [BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
0130     [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
0131     [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
0132     [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
0133     [NETXTREME_C_VF_HV] = { "Broadcom NetXtreme-C Virtual Function for Hyper-V" },
0134     [NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
0135     [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
0136     [NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
0137 };
0138 
0139 static const struct pci_device_id bnxt_pci_tbl[] = {
0140     { PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
0141     { PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
0142     { PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
0143     { PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
0144     { PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
0145     { PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
0146     { PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
0147     { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
0148     { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
0149     { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
0150     { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
0151     { PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
0152     { PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
0153     { PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
0154     { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
0155     { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
0156     { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
0157     { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
0158     { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
0159     { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
0160     { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
0161     { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
0162     { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
0163     { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
0164     { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
0165     { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
0166     { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
0167     { PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
0168     { PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
0169     { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
0170     { PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
0171     { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
0172     { PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
0173     { PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
0174     { PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
0175     { PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
0176     { PCI_VDEVICE(BROADCOM, 0x1751), .driver_data = BCM57504 },
0177     { PCI_VDEVICE(BROADCOM, 0x1752), .driver_data = BCM57502 },
0178     { PCI_VDEVICE(BROADCOM, 0x1800), .driver_data = BCM57508_NPAR },
0179     { PCI_VDEVICE(BROADCOM, 0x1801), .driver_data = BCM57504_NPAR },
0180     { PCI_VDEVICE(BROADCOM, 0x1802), .driver_data = BCM57502_NPAR },
0181     { PCI_VDEVICE(BROADCOM, 0x1803), .driver_data = BCM57508_NPAR },
0182     { PCI_VDEVICE(BROADCOM, 0x1804), .driver_data = BCM57504_NPAR },
0183     { PCI_VDEVICE(BROADCOM, 0x1805), .driver_data = BCM57502_NPAR },
0184     { PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
0185     { PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
0186 #ifdef CONFIG_BNXT_SRIOV
0187     { PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
0188     { PCI_VDEVICE(BROADCOM, 0x1607), .driver_data = NETXTREME_E_VF_HV },
0189     { PCI_VDEVICE(BROADCOM, 0x1608), .driver_data = NETXTREME_E_VF_HV },
0190     { PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
0191     { PCI_VDEVICE(BROADCOM, 0x16bd), .driver_data = NETXTREME_E_VF_HV },
0192     { PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
0193     { PCI_VDEVICE(BROADCOM, 0x16c2), .driver_data = NETXTREME_C_VF_HV },
0194     { PCI_VDEVICE(BROADCOM, 0x16c3), .driver_data = NETXTREME_C_VF_HV },
0195     { PCI_VDEVICE(BROADCOM, 0x16c4), .driver_data = NETXTREME_E_VF_HV },
0196     { PCI_VDEVICE(BROADCOM, 0x16c5), .driver_data = NETXTREME_E_VF_HV },
0197     { PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
0198     { PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
0199     { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
0200     { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
0201     { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
0202     { PCI_VDEVICE(BROADCOM, 0x16e6), .driver_data = NETXTREME_C_VF_HV },
0203     { PCI_VDEVICE(BROADCOM, 0x1806), .driver_data = NETXTREME_E_P5_VF },
0204     { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
0205     { PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
0206     { PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
0207     { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
0208 #endif
0209     { 0 }
0210 };
0211 
0212 MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
0213 
0214 static const u16 bnxt_vf_req_snif[] = {
0215     HWRM_FUNC_CFG,
0216     HWRM_FUNC_VF_CFG,
0217     HWRM_PORT_PHY_QCFG,
0218     HWRM_CFA_L2_FILTER_ALLOC,
0219 };
0220 
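     /* Completion-ring async event IDs that the driver registers with the
      * firmware so it is notified of link changes, resets, error recovery,
      * PTP events, etc.
      */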
0221 static const u16 bnxt_async_events_arr[] = {
0222     ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
0223     ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
0224     ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
0225     ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
0226     ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
0227     ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
0228     ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
0229     ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
0230     ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
0231     ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
0232     ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
0233     ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
0234     ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
0235     ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
0236     ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
0237     ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
0238 };
0239 
0240 static struct workqueue_struct *bnxt_pf_wq;
0241 
0242 static bool bnxt_vf_pciid(enum board_idx idx)
0243 {
0244     return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
0245         idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
0246         idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
0247         idx == NETXTREME_E_P5_VF_HV);
0248 }
0249 
0250 #define DB_CP_REARM_FLAGS   (DB_KEY_CP | DB_IDX_VALID)
0251 #define DB_CP_FLAGS     (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
0252 #define DB_CP_IRQ_DIS_FLAGS (DB_KEY_CP | DB_IRQ_DIS)
0253 
0254 #define BNXT_CP_DB_IRQ_DIS(db)                      \
0255         writel(DB_CP_IRQ_DIS_FLAGS, db)
0256 
0257 #define BNXT_DB_CQ(db, idx)                     \
0258     writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)
0259 
0260 #define BNXT_DB_NQ_P5(db, idx)                      \
0261     bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx),   \
0262             (db)->doorbell)
0263 
0264 #define BNXT_DB_CQ_ARM(db, idx)                     \
0265     writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)
0266 
0267 #define BNXT_DB_NQ_ARM_P5(db, idx)                  \
0268     bnxt_writeq(bp, (db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx),\
0269             (db)->doorbell)
0270 
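     /* Doorbell helpers: P5 (57500-series) chips use 64-bit doorbells that
      * encode a type (NQ, CQ, ...) and are written with bnxt_writeq(); older
      * chips use 32-bit completion-ring doorbells written with writel().
      */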
0271 static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
0272 {
0273     if (bp->flags & BNXT_FLAG_CHIP_P5)
0274         BNXT_DB_NQ_P5(db, idx);
0275     else
0276         BNXT_DB_CQ(db, idx);
0277 }
0278 
0279 static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
0280 {
0281     if (bp->flags & BNXT_FLAG_CHIP_P5)
0282         BNXT_DB_NQ_ARM_P5(db, idx);
0283     else
0284         BNXT_DB_CQ_ARM(db, idx);
0285 }
0286 
0287 static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
0288 {
0289     if (bp->flags & BNXT_FLAG_CHIP_P5)
0290         bnxt_writeq(bp, db->db_key64 | DBR_TYPE_CQ_ARMALL |
0291                 RING_CMP(idx), db->doorbell);
0292     else
0293         BNXT_DB_CQ(db, idx);
0294 }
0295 
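     /* TX length hint passed to the hardware, indexed by packet length in
      * 512-byte units (length >> 9 in bnxt_start_xmit()).
      */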
0296 const u16 bnxt_lhint_arr[] = {
0297     TX_BD_FLAGS_LHINT_512_AND_SMALLER,
0298     TX_BD_FLAGS_LHINT_512_TO_1023,
0299     TX_BD_FLAGS_LHINT_1024_TO_2047,
0300     TX_BD_FLAGS_LHINT_1024_TO_2047,
0301     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0302     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0303     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0304     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0305     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0306     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0307     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0308     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0309     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0310     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0311     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0312     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0313     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0314     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0315     TX_BD_FLAGS_LHINT_2048_AND_LARGER,
0316 };
0317 
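     /* The CFA action for a TX packet is taken from the skb's hardware-port
      * metadata dst, if present; this is how packets sent on VF representor
      * netdevs are steered to the corresponding VF by the hardware.
      */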
0318 static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
0319 {
0320     struct metadata_dst *md_dst = skb_metadata_dst(skb);
0321 
0322     if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
0323         return 0;
0324 
0325     return md_dst->u.port_info.port_id;
0326 }
0327 
0328 static void bnxt_txr_db_kick(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
0329                  u16 prod)
0330 {
0331     bnxt_db_write(bp, &txr->tx_db, prod);
0332     txr->kick_pending = 0;
0333 }
0334 
0335 static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
0336                       struct bnxt_tx_ring_info *txr,
0337                       struct netdev_queue *txq)
0338 {
0339     netif_tx_stop_queue(txq);
0340 
0341     /* netif_tx_stop_queue() must be done before checking
0342      * tx index in bnxt_tx_avail() below, because in
0343      * bnxt_tx_int(), we update tx index before checking for
0344      * netif_tx_queue_stopped().
0345      */
0346     smp_mb();
0347     if (bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh) {
0348         netif_tx_wake_queue(txq);
0349         return false;
0350     }
0351 
0352     return true;
0353 }
0354 
0355 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
0356 {
0357     struct bnxt *bp = netdev_priv(dev);
0358     struct tx_bd *txbd;
0359     struct tx_bd_ext *txbd1;
0360     struct netdev_queue *txq;
0361     int i;
0362     dma_addr_t mapping;
0363     unsigned int length, pad = 0;
0364     u32 len, free_size, vlan_tag_flags, cfa_action, flags;
0365     u16 prod, last_frag;
0366     struct pci_dev *pdev = bp->pdev;
0367     struct bnxt_tx_ring_info *txr;
0368     struct bnxt_sw_tx_bd *tx_buf;
0369     __le32 lflags = 0;
0370 
0371     i = skb_get_queue_mapping(skb);
0372     if (unlikely(i >= bp->tx_nr_rings)) {
0373         dev_kfree_skb_any(skb);
0374         dev_core_stats_tx_dropped_inc(dev);
0375         return NETDEV_TX_OK;
0376     }
0377 
0378     txq = netdev_get_tx_queue(dev, i);
0379     txr = &bp->tx_ring[bp->tx_ring_map[i]];
0380     prod = txr->tx_prod;
0381 
0382     free_size = bnxt_tx_avail(bp, txr);
0383     if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
0384         /* We must have raced with NAPI cleanup */
0385         if (net_ratelimit() && txr->kick_pending)
0386             netif_warn(bp, tx_err, dev,
0387                    "bnxt: ring busy w/ flush pending!\n");
0388         if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
0389             return NETDEV_TX_BUSY;
0390     }
0391 
0392     length = skb->len;
0393     len = skb_headlen(skb);
0394     last_frag = skb_shinfo(skb)->nr_frags;
0395 
0396     txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
0397 
0398     txbd->tx_bd_opaque = prod;
0399 
0400     tx_buf = &txr->tx_buf_ring[prod];
0401     tx_buf->skb = skb;
0402     tx_buf->nr_frags = last_frag;
0403 
0404     vlan_tag_flags = 0;
0405     cfa_action = bnxt_xmit_get_cfa_action(skb);
0406     if (skb_vlan_tag_present(skb)) {
0407         vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
0408                  skb_vlan_tag_get(skb);
0409         /* Currently supports 8021Q, 8021AD vlan offloads
0410          * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
0411          */
0412         if (skb->vlan_proto == htons(ETH_P_8021Q))
0413             vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
0414     }
0415 
0416     if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
0417         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
0418 
0419         if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
0420             atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
0421             if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
0422                         &ptp->tx_hdr_off)) {
0423                 if (vlan_tag_flags)
0424                     ptp->tx_hdr_off += VLAN_HLEN;
0425                 lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
0426                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
0427             } else {
0428                 atomic_inc(&bp->ptp_cfg->tx_avail);
0429             }
0430         }
0431     }
0432 
0433     if (unlikely(skb->no_fcs))
0434         lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
0435 
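         /* TX push: the ring is empty, the packet is small and needs no
          * special lflags, so assemble the BDs and payload in the push
          * buffer and write them through the doorbell BAR, avoiding the
          * DMA fetch for this packet.
          */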
0436     if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh &&
0437         !lflags) {
0438         struct tx_push_buffer *tx_push_buf = txr->tx_push;
0439         struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
0440         struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
0441         void __iomem *db = txr->tx_db.doorbell;
0442         void *pdata = tx_push_buf->data;
0443         u64 *end;
0444         int j, push_len;
0445 
0446         /* Set COAL_NOW to be ready quickly for the next push */
0447         tx_push->tx_bd_len_flags_type =
0448             cpu_to_le32((length << TX_BD_LEN_SHIFT) |
0449                     TX_BD_TYPE_LONG_TX_BD |
0450                     TX_BD_FLAGS_LHINT_512_AND_SMALLER |
0451                     TX_BD_FLAGS_COAL_NOW |
0452                     TX_BD_FLAGS_PACKET_END |
0453                     (2 << TX_BD_FLAGS_BD_CNT_SHIFT));
0454 
0455         if (skb->ip_summed == CHECKSUM_PARTIAL)
0456             tx_push1->tx_bd_hsize_lflags =
0457                     cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
0458         else
0459             tx_push1->tx_bd_hsize_lflags = 0;
0460 
0461         tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
0462         tx_push1->tx_bd_cfa_action =
0463             cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
0464 
0465         end = pdata + length;
0466         end = PTR_ALIGN(end, 8) - 1;
0467         *end = 0;
0468 
0469         skb_copy_from_linear_data(skb, pdata, len);
0470         pdata += len;
0471         for (j = 0; j < last_frag; j++) {
0472             skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
0473             void *fptr;
0474 
0475             fptr = skb_frag_address_safe(frag);
0476             if (!fptr)
0477                 goto normal_tx;
0478 
0479             memcpy(pdata, fptr, skb_frag_size(frag));
0480             pdata += skb_frag_size(frag);
0481         }
0482 
0483         txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
0484         txbd->tx_bd_haddr = txr->data_mapping;
0485         prod = NEXT_TX(prod);
0486         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
0487         memcpy(txbd, tx_push1, sizeof(*txbd));
0488         prod = NEXT_TX(prod);
0489         tx_push->doorbell =
0490             cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
0491         txr->tx_prod = prod;
0492 
0493         tx_buf->is_push = 1;
0494         netdev_tx_sent_queue(txq, skb->len);
0495         wmb();  /* Sync is_push and byte queue before pushing data */
0496 
0497         push_len = (length + sizeof(*tx_push) + 7) / 8;
0498         if (push_len > 16) {
0499             __iowrite64_copy(db, tx_push_buf, 16);
0500             __iowrite32_copy(db + 4, tx_push_buf + 1,
0501                      (push_len - 16) << 1);
0502         } else {
0503             __iowrite64_copy(db, tx_push_buf, push_len);
0504         }
0505 
0506         goto tx_done;
0507     }
0508 
0509 normal_tx:
0510     if (length < BNXT_MIN_PKT_SIZE) {
0511         pad = BNXT_MIN_PKT_SIZE - length;
0512         if (skb_pad(skb, pad))
0513             /* SKB already freed. */
0514             goto tx_kick_pending;
0515         length = BNXT_MIN_PKT_SIZE;
0516     }
0517 
0518     mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
0519 
0520     if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
0521         goto tx_free;
0522 
0523     dma_unmap_addr_set(tx_buf, mapping, mapping);
0524     flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
0525         ((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
0526 
0527     txbd->tx_bd_haddr = cpu_to_le64(mapping);
0528 
0529     prod = NEXT_TX(prod);
0530     txbd1 = (struct tx_bd_ext *)
0531         &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
0532 
0533     txbd1->tx_bd_hsize_lflags = lflags;
0534     if (skb_is_gso(skb)) {
0535         u32 hdr_len;
0536 
0537         if (skb->encapsulation)
0538             hdr_len = skb_inner_tcp_all_headers(skb);
0539         else
0540             hdr_len = skb_tcp_all_headers(skb);
0541 
0542         txbd1->tx_bd_hsize_lflags |= cpu_to_le32(TX_BD_FLAGS_LSO |
0543                     TX_BD_FLAGS_T_IPID |
0544                     (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
0545         length = skb_shinfo(skb)->gso_size;
0546         txbd1->tx_bd_mss = cpu_to_le32(length);
0547         length += hdr_len;
0548     } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
0549         txbd1->tx_bd_hsize_lflags |=
0550             cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
0551         txbd1->tx_bd_mss = 0;
0552     }
0553 
0554     length >>= 9;
0555     if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
0556         dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
0557                      skb->len);
0558         i = 0;
0559         goto tx_dma_error;
0560     }
0561     flags |= bnxt_lhint_arr[length];
0562     txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
0563 
0564     txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
0565     txbd1->tx_bd_cfa_action =
0566             cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
0567     for (i = 0; i < last_frag; i++) {
0568         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
0569 
0570         prod = NEXT_TX(prod);
0571         txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
0572 
0573         len = skb_frag_size(frag);
0574         mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
0575                        DMA_TO_DEVICE);
0576 
0577         if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
0578             goto tx_dma_error;
0579 
0580         tx_buf = &txr->tx_buf_ring[prod];
0581         dma_unmap_addr_set(tx_buf, mapping, mapping);
0582 
0583         txbd->tx_bd_haddr = cpu_to_le64(mapping);
0584 
0585         flags = len << TX_BD_LEN_SHIFT;
0586         txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
0587     }
0588 
0589     flags &= ~TX_BD_LEN;
0590     txbd->tx_bd_len_flags_type =
0591         cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
0592                 TX_BD_FLAGS_PACKET_END);
0593 
0594     netdev_tx_sent_queue(txq, skb->len);
0595 
0596     skb_tx_timestamp(skb);
0597 
0598     /* Sync BD data before updating doorbell */
0599     wmb();
0600 
0601     prod = NEXT_TX(prod);
0602     txr->tx_prod = prod;
0603 
0604     if (!netdev_xmit_more() || netif_xmit_stopped(txq))
0605         bnxt_txr_db_kick(bp, txr, prod);
0606     else
0607         txr->kick_pending = 1;
0608 
0609 tx_done:
0610 
0611     if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
0612         if (netdev_xmit_more() && !tx_buf->is_push)
0613             bnxt_txr_db_kick(bp, txr, prod);
0614 
0615         bnxt_txr_netif_try_stop_queue(bp, txr, txq);
0616     }
0617     return NETDEV_TX_OK;
0618 
0619 tx_dma_error:
0620     if (BNXT_TX_PTP_IS_SET(lflags))
0621         atomic_inc(&bp->ptp_cfg->tx_avail);
0622 
0623     last_frag = i;
0624 
0625     /* start back at beginning and unmap skb */
0626     prod = txr->tx_prod;
0627     tx_buf = &txr->tx_buf_ring[prod];
0628     dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
0629              skb_headlen(skb), DMA_TO_DEVICE);
0630     prod = NEXT_TX(prod);
0631 
0632     /* unmap remaining mapped pages */
0633     for (i = 0; i < last_frag; i++) {
0634         prod = NEXT_TX(prod);
0635         tx_buf = &txr->tx_buf_ring[prod];
0636         dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
0637                    skb_frag_size(&skb_shinfo(skb)->frags[i]),
0638                    DMA_TO_DEVICE);
0639     }
0640 
0641 tx_free:
0642     dev_kfree_skb_any(skb);
0643 tx_kick_pending:
0644     if (txr->kick_pending)
0645         bnxt_txr_db_kick(bp, txr, txr->tx_prod);
0646     txr->tx_buf_ring[txr->tx_prod].skb = NULL;
0647     dev_core_stats_tx_dropped_inc(dev);
0648     return NETDEV_TX_OK;
0649 }
0650 
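     /* TX completion handling: unmap and free @nr_pkts completed packets,
      * report them to BQL via netdev_tx_completed_queue(), and re-wake the
      * TX queue once enough descriptors are free (paired with the queue
      * stop logic in bnxt_start_xmit()).
      */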
0651 static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
0652 {
0653     struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
0654     struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
0655     u16 cons = txr->tx_cons;
0656     struct pci_dev *pdev = bp->pdev;
0657     int i;
0658     unsigned int tx_bytes = 0;
0659 
0660     for (i = 0; i < nr_pkts; i++) {
0661         struct bnxt_sw_tx_bd *tx_buf;
0662         struct sk_buff *skb;
0663         int j, last;
0664 
0665         tx_buf = &txr->tx_buf_ring[cons];
0666         cons = NEXT_TX(cons);
0667         skb = tx_buf->skb;
0668         tx_buf->skb = NULL;
0669 
0670         tx_bytes += skb->len;
0671 
0672         if (tx_buf->is_push) {
0673             tx_buf->is_push = 0;
0674             goto next_tx_int;
0675         }
0676 
0677         dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
0678                  skb_headlen(skb), DMA_TO_DEVICE);
0679         last = tx_buf->nr_frags;
0680 
0681         for (j = 0; j < last; j++) {
0682             cons = NEXT_TX(cons);
0683             tx_buf = &txr->tx_buf_ring[cons];
0684             dma_unmap_page(
0685                 &pdev->dev,
0686                 dma_unmap_addr(tx_buf, mapping),
0687                 skb_frag_size(&skb_shinfo(skb)->frags[j]),
0688                 DMA_TO_DEVICE);
0689         }
0690         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
0691             if (bp->flags & BNXT_FLAG_CHIP_P5) {
0692                 /* PTP worker takes ownership of the skb */
0693                 if (!bnxt_get_tx_ts_p5(bp, skb))
0694                     skb = NULL;
0695                 else
0696                     atomic_inc(&bp->ptp_cfg->tx_avail);
0697             }
0698         }
0699 
0700 next_tx_int:
0701         cons = NEXT_TX(cons);
0702 
0703         dev_kfree_skb_any(skb);
0704     }
0705 
0706     netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
0707     txr->tx_cons = cons;
0708 
0709     /* Need to make the tx_cons update visible to bnxt_start_xmit()
0710      * before checking for netif_tx_queue_stopped().  Without the
0711      * memory barrier, there is a small possibility that bnxt_start_xmit()
0712      * will miss it and cause the queue to be stopped forever.
0713      */
0714     smp_mb();
0715 
0716     if (unlikely(netif_tx_queue_stopped(txq)) &&
0717         bnxt_tx_avail(bp, txr) >= bp->tx_wake_thresh &&
0718         READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
0719         netif_tx_wake_queue(txq);
0720 }
0721 
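     /* Allocate a full page for an RX buffer from the per-ring page_pool and
      * DMA-map it; used when the ring operates in page mode (e.g. for XDP)
      * and for aggregation buffers in that mode.
      */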
0722 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
0723                      struct bnxt_rx_ring_info *rxr,
0724                      gfp_t gfp)
0725 {
0726     struct device *dev = &bp->pdev->dev;
0727     struct page *page;
0728 
0729     page = page_pool_dev_alloc_pages(rxr->page_pool);
0730     if (!page)
0731         return NULL;
0732 
0733     *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
0734                       DMA_ATTR_WEAK_ORDERING);
0735     if (dma_mapping_error(dev, *mapping)) {
0736         page_pool_recycle_direct(rxr->page_pool, page);
0737         return NULL;
0738     }
0739     return page;
0740 }
0741 
0742 static inline u8 *__bnxt_alloc_rx_frag(struct bnxt *bp, dma_addr_t *mapping,
0743                        gfp_t gfp)
0744 {
0745     u8 *data;
0746     struct pci_dev *pdev = bp->pdev;
0747 
0748     if (gfp == GFP_ATOMIC)
0749         data = napi_alloc_frag(bp->rx_buf_size);
0750     else
0751         data = netdev_alloc_frag(bp->rx_buf_size);
0752     if (!data)
0753         return NULL;
0754 
0755     *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
0756                     bp->rx_buf_use_size, bp->rx_dir,
0757                     DMA_ATTR_WEAK_ORDERING);
0758 
0759     if (dma_mapping_error(&pdev->dev, *mapping)) {
0760         skb_free_frag(data);
0761         data = NULL;
0762     }
0763     return data;
0764 }
0765 
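     /* Fill the RX descriptor at @prod with a fresh buffer: a page_pool page
      * when the ring is in page mode, otherwise a kmalloc'd frag from
      * napi_alloc_frag()/netdev_alloc_frag().
      */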
0766 int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
0767                u16 prod, gfp_t gfp)
0768 {
0769     struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
0770     struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
0771     dma_addr_t mapping;
0772 
0773     if (BNXT_RX_PAGE_MODE(bp)) {
0774         struct page *page =
0775             __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
0776 
0777         if (!page)
0778             return -ENOMEM;
0779 
0780         mapping += bp->rx_dma_offset;
0781         rx_buf->data = page;
0782         rx_buf->data_ptr = page_address(page) + bp->rx_offset;
0783     } else {
0784         u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
0785 
0786         if (!data)
0787             return -ENOMEM;
0788 
0789         rx_buf->data = data;
0790         rx_buf->data_ptr = data + bp->rx_offset;
0791     }
0792     rx_buf->mapping = mapping;
0793 
0794     rxbd->rx_bd_haddr = cpu_to_le64(mapping);
0795     return 0;
0796 }
0797 
0798 void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
0799 {
0800     u16 prod = rxr->rx_prod;
0801     struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
0802     struct rx_bd *cons_bd, *prod_bd;
0803 
0804     prod_rx_buf = &rxr->rx_buf_ring[prod];
0805     cons_rx_buf = &rxr->rx_buf_ring[cons];
0806 
0807     prod_rx_buf->data = data;
0808     prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
0809 
0810     prod_rx_buf->mapping = cons_rx_buf->mapping;
0811 
0812     prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
0813     cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
0814 
0815     prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
0816 }
0817 
0818 static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
0819 {
0820     u16 next, max = rxr->rx_agg_bmap_size;
0821 
0822     next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
0823     if (next >= max)
0824         next = find_first_zero_bit(rxr->rx_agg_bmap, max);
0825     return next;
0826 }
0827 
0828 static inline int bnxt_alloc_rx_page(struct bnxt *bp,
0829                      struct bnxt_rx_ring_info *rxr,
0830                      u16 prod, gfp_t gfp)
0831 {
0832     struct rx_bd *rxbd =
0833         &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
0834     struct bnxt_sw_rx_agg_bd *rx_agg_buf;
0835     struct pci_dev *pdev = bp->pdev;
0836     struct page *page;
0837     dma_addr_t mapping;
0838     u16 sw_prod = rxr->rx_sw_agg_prod;
0839     unsigned int offset = 0;
0840 
0841     if (BNXT_RX_PAGE_MODE(bp)) {
0842         page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
0843 
0844         if (!page)
0845             return -ENOMEM;
0846 
0847     } else {
0848         if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
0849             page = rxr->rx_page;
0850             if (!page) {
0851                 page = alloc_page(gfp);
0852                 if (!page)
0853                     return -ENOMEM;
0854                 rxr->rx_page = page;
0855                 rxr->rx_page_offset = 0;
0856             }
0857             offset = rxr->rx_page_offset;
0858             rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
0859             if (rxr->rx_page_offset == PAGE_SIZE)
0860                 rxr->rx_page = NULL;
0861             else
0862                 get_page(page);
0863         } else {
0864             page = alloc_page(gfp);
0865             if (!page)
0866                 return -ENOMEM;
0867         }
0868 
0869         mapping = dma_map_page_attrs(&pdev->dev, page, offset,
0870                          BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
0871                          DMA_ATTR_WEAK_ORDERING);
0872         if (dma_mapping_error(&pdev->dev, mapping)) {
0873             __free_page(page);
0874             return -EIO;
0875         }
0876     }
0877 
0878     if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
0879         sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
0880 
0881     __set_bit(sw_prod, rxr->rx_agg_bmap);
0882     rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
0883     rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
0884 
0885     rx_agg_buf->page = page;
0886     rx_agg_buf->offset = offset;
0887     rx_agg_buf->mapping = mapping;
0888     rxbd->rx_bd_haddr = cpu_to_le64(mapping);
0889     rxbd->rx_bd_opaque = sw_prod;
0890     return 0;
0891 }
0892 
0893 static struct rx_agg_cmp *bnxt_get_agg(struct bnxt *bp,
0894                        struct bnxt_cp_ring_info *cpr,
0895                        u16 cp_cons, u16 curr)
0896 {
0897     struct rx_agg_cmp *agg;
0898 
0899     cp_cons = RING_CMP(ADV_RAW_CMP(cp_cons, curr));
0900     agg = (struct rx_agg_cmp *)
0901         &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
0902     return agg;
0903 }
0904 
0905 static struct rx_agg_cmp *bnxt_get_tpa_agg_p5(struct bnxt *bp,
0906                           struct bnxt_rx_ring_info *rxr,
0907                           u16 agg_id, u16 curr)
0908 {
0909     struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];
0910 
0911     return &tpa_info->agg_arr[curr];
0912 }
0913 
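     /* Recycle @agg_bufs aggregation buffers (starting at @start) back onto
      * the aggregation ring, e.g. when an aggregated packet has to be
      * dropped, so the pages do not need to be freed and reallocated.
      */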
0914 static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 idx,
0915                    u16 start, u32 agg_bufs, bool tpa)
0916 {
0917     struct bnxt_napi *bnapi = cpr->bnapi;
0918     struct bnxt *bp = bnapi->bp;
0919     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
0920     u16 prod = rxr->rx_agg_prod;
0921     u16 sw_prod = rxr->rx_sw_agg_prod;
0922     bool p5_tpa = false;
0923     u32 i;
0924 
0925     if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
0926         p5_tpa = true;
0927 
0928     for (i = 0; i < agg_bufs; i++) {
0929         u16 cons;
0930         struct rx_agg_cmp *agg;
0931         struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
0932         struct rx_bd *prod_bd;
0933         struct page *page;
0934 
0935         if (p5_tpa)
0936             agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, start + i);
0937         else
0938             agg = bnxt_get_agg(bp, cpr, idx, start + i);
0939         cons = agg->rx_agg_cmp_opaque;
0940         __clear_bit(cons, rxr->rx_agg_bmap);
0941 
0942         if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
0943             sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
0944 
0945         __set_bit(sw_prod, rxr->rx_agg_bmap);
0946         prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
0947         cons_rx_buf = &rxr->rx_agg_ring[cons];
0948 
0949         /* It is possible for sw_prod to be equal to cons, so
0950          * set cons_rx_buf->page to NULL first.
0951          */
0952         page = cons_rx_buf->page;
0953         cons_rx_buf->page = NULL;
0954         prod_rx_buf->page = page;
0955         prod_rx_buf->offset = cons_rx_buf->offset;
0956 
0957         prod_rx_buf->mapping = cons_rx_buf->mapping;
0958 
0959         prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
0960 
0961         prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
0962         prod_bd->rx_bd_opaque = sw_prod;
0963 
0964         prod = NEXT_RX_AGG(prod);
0965         sw_prod = NEXT_RX_AGG(sw_prod);
0966     }
0967     rxr->rx_agg_prod = prod;
0968     rxr->rx_sw_agg_prod = sw_prod;
0969 }
0970 
0971 static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
0972                           struct bnxt_rx_ring_info *rxr,
0973                           u16 cons, void *data, u8 *data_ptr,
0974                           dma_addr_t dma_addr,
0975                           unsigned int offset_and_len)
0976 {
0977     unsigned int len = offset_and_len & 0xffff;
0978     struct page *page = data;
0979     u16 prod = rxr->rx_prod;
0980     struct sk_buff *skb;
0981     int err;
0982 
0983     err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
0984     if (unlikely(err)) {
0985         bnxt_reuse_rx_data(rxr, cons, data);
0986         return NULL;
0987     }
0988     dma_addr -= bp->rx_dma_offset;
0989     dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
0990                  DMA_ATTR_WEAK_ORDERING);
0991     skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
0992                         bp->rx_dma_offset);
0993     if (!skb) {
0994         __free_page(page);
0995         return NULL;
0996     }
0997     skb_mark_for_recycle(skb);
0998     skb_reserve(skb, bp->rx_dma_offset);
0999     __skb_put(skb, len);
1000 
1001     return skb;
1002 }
1003 
1004 static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
1005                     struct bnxt_rx_ring_info *rxr,
1006                     u16 cons, void *data, u8 *data_ptr,
1007                     dma_addr_t dma_addr,
1008                     unsigned int offset_and_len)
1009 {
1010     unsigned int payload = offset_and_len >> 16;
1011     unsigned int len = offset_and_len & 0xffff;
1012     skb_frag_t *frag;
1013     struct page *page = data;
1014     u16 prod = rxr->rx_prod;
1015     struct sk_buff *skb;
1016     int off, err;
1017 
1018     err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1019     if (unlikely(err)) {
1020         bnxt_reuse_rx_data(rxr, cons, data);
1021         return NULL;
1022     }
1023     dma_addr -= bp->rx_dma_offset;
1024     dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
1025                  DMA_ATTR_WEAK_ORDERING);
1026 
1027     if (unlikely(!payload))
1028         payload = eth_get_headlen(bp->dev, data_ptr, len);
1029 
1030     skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
1031     if (!skb) {
1032         __free_page(page);
1033         return NULL;
1034     }
1035 
1036     skb_mark_for_recycle(skb);
1037     off = (void *)data_ptr - page_address(page);
1038     skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
1039     memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
1040            payload + NET_IP_ALIGN);
1041 
1042     frag = &skb_shinfo(skb)->frags[0];
1043     skb_frag_size_sub(frag, payload);
1044     skb_frag_off_add(frag, payload);
1045     skb->data_len -= payload;
1046     skb->tail += payload;
1047 
1048     return skb;
1049 }
1050 
1051 static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
1052                    struct bnxt_rx_ring_info *rxr, u16 cons,
1053                    void *data, u8 *data_ptr,
1054                    dma_addr_t dma_addr,
1055                    unsigned int offset_and_len)
1056 {
1057     u16 prod = rxr->rx_prod;
1058     struct sk_buff *skb;
1059     int err;
1060 
1061     err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
1062     if (unlikely(err)) {
1063         bnxt_reuse_rx_data(rxr, cons, data);
1064         return NULL;
1065     }
1066 
1067     skb = build_skb(data, bp->rx_buf_size);
1068     dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
1069                    bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
1070     if (!skb) {
1071         skb_free_frag(data);
1072         return NULL;
1073     }
1074 
1075     skb_reserve(skb, bp->rx_offset);
1076     skb_put(skb, offset_and_len & 0xffff);
1077     return skb;
1078 }
1079 
1080 static u32 __bnxt_rx_agg_pages(struct bnxt *bp,
1081                    struct bnxt_cp_ring_info *cpr,
1082                    struct skb_shared_info *shinfo,
1083                    u16 idx, u32 agg_bufs, bool tpa,
1084                    struct xdp_buff *xdp)
1085 {
1086     struct bnxt_napi *bnapi = cpr->bnapi;
1087     struct pci_dev *pdev = bp->pdev;
1088     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1089     u16 prod = rxr->rx_agg_prod;
1090     u32 i, total_frag_len = 0;
1091     bool p5_tpa = false;
1092 
1093     if ((bp->flags & BNXT_FLAG_CHIP_P5) && tpa)
1094         p5_tpa = true;
1095 
1096     for (i = 0; i < agg_bufs; i++) {
1097         skb_frag_t *frag = &shinfo->frags[i];
1098         u16 cons, frag_len;
1099         struct rx_agg_cmp *agg;
1100         struct bnxt_sw_rx_agg_bd *cons_rx_buf;
1101         struct page *page;
1102         dma_addr_t mapping;
1103 
1104         if (p5_tpa)
1105             agg = bnxt_get_tpa_agg_p5(bp, rxr, idx, i);
1106         else
1107             agg = bnxt_get_agg(bp, cpr, idx, i);
1108         cons = agg->rx_agg_cmp_opaque;
1109         frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
1110                 RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
1111 
1112         cons_rx_buf = &rxr->rx_agg_ring[cons];
1113         skb_frag_off_set(frag, cons_rx_buf->offset);
1114         skb_frag_size_set(frag, frag_len);
1115         __skb_frag_set_page(frag, cons_rx_buf->page);
1116         shinfo->nr_frags = i + 1;
1117         __clear_bit(cons, rxr->rx_agg_bmap);
1118 
1119         /* It is possible for bnxt_alloc_rx_page() to allocate
1120          * a sw_prod index that equals the cons index, so we
1121          * need to clear the cons entry now.
1122          */
1123         mapping = cons_rx_buf->mapping;
1124         page = cons_rx_buf->page;
1125         cons_rx_buf->page = NULL;
1126 
1127         if (xdp && page_is_pfmemalloc(page))
1128             xdp_buff_set_frag_pfmemalloc(xdp);
1129 
1130         if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
1131             unsigned int nr_frags;
1132 
1133             nr_frags = --shinfo->nr_frags;
1134             __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
1135             cons_rx_buf->page = page;
1136 
1137             /* Update prod since possibly some pages have been
1138              * allocated already.
1139              */
1140             rxr->rx_agg_prod = prod;
1141             bnxt_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
1142             return 0;
1143         }
1144 
1145         dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
1146                      bp->rx_dir,
1147                      DMA_ATTR_WEAK_ORDERING);
1148 
1149         total_frag_len += frag_len;
1150         prod = NEXT_RX_AGG(prod);
1151     }
1152     rxr->rx_agg_prod = prod;
1153     return total_frag_len;
1154 }
1155 
1156 static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
1157                          struct bnxt_cp_ring_info *cpr,
1158                          struct sk_buff *skb, u16 idx,
1159                          u32 agg_bufs, bool tpa)
1160 {
1161     struct skb_shared_info *shinfo = skb_shinfo(skb);
1162     u32 total_frag_len = 0;
1163 
1164     total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo, idx,
1165                          agg_bufs, tpa, NULL);
1166     if (!total_frag_len) {
1167         dev_kfree_skb(skb);
1168         return NULL;
1169     }
1170 
1171     skb->data_len += total_frag_len;
1172     skb->len += total_frag_len;
1173     skb->truesize += PAGE_SIZE * agg_bufs;
1174     return skb;
1175 }
1176 
1177 static u32 bnxt_rx_agg_pages_xdp(struct bnxt *bp,
1178                  struct bnxt_cp_ring_info *cpr,
1179                  struct xdp_buff *xdp, u16 idx,
1180                  u32 agg_bufs, bool tpa)
1181 {
1182     struct skb_shared_info *shinfo = xdp_get_shared_info_from_buff(xdp);
1183     u32 total_frag_len = 0;
1184 
1185     if (!xdp_buff_has_frags(xdp))
1186         shinfo->nr_frags = 0;
1187 
1188     total_frag_len = __bnxt_rx_agg_pages(bp, cpr, shinfo,
1189                          idx, agg_bufs, tpa, xdp);
1190     if (total_frag_len) {
1191         xdp_buff_set_frags_flag(xdp);
1192         shinfo->nr_frags = agg_bufs;
1193         shinfo->xdp_frags_size = total_frag_len;
1194     }
1195     return total_frag_len;
1196 }
1197 
1198 static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1199                    u8 agg_bufs, u32 *raw_cons)
1200 {
1201     u16 last;
1202     struct rx_agg_cmp *agg;
1203 
1204     *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
1205     last = RING_CMP(*raw_cons);
1206     agg = (struct rx_agg_cmp *)
1207         &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
1208     return RX_AGG_CMP_VALID(agg, *raw_cons);
1209 }
1210 
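     /* Copybreak: build a new skb and copy a small packet (at most
      * bp->rx_copy_thresh bytes) into it so the original DMA-mapped receive
      * buffer can be left on the ring for reuse.
      */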
1211 static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
1212                         unsigned int len,
1213                         dma_addr_t mapping)
1214 {
1215     struct bnxt *bp = bnapi->bp;
1216     struct pci_dev *pdev = bp->pdev;
1217     struct sk_buff *skb;
1218 
1219     skb = napi_alloc_skb(&bnapi->napi, len);
1220     if (!skb)
1221         return NULL;
1222 
1223     dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
1224                 bp->rx_dir);
1225 
1226     memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
1227            len + NET_IP_ALIGN);
1228 
1229     dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
1230                    bp->rx_dir);
1231 
1232     skb_put(skb, len);
1233     return skb;
1234 }
1235 
1236 static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1237                u32 *raw_cons, void *cmp)
1238 {
1239     struct rx_cmp *rxcmp = cmp;
1240     u32 tmp_raw_cons = *raw_cons;
1241     u8 cmp_type, agg_bufs = 0;
1242 
1243     cmp_type = RX_CMP_TYPE(rxcmp);
1244 
1245     if (cmp_type == CMP_TYPE_RX_L2_CMP) {
1246         agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
1247                 RX_CMP_AGG_BUFS) >>
1248                RX_CMP_AGG_BUFS_SHIFT;
1249     } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1250         struct rx_tpa_end_cmp *tpa_end = cmp;
1251 
1252         if (bp->flags & BNXT_FLAG_CHIP_P5)
1253             return 0;
1254 
1255         agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1256     }
1257 
1258     if (agg_bufs) {
1259         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1260             return -EBUSY;
1261     }
1262     *raw_cons = tmp_raw_cons;
1263     return 0;
1264 }
1265 
1266 static void bnxt_queue_fw_reset_work(struct bnxt *bp, unsigned long delay)
1267 {
1268     if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
1269         return;
1270 
1271     if (BNXT_PF(bp))
1272         queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
1273     else
1274         schedule_delayed_work(&bp->fw_reset_task, delay);
1275 }
1276 
1277 static void bnxt_queue_sp_work(struct bnxt *bp)
1278 {
1279     if (BNXT_PF(bp))
1280         queue_work(bnxt_pf_wq, &bp->sp_task);
1281     else
1282         schedule_work(&bp->sp_task);
1283 }
1284 
1285 static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
1286 {
1287     if (!rxr->bnapi->in_reset) {
1288         rxr->bnapi->in_reset = true;
1289         if (bp->flags & BNXT_FLAG_CHIP_P5)
1290             set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
1291         else
1292             set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
1293         bnxt_queue_sp_work(bp);
1294     }
1295     rxr->rx_next_cons = 0xffff;
1296 }
1297 
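     /* On P5 chips the hardware may assign TPA aggregation IDs from a larger
      * space than the rx_tpa[] array, so remap each hardware agg_id to a
      * free software slot and remember the mapping in agg_id_tbl[].
      */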
1298 static u16 bnxt_alloc_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1299 {
1300     struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1301     u16 idx = agg_id & MAX_TPA_P5_MASK;
1302 
1303     if (test_bit(idx, map->agg_idx_bmap))
1304         idx = find_first_zero_bit(map->agg_idx_bmap,
1305                       BNXT_AGG_IDX_BMAP_SIZE);
1306     __set_bit(idx, map->agg_idx_bmap);
1307     map->agg_id_tbl[agg_id] = idx;
1308     return idx;
1309 }
1310 
1311 static void bnxt_free_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
1312 {
1313     struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1314 
1315     __clear_bit(idx, map->agg_idx_bmap);
1316 }
1317 
1318 static u16 bnxt_lookup_agg_idx(struct bnxt_rx_ring_info *rxr, u16 agg_id)
1319 {
1320     struct bnxt_tpa_idx_map *map = rxr->rx_tpa_idx_map;
1321 
1322     return map->agg_id_tbl[agg_id];
1323 }
1324 
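     /* TPA_START completion: the NIC has started aggregating a TCP flow in
      * hardware (LRO/GRO_HW).  Stash the current RX buffer in
      * rxr->rx_tpa[agg_id] and put that slot's previous buffer back on the
      * ring; the full aggregated packet is assembled at TPA_END time.
      */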
1325 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1326                struct rx_tpa_start_cmp *tpa_start,
1327                struct rx_tpa_start_cmp_ext *tpa_start1)
1328 {
1329     struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
1330     struct bnxt_tpa_info *tpa_info;
1331     u16 cons, prod, agg_id;
1332     struct rx_bd *prod_bd;
1333     dma_addr_t mapping;
1334 
1335     if (bp->flags & BNXT_FLAG_CHIP_P5) {
1336         agg_id = TPA_START_AGG_ID_P5(tpa_start);
1337         agg_id = bnxt_alloc_agg_idx(rxr, agg_id);
1338     } else {
1339         agg_id = TPA_START_AGG_ID(tpa_start);
1340     }
1341     cons = tpa_start->rx_tpa_start_cmp_opaque;
1342     prod = rxr->rx_prod;
1343     cons_rx_buf = &rxr->rx_buf_ring[cons];
1344     prod_rx_buf = &rxr->rx_buf_ring[prod];
1345     tpa_info = &rxr->rx_tpa[agg_id];
1346 
1347     if (unlikely(cons != rxr->rx_next_cons ||
1348              TPA_START_ERROR(tpa_start))) {
1349         netdev_warn(bp->dev, "TPA cons %x, expected cons %x, error code %x\n",
1350                 cons, rxr->rx_next_cons,
1351                 TPA_START_ERROR_CODE(tpa_start1));
1352         bnxt_sched_reset(bp, rxr);
1353         return;
1354     }
1355     /* Store cfa_code in tpa_info to use in tpa_end
1356      * completion processing.
1357      */
1358     tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
1359     prod_rx_buf->data = tpa_info->data;
1360     prod_rx_buf->data_ptr = tpa_info->data_ptr;
1361 
1362     mapping = tpa_info->mapping;
1363     prod_rx_buf->mapping = mapping;
1364 
1365     prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1366 
1367     prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
1368 
1369     tpa_info->data = cons_rx_buf->data;
1370     tpa_info->data_ptr = cons_rx_buf->data_ptr;
1371     cons_rx_buf->data = NULL;
1372     tpa_info->mapping = cons_rx_buf->mapping;
1373 
1374     tpa_info->len =
1375         le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
1376                 RX_TPA_START_CMP_LEN_SHIFT;
1377     if (likely(TPA_START_HASH_VALID(tpa_start))) {
1378         u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
1379 
1380         tpa_info->hash_type = PKT_HASH_TYPE_L4;
1381         tpa_info->gso_type = SKB_GSO_TCPV4;
1382         /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1383         if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
1384             tpa_info->gso_type = SKB_GSO_TCPV6;
1385         tpa_info->rss_hash =
1386             le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
1387     } else {
1388         tpa_info->hash_type = PKT_HASH_TYPE_NONE;
1389         tpa_info->gso_type = 0;
1390         netif_warn(bp, rx_err, bp->dev, "TPA packet without valid hash\n");
1391     }
1392     tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
1393     tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
1394     tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
1395     tpa_info->agg_count = 0;
1396 
1397     rxr->rx_prod = NEXT_RX(prod);
1398     cons = NEXT_RX(cons);
1399     rxr->rx_next_cons = NEXT_RX(cons);
1400     cons_rx_buf = &rxr->rx_buf_ring[cons];
1401 
1402     bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
1403     rxr->rx_prod = NEXT_RX(rxr->rx_prod);
1404     cons_rx_buf->data = NULL;
1405 }
1406 
1407 static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
1408 {
1409     if (agg_bufs)
1410         bnxt_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
1411 }
1412 
1413 #ifdef CONFIG_INET
1414 static void bnxt_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
1415 {
1416     struct udphdr *uh = NULL;
1417 
1418     if (ip_proto == htons(ETH_P_IP)) {
1419         struct iphdr *iph = (struct iphdr *)skb->data;
1420 
1421         if (iph->protocol == IPPROTO_UDP)
1422             uh = (struct udphdr *)(iph + 1);
1423     } else {
1424         struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
1425 
1426         if (iph->nexthdr == IPPROTO_UDP)
1427             uh = (struct udphdr *)(iph + 1);
1428     }
1429     if (uh) {
1430         if (uh->check)
1431             skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
1432         else
1433             skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
1434     }
1435 }
1436 #endif
1437 
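     /* Per-chip-generation GRO fixup helpers for aggregated TPA packets:
      * they set the network/transport header offsets and recompute the TCP
      * pseudo-header checksum so the skb can be finished by the stack's GRO
      * code.
      */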
1438 static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
1439                        int payload_off, int tcp_ts,
1440                        struct sk_buff *skb)
1441 {
1442 #ifdef CONFIG_INET
1443     struct tcphdr *th;
1444     int len, nw_off;
1445     u16 outer_ip_off, inner_ip_off, inner_mac_off;
1446     u32 hdr_info = tpa_info->hdr_info;
1447     bool loopback = false;
1448 
1449     inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1450     inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1451     outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1452 
1453     /* If the packet is an internal loopback packet, the offsets will
1454      * have an extra 4 bytes.
1455      */
1456     if (inner_mac_off == 4) {
1457         loopback = true;
1458     } else if (inner_mac_off > 4) {
1459         __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
1460                         ETH_HLEN - 2));
1461 
0462         /* We only support inner IPv4/IPv6.  If we don't see the
1463          * correct protocol ID, it must be a loopback packet where
1464          * the offsets are off by 4.
1465          */
1466         if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
1467             loopback = true;
1468     }
1469     if (loopback) {
1470         /* internal loopback packet, subtract 4 from all offsets */
1471         inner_ip_off -= 4;
1472         inner_mac_off -= 4;
1473         outer_ip_off -= 4;
1474     }
1475 
1476     nw_off = inner_ip_off - ETH_HLEN;
1477     skb_set_network_header(skb, nw_off);
1478     if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
1479         struct ipv6hdr *iph = ipv6_hdr(skb);
1480 
1481         skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1482         len = skb->len - skb_transport_offset(skb);
1483         th = tcp_hdr(skb);
1484         th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1485     } else {
1486         struct iphdr *iph = ip_hdr(skb);
1487 
1488         skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1489         len = skb->len - skb_transport_offset(skb);
1490         th = tcp_hdr(skb);
1491         th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1492     }
1493 
1494     if (inner_mac_off) { /* tunnel */
1495         __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1496                         ETH_HLEN - 2));
1497 
1498         bnxt_gro_tunnel(skb, proto);
1499     }
1500 #endif
1501     return skb;
1502 }
1503 
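/* Per-chip GRO fixup for 5750x (P5) devices: the header offsets come
 * directly from the TPA completion, so only the network and transport
 * header pointers and the tunnel GSO flags need to be set.
 */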
1504 static struct sk_buff *bnxt_gro_func_5750x(struct bnxt_tpa_info *tpa_info,
1505                        int payload_off, int tcp_ts,
1506                        struct sk_buff *skb)
1507 {
1508 #ifdef CONFIG_INET
1509     u16 outer_ip_off, inner_ip_off, inner_mac_off;
1510     u32 hdr_info = tpa_info->hdr_info;
1511     int iphdr_len, nw_off;
1512 
1513     inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
1514     inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
1515     outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
1516 
1517     nw_off = inner_ip_off - ETH_HLEN;
1518     skb_set_network_header(skb, nw_off);
1519     iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
1520              sizeof(struct ipv6hdr) : sizeof(struct iphdr);
1521     skb_set_transport_header(skb, nw_off + iphdr_len);
1522 
1523     if (inner_mac_off) { /* tunnel */
1524         __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
1525                         ETH_HLEN - 2));
1526 
1527         bnxt_gro_tunnel(skb, proto);
1528     }
1529 #endif
1530     return skb;
1531 }
1532 
1533 #define BNXT_IPV4_HDR_SIZE  (sizeof(struct iphdr) + sizeof(struct tcphdr))
1534 #define BNXT_IPV6_HDR_SIZE  (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
1535 
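/* Per-chip GRO fixup for 5730x-class devices: derive the header offsets
 * from the payload offset reported in the TPA end completion and seed the
 * TCP pseudo-header checksum.
 */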
1536 static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
1537                        int payload_off, int tcp_ts,
1538                        struct sk_buff *skb)
1539 {
1540 #ifdef CONFIG_INET
1541     struct tcphdr *th;
1542     int len, nw_off, tcp_opt_len = 0;
1543 
1544     if (tcp_ts)
1545         tcp_opt_len = 12;
1546 
1547     if (tpa_info->gso_type == SKB_GSO_TCPV4) {
1548         struct iphdr *iph;
1549 
1550         nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
1551              ETH_HLEN;
1552         skb_set_network_header(skb, nw_off);
1553         iph = ip_hdr(skb);
1554         skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
1555         len = skb->len - skb_transport_offset(skb);
1556         th = tcp_hdr(skb);
1557         th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
1558     } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
1559         struct ipv6hdr *iph;
1560 
1561         nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
1562              ETH_HLEN;
1563         skb_set_network_header(skb, nw_off);
1564         iph = ipv6_hdr(skb);
1565         skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
1566         len = skb->len - skb_transport_offset(skb);
1567         th = tcp_hdr(skb);
1568         th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
1569     } else {
1570         dev_kfree_skb_any(skb);
1571         return NULL;
1572     }
1573 
1574     if (nw_off) /* tunnel */
1575         bnxt_gro_tunnel(skb, skb->protocol);
1576 #endif
1577     return skb;
1578 }
1579 
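/* Finish GRO processing of an aggregated (TPA) packet: record the segment
 * count and gso_size, invoke the chip-specific gro_func to fix up the
 * headers, then complete the GRO context.
 */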
1580 static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
1581                        struct bnxt_tpa_info *tpa_info,
1582                        struct rx_tpa_end_cmp *tpa_end,
1583                        struct rx_tpa_end_cmp_ext *tpa_end1,
1584                        struct sk_buff *skb)
1585 {
1586 #ifdef CONFIG_INET
1587     int payload_off;
1588     u16 segs;
1589 
1590     segs = TPA_END_TPA_SEGS(tpa_end);
1591     if (segs == 1)
1592         return skb;
1593 
1594     NAPI_GRO_CB(skb)->count = segs;
1595     skb_shinfo(skb)->gso_size =
1596         le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
1597     skb_shinfo(skb)->gso_type = tpa_info->gso_type;
1598     if (bp->flags & BNXT_FLAG_CHIP_P5)
1599         payload_off = TPA_END_PAYLOAD_OFF_P5(tpa_end1);
1600     else
1601         payload_off = TPA_END_PAYLOAD_OFF(tpa_end);
1602     skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
1603     if (likely(skb))
1604         tcp_gro_complete(skb);
1605 #endif
1606     return skb;
1607 }
1608 
1609 /* Given the cfa_code of a received packet, determine which
1610  * netdev (vf-rep or PF) the packet is destined to.
1611  */
1612 static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
1613 {
1614     struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1615 
1616     /* if vf-rep dev is NULL, the packet must belong to the PF */
1617     return dev ? dev : bp->dev;
1618 }
1619 
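/* Handle a TPA end completion: reclaim the TPA buffer (copying short
 * packets, re-posting a fresh buffer otherwise), attach any aggregation
 * pages, then apply RSS hash, VLAN and checksum metadata and optionally
 * hand the skb to GRO.
 */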
1620 static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1621                        struct bnxt_cp_ring_info *cpr,
1622                        u32 *raw_cons,
1623                        struct rx_tpa_end_cmp *tpa_end,
1624                        struct rx_tpa_end_cmp_ext *tpa_end1,
1625                        u8 *event)
1626 {
1627     struct bnxt_napi *bnapi = cpr->bnapi;
1628     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1629     u8 *data_ptr, agg_bufs;
1630     unsigned int len;
1631     struct bnxt_tpa_info *tpa_info;
1632     dma_addr_t mapping;
1633     struct sk_buff *skb;
1634     u16 idx = 0, agg_id;
1635     void *data;
1636     bool gro;
1637 
1638     if (unlikely(bnapi->in_reset)) {
1639         int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);
1640 
1641         if (rc < 0)
1642             return ERR_PTR(-EBUSY);
1643         return NULL;
1644     }
1645 
1646     if (bp->flags & BNXT_FLAG_CHIP_P5) {
1647         agg_id = TPA_END_AGG_ID_P5(tpa_end);
1648         agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1649         agg_bufs = TPA_END_AGG_BUFS_P5(tpa_end1);
1650         tpa_info = &rxr->rx_tpa[agg_id];
1651         if (unlikely(agg_bufs != tpa_info->agg_count)) {
1652             netdev_warn(bp->dev, "TPA end agg_buf %d != expected agg_bufs %d\n",
1653                     agg_bufs, tpa_info->agg_count);
1654             agg_bufs = tpa_info->agg_count;
1655         }
1656         tpa_info->agg_count = 0;
1657         *event |= BNXT_AGG_EVENT;
1658         bnxt_free_agg_idx(rxr, agg_id);
1659         idx = agg_id;
1660         gro = !!(bp->flags & BNXT_FLAG_GRO);
1661     } else {
1662         agg_id = TPA_END_AGG_ID(tpa_end);
1663         agg_bufs = TPA_END_AGG_BUFS(tpa_end);
1664         tpa_info = &rxr->rx_tpa[agg_id];
1665         idx = RING_CMP(*raw_cons);
1666         if (agg_bufs) {
1667             if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
1668                 return ERR_PTR(-EBUSY);
1669 
1670             *event |= BNXT_AGG_EVENT;
1671             idx = NEXT_CMP(idx);
1672         }
1673         gro = !!TPA_END_GRO(tpa_end);
1674     }
1675     data = tpa_info->data;
1676     data_ptr = tpa_info->data_ptr;
1677     prefetch(data_ptr);
1678     len = tpa_info->len;
1679     mapping = tpa_info->mapping;
1680 
1681     if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
1682         bnxt_abort_tpa(cpr, idx, agg_bufs);
1683         if (agg_bufs > MAX_SKB_FRAGS)
1684             netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
1685                     agg_bufs, (int)MAX_SKB_FRAGS);
1686         return NULL;
1687     }
1688 
1689     if (len <= bp->rx_copy_thresh) {
1690         skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
1691         if (!skb) {
1692             bnxt_abort_tpa(cpr, idx, agg_bufs);
1693             cpr->sw_stats.rx.rx_oom_discards += 1;
1694             return NULL;
1695         }
1696     } else {
1697         u8 *new_data;
1698         dma_addr_t new_mapping;
1699 
1700         new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
1701         if (!new_data) {
1702             bnxt_abort_tpa(cpr, idx, agg_bufs);
1703             cpr->sw_stats.rx.rx_oom_discards += 1;
1704             return NULL;
1705         }
1706 
1707         tpa_info->data = new_data;
1708         tpa_info->data_ptr = new_data + bp->rx_offset;
1709         tpa_info->mapping = new_mapping;
1710 
1711         skb = build_skb(data, bp->rx_buf_size);
1712         dma_unmap_single_attrs(&bp->pdev->dev, mapping,
1713                        bp->rx_buf_use_size, bp->rx_dir,
1714                        DMA_ATTR_WEAK_ORDERING);
1715 
1716         if (!skb) {
1717             skb_free_frag(data);
1718             bnxt_abort_tpa(cpr, idx, agg_bufs);
1719             cpr->sw_stats.rx.rx_oom_discards += 1;
1720             return NULL;
1721         }
1722         skb_reserve(skb, bp->rx_offset);
1723         skb_put(skb, len);
1724     }
1725 
1726     if (agg_bufs) {
1727         skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
1728         if (!skb) {
1729             /* Page reuse already handled by bnxt_rx_agg_pages_skb(). */
1730             cpr->sw_stats.rx.rx_oom_discards += 1;
1731             return NULL;
1732         }
1733     }
1734 
1735     skb->protocol =
1736         eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
1737 
1738     if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1739         skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1740 
1741     if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
1742         (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
1743         __be16 vlan_proto = htons(tpa_info->metadata >>
1744                       RX_CMP_FLAGS2_METADATA_TPID_SFT);
1745         u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1746 
1747         if (eth_type_vlan(vlan_proto)) {
1748             __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
1749         } else {
1750             dev_kfree_skb(skb);
1751             return NULL;
1752         }
1753     }
1754 
1755     skb_checksum_none_assert(skb);
1756     if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1757         skb->ip_summed = CHECKSUM_UNNECESSARY;
1758         skb->csum_level =
1759             (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1760     }
1761 
1762     if (gro)
1763         skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
1764 
1765     return skb;
1766 }
1767 
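/* Stash one RX aggregation completion against its TPA context; the stored
 * entries are consumed when the matching TPA end completion arrives.
 */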
1768 static void bnxt_tpa_agg(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
1769              struct rx_agg_cmp *rx_agg)
1770 {
1771     u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
1772     struct bnxt_tpa_info *tpa_info;
1773 
1774     agg_id = bnxt_lookup_agg_idx(rxr, agg_id);
1775     tpa_info = &rxr->rx_tpa[agg_id];
1776     BUG_ON(tpa_info->agg_count >= MAX_SKB_FRAGS);
1777     tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
1778 }
1779 
1780 static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
1781                  struct sk_buff *skb)
1782 {
1783     if (skb->dev != bp->dev) {
1784         /* this packet belongs to a vf-rep */
1785         bnxt_vf_rep_rx(bp, skb);
1786         return;
1787     }
1788     skb_record_rx_queue(skb, bnapi->index);
1789     napi_gro_receive(&bnapi->napi, skb);
1790 }
1791 
1792 /* returns the following:
1793  * 1       - 1 packet successfully received
1794  * 0       - successful TPA_START, packet not completed yet
1795  * -EBUSY  - completion ring does not have all the agg buffers yet
1796  * -ENOMEM - packet aborted due to out of memory
1797  * -EIO    - packet aborted due to hw error indicated in BD
1798  */
1799 static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
1800                u32 *raw_cons, u8 *event)
1801 {
1802     struct bnxt_napi *bnapi = cpr->bnapi;
1803     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
1804     struct net_device *dev = bp->dev;
1805     struct rx_cmp *rxcmp;
1806     struct rx_cmp_ext *rxcmp1;
1807     u32 tmp_raw_cons = *raw_cons;
1808     u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1809     struct bnxt_sw_rx_bd *rx_buf;
1810     unsigned int len;
1811     u8 *data_ptr, agg_bufs, cmp_type;
1812     bool xdp_active = false;
1813     dma_addr_t dma_addr;
1814     struct sk_buff *skb;
1815     struct xdp_buff xdp;
1816     u32 flags, misc;
1817     void *data;
1818     int rc = 0;
1819 
1820     rxcmp = (struct rx_cmp *)
1821             &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1822 
1823     cmp_type = RX_CMP_TYPE(rxcmp);
1824 
1825     if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
1826         bnxt_tpa_agg(bp, rxr, (struct rx_agg_cmp *)rxcmp);
1827         goto next_rx_no_prod_no_len;
1828     }
1829 
1830     tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1831     cp_cons = RING_CMP(tmp_raw_cons);
1832     rxcmp1 = (struct rx_cmp_ext *)
1833             &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1834 
1835     if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1836         return -EBUSY;
1837 
1838     /* The valid test of the entry must be done first before
1839      * reading any further.
1840      */
1841     dma_rmb();
1842     prod = rxr->rx_prod;
1843 
1844     if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1845         bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1846                    (struct rx_tpa_start_cmp_ext *)rxcmp1);
1847 
1848         *event |= BNXT_RX_EVENT;
1849         goto next_rx_no_prod_no_len;
1850 
1851     } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1852         skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
1853                    (struct rx_tpa_end_cmp *)rxcmp,
1854                    (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
1855 
1856         if (IS_ERR(skb))
1857             return -EBUSY;
1858 
1859         rc = -ENOMEM;
1860         if (likely(skb)) {
1861             bnxt_deliver_skb(bp, bnapi, skb);
1862             rc = 1;
1863         }
1864         *event |= BNXT_RX_EVENT;
1865         goto next_rx_no_prod_no_len;
1866     }
1867 
1868     cons = rxcmp->rx_cmp_opaque;
1869     if (unlikely(cons != rxr->rx_next_cons)) {
1870         int rc1 = bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
1871 
1872         /* 0xffff is forced error, don't print it */
1873         if (rxr->rx_next_cons != 0xffff)
1874             netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
1875                     cons, rxr->rx_next_cons);
1876         bnxt_sched_reset(bp, rxr);
1877         if (rc1)
1878             return rc1;
1879         goto next_rx_no_prod_no_len;
1880     }
1881     rx_buf = &rxr->rx_buf_ring[cons];
1882     data = rx_buf->data;
1883     data_ptr = rx_buf->data_ptr;
1884     prefetch(data_ptr);
1885 
1886     misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
1887     agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
1888 
1889     if (agg_bufs) {
1890         if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1891             return -EBUSY;
1892 
1893         cp_cons = NEXT_CMP(cp_cons);
1894         *event |= BNXT_AGG_EVENT;
1895     }
1896     *event |= BNXT_RX_EVENT;
1897 
1898     rx_buf->data = NULL;
1899     if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1900         u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
1901 
1902         bnxt_reuse_rx_data(rxr, cons, data);
1903         if (agg_bufs)
1904             bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
1905                            false);
1906 
1907         rc = -EIO;
1908         if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
1909             bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
1910             if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
1911                 !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
1912                 netdev_warn_once(bp->dev, "RX buffer error %x\n",
1913                          rx_err);
1914                 bnxt_sched_reset(bp, rxr);
1915             }
1916         }
1917         goto next_rx_no_len;
1918     }
1919 
1920     flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
1921     len = flags >> RX_CMP_LEN_SHIFT;
1922     dma_addr = rx_buf->mapping;
1923 
1924     if (bnxt_xdp_attached(bp, rxr)) {
1925         bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
1926         if (agg_bufs) {
1927             u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
1928                                  cp_cons, agg_bufs,
1929                                  false);
1930             if (!frag_len) {
1931                 cpr->sw_stats.rx.rx_oom_discards += 1;
1932                 rc = -ENOMEM;
1933                 goto next_rx;
1934             }
1935         }
1936         xdp_active = true;
1937     }
1938 
1939     if (xdp_active) {
1940         if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
1941             rc = 1;
1942             goto next_rx;
1943         }
1944     }
1945 
1946     if (len <= bp->rx_copy_thresh) {
1947         skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
1948         bnxt_reuse_rx_data(rxr, cons, data);
1949         if (!skb) {
1950             if (agg_bufs) {
1951                 if (!xdp_active)
1952                     bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0,
1953                                    agg_bufs, false);
1954                 else
1955                     bnxt_xdp_buff_frags_free(rxr, &xdp);
1956             }
1957             cpr->sw_stats.rx.rx_oom_discards += 1;
1958             rc = -ENOMEM;
1959             goto next_rx;
1960         }
1961     } else {
1962         u32 payload;
1963 
1964         if (rx_buf->data_ptr == data_ptr)
1965             payload = misc & RX_CMP_PAYLOAD_OFFSET;
1966         else
1967             payload = 0;
1968         skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
1969                       payload | len);
1970         if (!skb) {
1971             cpr->sw_stats.rx.rx_oom_discards += 1;
1972             rc = -ENOMEM;
1973             goto next_rx;
1974         }
1975     }
1976 
1977     if (agg_bufs) {
1978         if (!xdp_active) {
1979             skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, cp_cons, agg_bufs, false);
1980             if (!skb) {
1981                 cpr->sw_stats.rx.rx_oom_discards += 1;
1982                 rc = -ENOMEM;
1983                 goto next_rx;
1984             }
1985         } else {
1986             skb = bnxt_xdp_build_skb(bp, skb, agg_bufs, rxr->page_pool, &xdp, rxcmp1);
1987             if (!skb) {
1988                 /* we should be able to free the old skb here */
1989                 bnxt_xdp_buff_frags_free(rxr, &xdp);
1990                 cpr->sw_stats.rx.rx_oom_discards += 1;
1991                 rc = -ENOMEM;
1992                 goto next_rx;
1993             }
1994         }
1995     }
1996 
1997     if (RX_CMP_HASH_VALID(rxcmp)) {
1998         u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1999         enum pkt_hash_types type = PKT_HASH_TYPE_L4;
2000 
2001         /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
2002         if (hash_type != 1 && hash_type != 3)
2003             type = PKT_HASH_TYPE_L3;
2004         skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
2005     }
2006 
2007     cfa_code = RX_CMP_CFA_CODE(rxcmp1);
2008     skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
2009 
2010     if ((rxcmp1->rx_cmp_flags2 &
2011          cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
2012         (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) {
2013         u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
2014         u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
2015         __be16 vlan_proto = htons(meta_data >>
2016                       RX_CMP_FLAGS2_METADATA_TPID_SFT);
2017 
2018         if (eth_type_vlan(vlan_proto)) {
2019             __vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
2020         } else {
2021             dev_kfree_skb(skb);
2022             goto next_rx;
2023         }
2024     }
2025 
2026     skb_checksum_none_assert(skb);
2027     if (RX_CMP_L4_CS_OK(rxcmp1)) {
2028         if (dev->features & NETIF_F_RXCSUM) {
2029             skb->ip_summed = CHECKSUM_UNNECESSARY;
2030             skb->csum_level = RX_CMP_ENCAP(rxcmp1);
2031         }
2032     } else {
2033         if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
2034             if (dev->features & NETIF_F_RXCSUM)
2035                 bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
2036         }
2037     }
2038 
2039     if (unlikely((flags & RX_CMP_FLAGS_ITYPES_MASK) ==
2040              RX_CMP_FLAGS_ITYPE_PTP_W_TS) || bp->ptp_all_rx_tstamp) {
2041         if (bp->flags & BNXT_FLAG_CHIP_P5) {
2042             u32 cmpl_ts = le32_to_cpu(rxcmp1->rx_cmp_timestamp);
2043             u64 ns, ts;
2044 
2045             if (!bnxt_get_rx_ts_p5(bp, &ts, cmpl_ts)) {
2046                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2047 
2048                 spin_lock_bh(&ptp->ptp_lock);
2049                 ns = timecounter_cyc2time(&ptp->tc, ts);
2050                 spin_unlock_bh(&ptp->ptp_lock);
2051                 memset(skb_hwtstamps(skb), 0,
2052                        sizeof(*skb_hwtstamps(skb)));
2053                 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
2054             }
2055         }
2056     }
2057     bnxt_deliver_skb(bp, bnapi, skb);
2058     rc = 1;
2059 
2060 next_rx:
2061     cpr->rx_packets += 1;
2062     cpr->rx_bytes += len;
2063 
2064 next_rx_no_len:
2065     rxr->rx_prod = NEXT_RX(prod);
2066     rxr->rx_next_cons = NEXT_RX(cons);
2067 
2068 next_rx_no_prod_no_len:
2069     *raw_cons = tmp_raw_cons;
2070 
2071     return rc;
2072 }
2073 
2074 /* In netpoll mode, if we are using a combined completion ring, we need to
2075  * discard the rx packets and recycle the buffers.
2076  */
2077 static int bnxt_force_rx_discard(struct bnxt *bp,
2078                  struct bnxt_cp_ring_info *cpr,
2079                  u32 *raw_cons, u8 *event)
2080 {
2081     u32 tmp_raw_cons = *raw_cons;
2082     struct rx_cmp_ext *rxcmp1;
2083     struct rx_cmp *rxcmp;
2084     u16 cp_cons;
2085     u8 cmp_type;
2086     int rc;
2087 
2088     cp_cons = RING_CMP(tmp_raw_cons);
2089     rxcmp = (struct rx_cmp *)
2090             &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2091 
2092     tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
2093     cp_cons = RING_CMP(tmp_raw_cons);
2094     rxcmp1 = (struct rx_cmp_ext *)
2095             &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2096 
2097     if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2098         return -EBUSY;
2099 
2100     /* The valid test of the entry must be done first before
2101      * reading any further.
2102      */
2103     dma_rmb();
2104     cmp_type = RX_CMP_TYPE(rxcmp);
2105     if (cmp_type == CMP_TYPE_RX_L2_CMP) {
2106         rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2107             cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2108     } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
2109         struct rx_tpa_end_cmp_ext *tpa_end1;
2110 
2111         tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
2112         tpa_end1->rx_tpa_end_cmp_errors_v2 |=
2113             cpu_to_le32(RX_TPA_END_CMP_ERRORS);
2114     }
2115     rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
2116     if (rc && rc != -EBUSY)
2117         cpr->sw_stats.rx.rx_netpoll_discards += 1;
2118     return rc;
2119 }
2120 
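/* Read a firmware health register, dispatching on its type: PCI config
 * space, a mapped GRC window, BAR0 or BAR1.
 */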
2121 u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
2122 {
2123     struct bnxt_fw_health *fw_health = bp->fw_health;
2124     u32 reg = fw_health->regs[reg_idx];
2125     u32 reg_type, reg_off, val = 0;
2126 
2127     reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
2128     reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
2129     switch (reg_type) {
2130     case BNXT_FW_HEALTH_REG_TYPE_CFG:
2131         pci_read_config_dword(bp->pdev, reg_off, &val);
2132         break;
2133     case BNXT_FW_HEALTH_REG_TYPE_GRC:
2134         reg_off = fw_health->mapped_regs[reg_idx];
2135         fallthrough;
2136     case BNXT_FW_HEALTH_REG_TYPE_BAR0:
2137         val = readl(bp->bar0 + reg_off);
2138         break;
2139     case BNXT_FW_HEALTH_REG_TYPE_BAR1:
2140         val = readl(bp->bar1 + reg_off);
2141         break;
2142     }
2143     if (reg_idx == BNXT_FW_RESET_INPROG_REG)
2144         val &= fw_health->fw_reset_inprog_reg_mask;
2145     return val;
2146 }
2147 
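/* Map a firmware aggregation ring ID to its ring group index, or return
 * INVALID_HW_RING_ID if no group owns that ring.
 */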
2148 static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
2149 {
2150     int i;
2151 
2152     for (i = 0; i < bp->rx_nr_rings; i++) {
2153         u16 grp_idx = bp->rx_ring[i].bnapi->index;
2154         struct bnxt_ring_grp_info *grp_info;
2155 
2156         grp_info = &bp->grp_info[grp_idx];
2157         if (grp_info->agg_fw_ring_id == ring_id)
2158             return grp_idx;
2159     }
2160     return INVALID_HW_RING_ID;
2161 }
2162 
2163 static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
2164 {
2165     u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
2166 
2167     switch (err_type) {
2168     case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
2169         netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. Please fix the signal and reconfigure the pin\n",
2170                BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
2171         break;
2172     case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
2173         netdev_warn(bp->dev, "Pause Storm detected!\n");
2174         break;
2175     case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
2176         netdev_warn(bp->dev, "One or more MMIO doorbells dropped by the device!\n");
2177         break;
2178     default:
2179         netdev_err(bp->dev, "FW reported unknown error type %u\n",
2180                err_type);
2181         break;
2182     }
2183 }
2184 
2185 #define BNXT_GET_EVENT_PORT(data)   \
2186     ((data) &           \
2187      ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
2188 
2189 #define BNXT_EVENT_RING_TYPE(data2) \
2190     ((data2) &          \
2191      ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
2192 
2193 #define BNXT_EVENT_RING_TYPE_RX(data2)  \
2194     (BNXT_EVENT_RING_TYPE(data2) == \
2195      ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
2196 
2197 #define BNXT_EVENT_PHC_EVENT_TYPE(data1)    \
2198     (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_MASK) >>\
2199      ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_SFT)
2200 
2201 #define BNXT_EVENT_PHC_RTC_UPDATE(data1)    \
2202     (((data1) & ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_MASK) >>\
2203      ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_PHC_TIME_MSB_SFT)
2204 
2205 #define BNXT_PHC_BITS   48
2206 
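/* Decode one firmware async event completion and schedule the matching
 * slow-path work where needed.
 */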
2207 static int bnxt_async_event_process(struct bnxt *bp,
2208                     struct hwrm_async_event_cmpl *cmpl)
2209 {
2210     u16 event_id = le16_to_cpu(cmpl->event_id);
2211     u32 data1 = le32_to_cpu(cmpl->event_data1);
2212     u32 data2 = le32_to_cpu(cmpl->event_data2);
2213 
2214     netdev_dbg(bp->dev, "hwrm event 0x%x {0x%x, 0x%x}\n",
2215            event_id, data1, data2);
2216 
2217     /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
2218     switch (event_id) {
2219     case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
2220         struct bnxt_link_info *link_info = &bp->link_info;
2221 
2222         if (BNXT_VF(bp))
2223             goto async_event_process_exit;
2224 
2225         /* print unsupported speed warning in forced speed mode only */
2226         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
2227             (data1 & 0x20000)) {
2228             u16 fw_speed = link_info->force_link_speed;
2229             u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
2230 
2231             if (speed != SPEED_UNKNOWN)
2232                 netdev_warn(bp->dev, "Link speed %d no longer supported\n",
2233                         speed);
2234         }
2235         set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
2236     }
2237         fallthrough;
2238     case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
2239     case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE:
2240         set_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT, &bp->sp_event);
2241         fallthrough;
2242     case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
2243         set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
2244         break;
2245     case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
2246         set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
2247         break;
2248     case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
2249         u16 port_id = BNXT_GET_EVENT_PORT(data1);
2250 
2251         if (BNXT_VF(bp))
2252             break;
2253 
2254         if (bp->pf.port_id != port_id)
2255             break;
2256 
2257         set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
2258         break;
2259     }
2260     case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
2261         if (BNXT_PF(bp))
2262             goto async_event_process_exit;
2263         set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
2264         break;
2265     case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
2266         char *type_str = "Solicited";
2267 
2268         if (!bp->fw_health)
2269             goto async_event_process_exit;
2270 
2271         bp->fw_reset_timestamp = jiffies;
2272         bp->fw_reset_min_dsecs = cmpl->timestamp_lo;
2273         if (!bp->fw_reset_min_dsecs)
2274             bp->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
2275         bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
2276         if (!bp->fw_reset_max_dsecs)
2277             bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
2278         if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
2279             set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
2280         } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
2281             type_str = "Fatal";
2282             bp->fw_health->fatalities++;
2283             set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
2284         } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
2285                EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
2286             type_str = "Non-fatal";
2287             bp->fw_health->survivals++;
2288             set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
2289         }
2290         netif_warn(bp, hw, bp->dev,
2291                "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
2292                type_str, data1, data2,
2293                bp->fw_reset_min_dsecs * 100,
2294                bp->fw_reset_max_dsecs * 100);
2295         set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
2296         break;
2297     }
2298     case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
2299         struct bnxt_fw_health *fw_health = bp->fw_health;
2300         char *status_desc = "healthy";
2301         u32 status;
2302 
2303         if (!fw_health)
2304             goto async_event_process_exit;
2305 
2306         if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
2307             fw_health->enabled = false;
2308             netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
2309             break;
2310         }
2311         fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
2312         fw_health->tmr_multiplier =
2313             DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
2314                      bp->current_interval * 10);
2315         fw_health->tmr_counter = fw_health->tmr_multiplier;
2316         if (!fw_health->enabled)
2317             fw_health->last_fw_heartbeat =
2318                 bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
2319         fw_health->last_fw_reset_cnt =
2320             bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
2321         status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
2322         if (status != BNXT_FW_STATUS_HEALTHY)
2323             status_desc = "unhealthy";
2324         netif_info(bp, drv, bp->dev,
2325                "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
2326                fw_health->primary ? "primary" : "backup", status,
2327                status_desc, fw_health->last_fw_reset_cnt);
2328         if (!fw_health->enabled) {
2329             /* Make sure tmr_counter is set and visible to
2330              * bnxt_health_check() before setting enabled to true.
2331              */
2332             smp_wmb();
2333             fw_health->enabled = true;
2334         }
2335         goto async_event_process_exit;
2336     }
2337     case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
2338         netif_notice(bp, hw, bp->dev,
2339                  "Received firmware debug notification, data1: 0x%x, data2: 0x%x\n",
2340                  data1, data2);
2341         goto async_event_process_exit;
2342     case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
2343         struct bnxt_rx_ring_info *rxr;
2344         u16 grp_idx;
2345 
2346         if (bp->flags & BNXT_FLAG_CHIP_P5)
2347             goto async_event_process_exit;
2348 
2349         netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
2350                 BNXT_EVENT_RING_TYPE(data2), data1);
2351         if (!BNXT_EVENT_RING_TYPE_RX(data2))
2352             goto async_event_process_exit;
2353 
2354         grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
2355         if (grp_idx == INVALID_HW_RING_ID) {
2356             netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
2357                     data1);
2358             goto async_event_process_exit;
2359         }
2360         rxr = bp->bnapi[grp_idx]->rx_ring;
2361         bnxt_sched_reset(bp, rxr);
2362         goto async_event_process_exit;
2363     }
2364     case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
2365         struct bnxt_fw_health *fw_health = bp->fw_health;
2366 
2367         netif_notice(bp, hw, bp->dev,
2368                  "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
2369                  data1, data2);
2370         if (fw_health) {
2371             fw_health->echo_req_data1 = data1;
2372             fw_health->echo_req_data2 = data2;
2373             set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
2374             break;
2375         }
2376         goto async_event_process_exit;
2377     }
2378     case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: {
2379         bnxt_ptp_pps_event(bp, data1, data2);
2380         goto async_event_process_exit;
2381     }
2382     case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
2383         bnxt_event_error_report(bp, data1, data2);
2384         goto async_event_process_exit;
2385     }
2386     case ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE: {
2387         switch (BNXT_EVENT_PHC_EVENT_TYPE(data1)) {
2388         case ASYNC_EVENT_CMPL_PHC_UPDATE_EVENT_DATA1_FLAGS_PHC_RTC_UPDATE:
2389             if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) {
2390                 struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
2391                 u64 ns;
2392 
2393                 spin_lock_bh(&ptp->ptp_lock);
2394                 bnxt_ptp_update_current_time(bp);
2395                 ns = (((u64)BNXT_EVENT_PHC_RTC_UPDATE(data1) <<
2396                        BNXT_PHC_BITS) | ptp->current_time);
2397                 bnxt_ptp_rtc_timecounter_init(ptp, ns);
2398                 spin_unlock_bh(&ptp->ptp_lock);
2399             }
2400             break;
2401         }
2402         goto async_event_process_exit;
2403     }
2404     case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: {
2405         u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff;
2406 
2407         hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED);
2408         goto async_event_process_exit;
2409     }
2410     default:
2411         goto async_event_process_exit;
2412     }
2413     bnxt_queue_sp_work(bp);
2414 async_event_process_exit:
2415     bnxt_ulp_async_events(bp, cmpl);
2416     return 0;
2417 }
2418 
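/* Dispatch HWRM-related completions: command done notifications,
 * forwarded VF requests and async events.
 */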
2419 static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
2420 {
2421     u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
2422     struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
2423     struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
2424                 (struct hwrm_fwd_req_cmpl *)txcmp;
2425 
2426     switch (cmpl_type) {
2427     case CMPL_BASE_TYPE_HWRM_DONE:
2428         seq_id = le16_to_cpu(h_cmpl->sequence_id);
2429         hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE);
2430         break;
2431 
2432     case CMPL_BASE_TYPE_HWRM_FWD_REQ:
2433         vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
2434 
2435         if ((vf_id < bp->pf.first_vf_id) ||
2436             (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
2437             netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
2438                    vf_id);
2439             return -EINVAL;
2440         }
2441 
2442         set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
2443         set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
2444         bnxt_queue_sp_work(bp);
2445         break;
2446 
2447     case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
2448         bnxt_async_event_process(bp,
2449                      (struct hwrm_async_event_cmpl *)txcmp);
2450         break;
2451 
2452     default:
2453         break;
2454     }
2455 
2456     return 0;
2457 }
2458 
2459 static irqreturn_t bnxt_msix(int irq, void *dev_instance)
2460 {
2461     struct bnxt_napi *bnapi = dev_instance;
2462     struct bnxt *bp = bnapi->bp;
2463     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2464     u32 cons = RING_CMP(cpr->cp_raw_cons);
2465 
2466     cpr->event_ctr++;
2467     prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2468     napi_schedule(&bnapi->napi);
2469     return IRQ_HANDLED;
2470 }
2471 
2472 static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
2473 {
2474     u32 raw_cons = cpr->cp_raw_cons;
2475     u16 cons = RING_CMP(raw_cons);
2476     struct tx_cmp *txcmp;
2477 
2478     txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2479 
2480     return TX_CMP_VALID(txcmp, raw_cons);
2481 }
2482 
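/* Legacy INTx interrupt handler: verify the interrupt belongs to this
 * ring, disable the ring IRQ and schedule NAPI.
 */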
2483 static irqreturn_t bnxt_inta(int irq, void *dev_instance)
2484 {
2485     struct bnxt_napi *bnapi = dev_instance;
2486     struct bnxt *bp = bnapi->bp;
2487     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2488     u32 cons = RING_CMP(cpr->cp_raw_cons);
2489     u32 int_status;
2490 
2491     prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
2492 
2493     if (!bnxt_has_work(bp, cpr)) {
2494         int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
2495         /* return if erroneous interrupt */
2496         if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
2497             return IRQ_NONE;
2498     }
2499 
2500     /* disable ring IRQ */
2501     BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);
2502 
2503     /* Return here if interrupt is shared and is disabled. */
2504     if (unlikely(atomic_read(&bp->intr_sem) != 0))
2505         return IRQ_HANDLED;
2506 
2507     napi_schedule(&bnapi->napi);
2508     return IRQ_HANDLED;
2509 }
2510 
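/* Core completion ring poll loop shared by the NAPI handlers: process TX,
 * RX and HWRM completions up to the budget and record the doorbell events
 * that still need to be written.
 */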
2511 static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2512                 int budget)
2513 {
2514     struct bnxt_napi *bnapi = cpr->bnapi;
2515     u32 raw_cons = cpr->cp_raw_cons;
2516     u32 cons;
2517     int tx_pkts = 0;
2518     int rx_pkts = 0;
2519     u8 event = 0;
2520     struct tx_cmp *txcmp;
2521 
2522     cpr->has_more_work = 0;
2523     cpr->had_work_done = 1;
2524     while (1) {
2525         int rc;
2526 
2527         cons = RING_CMP(raw_cons);
2528         txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2529 
2530         if (!TX_CMP_VALID(txcmp, raw_cons))
2531             break;
2532 
2533         /* The valid test of the entry must be done first before
2534          * reading any further.
2535          */
2536         dma_rmb();
2537         if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
2538             tx_pkts++;
2539             /* return full budget so NAPI will complete. */
2540             if (unlikely(tx_pkts >= bp->tx_wake_thresh)) {
2541                 rx_pkts = budget;
2542                 raw_cons = NEXT_RAW_CMP(raw_cons);
2543                 if (budget)
2544                     cpr->has_more_work = 1;
2545                 break;
2546             }
2547         } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2548             if (likely(budget))
2549                 rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2550             else
2551                 rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
2552                                &event);
2553             if (likely(rc >= 0))
2554                 rx_pkts += rc;
2555             /* Increment rx_pkts when rc is -ENOMEM to count towards
2556              * the NAPI budget.  Otherwise, we may potentially loop
2557              * here forever if we consistently cannot allocate
2558              * buffers.
2559              */
2560             else if (rc == -ENOMEM && budget)
2561                 rx_pkts++;
2562             else if (rc == -EBUSY)  /* partial completion */
2563                 break;
2564         } else if (unlikely((TX_CMP_TYPE(txcmp) ==
2565                      CMPL_BASE_TYPE_HWRM_DONE) ||
2566                     (TX_CMP_TYPE(txcmp) ==
2567                      CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
2568                     (TX_CMP_TYPE(txcmp) ==
2569                      CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
2570             bnxt_hwrm_handler(bp, txcmp);
2571         }
2572         raw_cons = NEXT_RAW_CMP(raw_cons);
2573 
2574         if (rx_pkts && rx_pkts == budget) {
2575             cpr->has_more_work = 1;
2576             break;
2577         }
2578     }
2579 
2580     if (event & BNXT_REDIRECT_EVENT)
2581         xdp_do_flush();
2582 
2583     if (event & BNXT_TX_EVENT) {
2584         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
2585         u16 prod = txr->tx_prod;
2586 
2587         /* Sync BD data before updating doorbell */
2588         wmb();
2589 
2590         bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
2591     }
2592 
2593     cpr->cp_raw_cons = raw_cons;
2594     bnapi->tx_pkts += tx_pkts;
2595     bnapi->events |= event;
2596     return rx_pkts;
2597 }
2598 
2599 static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
2600 {
2601     if (bnapi->tx_pkts) {
2602         bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
2603         bnapi->tx_pkts = 0;
2604     }
2605 
2606     if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
2607         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2608 
2609         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2610     }
2611     if (bnapi->events & BNXT_AGG_EVENT) {
2612         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2613 
2614         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2615     }
2616     bnapi->events = 0;
2617 }
2618 
2619 static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
2620               int budget)
2621 {
2622     struct bnxt_napi *bnapi = cpr->bnapi;
2623     int rx_pkts;
2624 
2625     rx_pkts = __bnxt_poll_work(bp, cpr, budget);
2626 
2627     /* ACK completion ring before freeing tx ring and producing new
2628      * buffers in rx/agg rings to prevent overflowing the completion
2629      * ring.
2630      */
2631     bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
2632 
2633     __bnxt_poll_work_done(bp, bnapi);
2634     return rx_pkts;
2635 }
2636 
2637 static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
2638 {
2639     struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2640     struct bnxt *bp = bnapi->bp;
2641     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2642     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
2643     struct tx_cmp *txcmp;
2644     struct rx_cmp_ext *rxcmp1;
2645     u32 cp_cons, tmp_raw_cons;
2646     u32 raw_cons = cpr->cp_raw_cons;
2647     u32 rx_pkts = 0;
2648     u8 event = 0;
2649 
2650     while (1) {
2651         int rc;
2652 
2653         cp_cons = RING_CMP(raw_cons);
2654         txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2655 
2656         if (!TX_CMP_VALID(txcmp, raw_cons))
2657             break;
2658 
2659         /* The valid test of the entry must be done first before
2660          * reading any further.
2661          */
2662         dma_rmb();
2663         if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
2664             tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
2665             cp_cons = RING_CMP(tmp_raw_cons);
2666             rxcmp1 = (struct rx_cmp_ext *)
2667               &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
2668 
2669             if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
2670                 break;
2671 
2672             /* force an error to recycle the buffer */
2673             rxcmp1->rx_cmp_cfa_code_errors_v2 |=
2674                 cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
2675 
2676             rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
2677             if (likely(rc == -EIO) && budget)
2678                 rx_pkts++;
2679             else if (rc == -EBUSY)  /* partial completion */
2680                 break;
2681         } else if (unlikely(TX_CMP_TYPE(txcmp) ==
2682                     CMPL_BASE_TYPE_HWRM_DONE)) {
2683             bnxt_hwrm_handler(bp, txcmp);
2684         } else {
2685             netdev_err(bp->dev,
2686                    "Invalid completion received on special ring\n");
2687         }
2688         raw_cons = NEXT_RAW_CMP(raw_cons);
2689 
2690         if (rx_pkts == budget)
2691             break;
2692     }
2693 
2694     cpr->cp_raw_cons = raw_cons;
2695     BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
2696     bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
2697 
2698     if (event & BNXT_AGG_EVENT)
2699         bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
2700 
2701     if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
2702         napi_complete_done(napi, rx_pkts);
2703         BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2704     }
2705     return rx_pkts;
2706 }
2707 
2708 static int bnxt_poll(struct napi_struct *napi, int budget)
2709 {
2710     struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2711     struct bnxt *bp = bnapi->bp;
2712     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2713     int work_done = 0;
2714 
2715     if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2716         napi_complete(napi);
2717         return 0;
2718     }
2719     while (1) {
2720         work_done += bnxt_poll_work(bp, cpr, budget - work_done);
2721 
2722         if (work_done >= budget) {
2723             if (!budget)
2724                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2725             break;
2726         }
2727 
2728         if (!bnxt_has_work(bp, cpr)) {
2729             if (napi_complete_done(napi, work_done))
2730                 BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
2731             break;
2732         }
2733     }
2734     if (bp->flags & BNXT_FLAG_DIM) {
2735         struct dim_sample dim_sample = {};
2736 
2737         dim_update_sample(cpr->event_ctr,
2738                   cpr->rx_packets,
2739                   cpr->rx_bytes,
2740                   &dim_sample);
2741         net_dim(&cpr->dim, dim_sample);
2742     }
2743     return work_done;
2744 }
2745 
2746 static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
2747 {
2748     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2749     int i, work_done = 0;
2750 
2751     for (i = 0; i < 2; i++) {
2752         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2753 
2754         if (cpr2) {
2755             work_done += __bnxt_poll_work(bp, cpr2,
2756                               budget - work_done);
2757             cpr->has_more_work |= cpr2->has_more_work;
2758         }
2759     }
2760     return work_done;
2761 }
2762 
2763 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
2764                  u64 dbr_type)
2765 {
2766     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2767     int i;
2768 
2769     for (i = 0; i < 2; i++) {
2770         struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
2771         struct bnxt_db_info *db;
2772 
2773         if (cpr2 && cpr2->had_work_done) {
2774             db = &cpr2->cp_db;
2775             bnxt_writeq(bp, db->db_key64 | dbr_type |
2776                     RING_CMP(cpr2->cp_raw_cons), db->doorbell);
2777             cpr2->had_work_done = 0;
2778         }
2779     }
2780     __bnxt_poll_work_done(bp, bnapi);
2781 }
2782 
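/* NAPI poll handler for P5 chips: drain the notification queue, poll the
 * completion rings it refers to, then re-arm the doorbells.
 */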
2783 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
2784 {
2785     struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
2786     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2787     struct bnxt_cp_ring_info *cpr_rx;
2788     u32 raw_cons = cpr->cp_raw_cons;
2789     struct bnxt *bp = bnapi->bp;
2790     struct nqe_cn *nqcmp;
2791     int work_done = 0;
2792     u32 cons;
2793 
2794     if (unlikely(test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))) {
2795         napi_complete(napi);
2796         return 0;
2797     }
2798     if (cpr->has_more_work) {
2799         cpr->has_more_work = 0;
2800         work_done = __bnxt_poll_cqs(bp, bnapi, budget);
2801     }
2802     while (1) {
2803         cons = RING_CMP(raw_cons);
2804         nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];
2805 
2806         if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
2807             if (cpr->has_more_work)
2808                 break;
2809 
2810             __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
2811             cpr->cp_raw_cons = raw_cons;
2812             if (napi_complete_done(napi, work_done))
2813                 BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
2814                           cpr->cp_raw_cons);
2815             goto poll_done;
2816         }
2817 
2818         /* The valid test of the entry must be done first before
2819          * reading any further.
2820          */
2821         dma_rmb();
2822 
2823         if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
2824             u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
2825             struct bnxt_cp_ring_info *cpr2;
2826 
2827             /* No more budget for RX work */
2828             if (budget && work_done >= budget && idx == BNXT_RX_HDL)
2829                 break;
2830 
2831             cpr2 = cpr->cp_ring_arr[idx];
2832             work_done += __bnxt_poll_work(bp, cpr2,
2833                               budget - work_done);
2834             cpr->has_more_work |= cpr2->has_more_work;
2835         } else {
2836             bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
2837         }
2838         raw_cons = NEXT_RAW_CMP(raw_cons);
2839     }
2840     __bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
2841     if (raw_cons != cpr->cp_raw_cons) {
2842         cpr->cp_raw_cons = raw_cons;
2843         BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
2844     }
2845 poll_done:
2846     cpr_rx = cpr->cp_ring_arr[BNXT_RX_HDL];
2847     if (cpr_rx && (bp->flags & BNXT_FLAG_DIM)) {
2848         struct dim_sample dim_sample = {};
2849 
2850         dim_update_sample(cpr->event_ctr,
2851                   cpr_rx->rx_packets,
2852                   cpr_rx->rx_bytes,
2853                   &dim_sample);
2854         net_dim(&cpr->dim, dim_sample);
2855     }
2856     return work_done;
2857 }
2858 
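/* Unmap and free every pending TX SKB and XDP frame across all TX rings. */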
2859 static void bnxt_free_tx_skbs(struct bnxt *bp)
2860 {
2861     int i, max_idx;
2862     struct pci_dev *pdev = bp->pdev;
2863 
2864     if (!bp->tx_ring)
2865         return;
2866 
2867     max_idx = bp->tx_nr_pages * TX_DESC_CNT;
2868     for (i = 0; i < bp->tx_nr_rings; i++) {
2869         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
2870         int j;
2871 
2872         if (!txr->tx_buf_ring)
2873             continue;
2874 
2875         for (j = 0; j < max_idx;) {
2876             struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
2877             struct sk_buff *skb;
2878             int k, last;
2879 
2880             if (i < bp->tx_nr_rings_xdp &&
2881                 tx_buf->action == XDP_REDIRECT) {
2882                 dma_unmap_single(&pdev->dev,
2883                     dma_unmap_addr(tx_buf, mapping),
2884                     dma_unmap_len(tx_buf, len),
2885                     DMA_TO_DEVICE);
2886                 xdp_return_frame(tx_buf->xdpf);
2887                 tx_buf->action = 0;
2888                 tx_buf->xdpf = NULL;
2889                 j++;
2890                 continue;
2891             }
2892 
2893             skb = tx_buf->skb;
2894             if (!skb) {
2895                 j++;
2896                 continue;
2897             }
2898 
2899             tx_buf->skb = NULL;
2900 
2901             if (tx_buf->is_push) {
2902                 dev_kfree_skb(skb);
2903                 j += 2;
2904                 continue;
2905             }
2906 
2907             dma_unmap_single(&pdev->dev,
2908                      dma_unmap_addr(tx_buf, mapping),
2909                      skb_headlen(skb),
2910                      DMA_TO_DEVICE);
2911 
2912             last = tx_buf->nr_frags;
2913             j += 2;
2914             for (k = 0; k < last; k++, j++) {
2915                 int ring_idx = j & bp->tx_ring_mask;
2916                 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2917 
2918                 tx_buf = &txr->tx_buf_ring[ring_idx];
2919                 dma_unmap_page(
2920                     &pdev->dev,
2921                     dma_unmap_addr(tx_buf, mapping),
2922                     skb_frag_size(frag), DMA_TO_DEVICE);
2923             }
2924             dev_kfree_skb(skb);
2925         }
2926         netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
2927     }
2928 }
2929 
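/* Unmap and free all RX, TPA and aggregation buffers still posted on one
 * RX ring.
 */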
2930 static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
2931 {
2932     struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
2933     struct pci_dev *pdev = bp->pdev;
2934     struct bnxt_tpa_idx_map *map;
2935     int i, max_idx, max_agg_idx;
2936 
2937     max_idx = bp->rx_nr_pages * RX_DESC_CNT;
2938     max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
2939     if (!rxr->rx_tpa)
2940         goto skip_rx_tpa_free;
2941 
2942     for (i = 0; i < bp->max_tpa; i++) {
2943         struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
2944         u8 *data = tpa_info->data;
2945 
2946         if (!data)
2947             continue;
2948 
2949         dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
2950                        bp->rx_buf_use_size, bp->rx_dir,
2951                        DMA_ATTR_WEAK_ORDERING);
2952 
2953         tpa_info->data = NULL;
2954 
2955         skb_free_frag(data);
2956     }
2957 
2958 skip_rx_tpa_free:
2959     if (!rxr->rx_buf_ring)
2960         goto skip_rx_buf_free;
2961 
2962     for (i = 0; i < max_idx; i++) {
2963         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
2964         dma_addr_t mapping = rx_buf->mapping;
2965         void *data = rx_buf->data;
2966 
2967         if (!data)
2968             continue;
2969 
2970         rx_buf->data = NULL;
2971         if (BNXT_RX_PAGE_MODE(bp)) {
2972             mapping -= bp->rx_dma_offset;
2973             dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
2974                          bp->rx_dir,
2975                          DMA_ATTR_WEAK_ORDERING);
2976             page_pool_recycle_direct(rxr->page_pool, data);
2977         } else {
2978             dma_unmap_single_attrs(&pdev->dev, mapping,
2979                            bp->rx_buf_use_size, bp->rx_dir,
2980                            DMA_ATTR_WEAK_ORDERING);
2981             skb_free_frag(data);
2982         }
2983     }
2984 
2985 skip_rx_buf_free:
2986     if (!rxr->rx_agg_ring)
2987         goto skip_rx_agg_free;
2988 
2989     for (i = 0; i < max_agg_idx; i++) {
2990         struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
2991         struct page *page = rx_agg_buf->page;
2992 
2993         if (!page)
2994             continue;
2995 
2996         if (BNXT_RX_PAGE_MODE(bp)) {
2997             dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
2998                          BNXT_RX_PAGE_SIZE, bp->rx_dir,
2999                          DMA_ATTR_WEAK_ORDERING);
3000             rx_agg_buf->page = NULL;
3001             __clear_bit(i, rxr->rx_agg_bmap);
3002 
3003             page_pool_recycle_direct(rxr->page_pool, page);
3004         } else {
3005             dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
3006                          BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE,
3007                          DMA_ATTR_WEAK_ORDERING);
3008             rx_agg_buf->page = NULL;
3009             __clear_bit(i, rxr->rx_agg_bmap);
3010 
3011             __free_page(page);
3012         }
3013     }
3014 
3015 skip_rx_agg_free:
3016     if (rxr->rx_page) {
3017         __free_page(rxr->rx_page);
3018         rxr->rx_page = NULL;
3019     }
3020     map = rxr->rx_tpa_idx_map;
3021     if (map)
3022         memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
3023 }
3024 
3025 static void bnxt_free_rx_skbs(struct bnxt *bp)
3026 {
3027     int i;
3028 
3029     if (!bp->rx_ring)
3030         return;
3031 
3032     for (i = 0; i < bp->rx_nr_rings; i++)
3033         bnxt_free_one_rx_ring_skbs(bp, i);
3034 }
3035 
3036 static void bnxt_free_skbs(struct bnxt *bp)
3037 {
3038     bnxt_free_tx_skbs(bp);
3039     bnxt_free_rx_skbs(bp);
3040 }
3041 
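     /* Pre-initialize firmware context memory.  If no valid offset is
      * given, the whole block is filled with init_val; otherwise only the
      * byte at "offset" inside each "size"-byte element is written.
      */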
3042 static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
3043 {
3044     u8 init_val = mem_init->init_val;
3045     u16 offset = mem_init->offset;
3046     u8 *p2 = p;
3047     int i;
3048 
3049     if (!init_val)
3050         return;
3051     if (offset == BNXT_MEM_INVALID_OFFSET) {
3052         memset(p, init_val, len);
3053         return;
3054     }
3055     for (i = 0; i < len; i += mem_init->size)
3056         *(p2 + i + offset) = init_val;
3057 }
3058 
3059 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3060 {
3061     struct pci_dev *pdev = bp->pdev;
3062     int i;
3063 
3064     if (!rmem->pg_arr)
3065         goto skip_pages;
3066 
3067     for (i = 0; i < rmem->nr_pages; i++) {
3068         if (!rmem->pg_arr[i])
3069             continue;
3070 
3071         dma_free_coherent(&pdev->dev, rmem->page_size,
3072                   rmem->pg_arr[i], rmem->dma_arr[i]);
3073 
3074         rmem->pg_arr[i] = NULL;
3075     }
3076 skip_pages:
3077     if (rmem->pg_tbl) {
3078         size_t pg_tbl_size = rmem->nr_pages * 8;
3079 
3080         if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3081             pg_tbl_size = rmem->page_size;
3082         dma_free_coherent(&pdev->dev, pg_tbl_size,
3083                   rmem->pg_tbl, rmem->pg_tbl_map);
3084         rmem->pg_tbl = NULL;
3085     }
3086     if (rmem->vmem_size && *rmem->vmem) {
3087         vfree(*rmem->vmem);
3088         *rmem->vmem = NULL;
3089     }
3090 }
3091 
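     /* Allocate the DMA-coherent pages backing one HW ring described by
      * rmem, plus an optional page table for multi-page or multi-level
      * rings and an optional vzalloc'ed SW buffer array.  Page table
      * entries carry the PTE valid/next-to-last/last bits the chip
      * expects.
      */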
3092 static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
3093 {
3094     struct pci_dev *pdev = bp->pdev;
3095     u64 valid_bit = 0;
3096     int i;
3097 
3098     if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
3099         valid_bit = PTU_PTE_VALID;
3100     if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) {
3101         size_t pg_tbl_size = rmem->nr_pages * 8;
3102 
3103         if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG)
3104             pg_tbl_size = rmem->page_size;
3105         rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size,
3106                           &rmem->pg_tbl_map,
3107                           GFP_KERNEL);
3108         if (!rmem->pg_tbl)
3109             return -ENOMEM;
3110     }
3111 
3112     for (i = 0; i < rmem->nr_pages; i++) {
3113         u64 extra_bits = valid_bit;
3114 
3115         rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
3116                              rmem->page_size,
3117                              &rmem->dma_arr[i],
3118                              GFP_KERNEL);
3119         if (!rmem->pg_arr[i])
3120             return -ENOMEM;
3121 
3122         if (rmem->mem_init)
3123             bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
3124                       rmem->page_size);
3125         if (rmem->nr_pages > 1 || rmem->depth > 0) {
3126             if (i == rmem->nr_pages - 2 &&
3127                 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3128                 extra_bits |= PTU_PTE_NEXT_TO_LAST;
3129             else if (i == rmem->nr_pages - 1 &&
3130                  (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
3131                 extra_bits |= PTU_PTE_LAST;
3132             rmem->pg_tbl[i] =
3133                 cpu_to_le64(rmem->dma_arr[i] | extra_bits);
3134         }
3135     }
3136 
3137     if (rmem->vmem_size) {
3138         *rmem->vmem = vzalloc(rmem->vmem_size);
3139         if (!(*rmem->vmem))
3140             return -ENOMEM;
3141     }
3142     return 0;
3143 }
3144 
3145 static void bnxt_free_tpa_info(struct bnxt *bp)
3146 {
3147     int i;
3148 
3149     for (i = 0; i < bp->rx_nr_rings; i++) {
3150         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3151 
3152         kfree(rxr->rx_tpa_idx_map);
3153         rxr->rx_tpa_idx_map = NULL;
3154         if (rxr->rx_tpa) {
3155             kfree(rxr->rx_tpa[0].agg_arr);
3156             rxr->rx_tpa[0].agg_arr = NULL;
3157         }
3158         kfree(rxr->rx_tpa);
3159         rxr->rx_tpa = NULL;
3160     }
3161 }
3162 
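     /* Allocate per-ring TPA (HW GRO/LRO) state.  On P5 chips bp->max_tpa
      * is derived from the firmware-reported max_tpa_v2, and each ring
      * also needs an aggregation completion array plus an agg ID map.
      */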
3163 static int bnxt_alloc_tpa_info(struct bnxt *bp)
3164 {
3165     int i, j, total_aggs = 0;
3166 
3167     bp->max_tpa = MAX_TPA;
3168     if (bp->flags & BNXT_FLAG_CHIP_P5) {
3169         if (!bp->max_tpa_v2)
3170             return 0;
3171         bp->max_tpa = max_t(u16, bp->max_tpa_v2, MAX_TPA_P5);
3172         total_aggs = bp->max_tpa * MAX_SKB_FRAGS;
3173     }
3174 
3175     for (i = 0; i < bp->rx_nr_rings; i++) {
3176         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3177         struct rx_agg_cmp *agg;
3178 
3179         rxr->rx_tpa = kcalloc(bp->max_tpa, sizeof(struct bnxt_tpa_info),
3180                       GFP_KERNEL);
3181         if (!rxr->rx_tpa)
3182             return -ENOMEM;
3183 
3184         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3185             continue;
3186         agg = kcalloc(total_aggs, sizeof(*agg), GFP_KERNEL);
3187         rxr->rx_tpa[0].agg_arr = agg;
3188         if (!agg)
3189             return -ENOMEM;
3190         for (j = 1; j < bp->max_tpa; j++)
3191             rxr->rx_tpa[j].agg_arr = agg + j * MAX_SKB_FRAGS;
3192         rxr->rx_tpa_idx_map = kzalloc(sizeof(*rxr->rx_tpa_idx_map),
3193                           GFP_KERNEL);
3194         if (!rxr->rx_tpa_idx_map)
3195             return -ENOMEM;
3196     }
3197     return 0;
3198 }
3199 
3200 static void bnxt_free_rx_rings(struct bnxt *bp)
3201 {
3202     int i;
3203 
3204     if (!bp->rx_ring)
3205         return;
3206 
3207     bnxt_free_tpa_info(bp);
3208     for (i = 0; i < bp->rx_nr_rings; i++) {
3209         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3210         struct bnxt_ring_struct *ring;
3211 
3212         if (rxr->xdp_prog)
3213             bpf_prog_put(rxr->xdp_prog);
3214 
3215         if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
3216             xdp_rxq_info_unreg(&rxr->xdp_rxq);
3217 
3218         page_pool_destroy(rxr->page_pool);
3219         rxr->page_pool = NULL;
3220 
3221         kfree(rxr->rx_agg_bmap);
3222         rxr->rx_agg_bmap = NULL;
3223 
3224         ring = &rxr->rx_ring_struct;
3225         bnxt_free_ring(bp, &ring->ring_mem);
3226 
3227         ring = &rxr->rx_agg_ring_struct;
3228         bnxt_free_ring(bp, &ring->ring_mem);
3229     }
3230 }
3231 
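     /* Create the page_pool that supplies RX buffer pages for one RX
      * ring; the pool is sized to the RX ring and bound to the device's
      * NUMA node.
      */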
3232 static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
3233                    struct bnxt_rx_ring_info *rxr)
3234 {
3235     struct page_pool_params pp = { 0 };
3236 
3237     pp.pool_size = bp->rx_ring_size;
3238     pp.nid = dev_to_node(&bp->pdev->dev);
3239     pp.dev = &bp->pdev->dev;
3240     pp.dma_dir = DMA_BIDIRECTIONAL;
3241 
3242     rxr->page_pool = page_pool_create(&pp);
3243     if (IS_ERR(rxr->page_pool)) {
3244         int err = PTR_ERR(rxr->page_pool);
3245 
3246         rxr->page_pool = NULL;
3247         return err;
3248     }
3249     return 0;
3250 }
3251 
3252 static int bnxt_alloc_rx_rings(struct bnxt *bp)
3253 {
3254     int i, rc = 0, agg_rings = 0;
3255 
3256     if (!bp->rx_ring)
3257         return -ENOMEM;
3258 
3259     if (bp->flags & BNXT_FLAG_AGG_RINGS)
3260         agg_rings = 1;
3261 
3262     for (i = 0; i < bp->rx_nr_rings; i++) {
3263         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3264         struct bnxt_ring_struct *ring;
3265 
3266         ring = &rxr->rx_ring_struct;
3267 
3268         rc = bnxt_alloc_rx_page_pool(bp, rxr);
3269         if (rc)
3270             return rc;
3271 
3272         rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i, 0);
3273         if (rc < 0)
3274             return rc;
3275 
3276         rc = xdp_rxq_info_reg_mem_model(&rxr->xdp_rxq,
3277                         MEM_TYPE_PAGE_POOL,
3278                         rxr->page_pool);
3279         if (rc) {
3280             xdp_rxq_info_unreg(&rxr->xdp_rxq);
3281             return rc;
3282         }
3283 
3284         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3285         if (rc)
3286             return rc;
3287 
3288         ring->grp_idx = i;
3289         if (agg_rings) {
3290             u16 mem_size;
3291 
3292             ring = &rxr->rx_agg_ring_struct;
3293             rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3294             if (rc)
3295                 return rc;
3296 
3297             ring->grp_idx = i;
3298             rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
3299             mem_size = rxr->rx_agg_bmap_size / 8;
3300             rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
3301             if (!rxr->rx_agg_bmap)
3302                 return -ENOMEM;
3303         }
3304     }
3305     if (bp->flags & BNXT_FLAG_TPA)
3306         rc = bnxt_alloc_tpa_info(bp);
3307     return rc;
3308 }
3309 
3310 static void bnxt_free_tx_rings(struct bnxt *bp)
3311 {
3312     int i;
3313     struct pci_dev *pdev = bp->pdev;
3314 
3315     if (!bp->tx_ring)
3316         return;
3317 
3318     for (i = 0; i < bp->tx_nr_rings; i++) {
3319         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3320         struct bnxt_ring_struct *ring;
3321 
3322         if (txr->tx_push) {
3323             dma_free_coherent(&pdev->dev, bp->tx_push_size,
3324                       txr->tx_push, txr->tx_push_mapping);
3325             txr->tx_push = NULL;
3326         }
3327 
3328         ring = &txr->tx_ring_struct;
3329 
3330         bnxt_free_ring(bp, &ring->ring_mem);
3331     }
3332 }
3333 
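     /* Allocate descriptor memory for every TX ring.  If the TX push
      * threshold is in use (the push BD plus data fits in 256 bytes), a
      * small coherent buffer is also allocated per ring to back the push
      * operation, and each ring is bound to a HW queue via tc_to_qidx.
      */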
3334 static int bnxt_alloc_tx_rings(struct bnxt *bp)
3335 {
3336     int i, j, rc;
3337     struct pci_dev *pdev = bp->pdev;
3338 
3339     bp->tx_push_size = 0;
3340     if (bp->tx_push_thresh) {
3341         int push_size;
3342 
3343         push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
3344                     bp->tx_push_thresh);
3345 
3346         if (push_size > 256) {
3347             push_size = 0;
3348             bp->tx_push_thresh = 0;
3349         }
3350 
3351         bp->tx_push_size = push_size;
3352     }
3353 
3354     for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
3355         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3356         struct bnxt_ring_struct *ring;
3357         u8 qidx;
3358 
3359         ring = &txr->tx_ring_struct;
3360 
3361         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3362         if (rc)
3363             return rc;
3364 
3365         ring->grp_idx = txr->bnapi->index;
3366         if (bp->tx_push_size) {
3367             dma_addr_t mapping;
3368 
3369             /* One pre-allocated DMA buffer to back up the
3370              * TX push operation
3371              */
3372             txr->tx_push = dma_alloc_coherent(&pdev->dev,
3373                         bp->tx_push_size,
3374                         &txr->tx_push_mapping,
3375                         GFP_KERNEL);
3376 
3377             if (!txr->tx_push)
3378                 return -ENOMEM;
3379 
3380             mapping = txr->tx_push_mapping +
3381                 sizeof(struct tx_push_bd);
3382             txr->data_mapping = cpu_to_le64(mapping);
3383         }
3384         qidx = bp->tc_to_qidx[j];
3385         ring->queue_id = bp->q_info[qidx].queue_id;
3386         spin_lock_init(&txr->xdp_tx_lock);
3387         if (i < bp->tx_nr_rings_xdp)
3388             continue;
3389         if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
3390             j++;
3391     }
3392     return 0;
3393 }
3394 
3395 static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr)
3396 {
3397     struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3398 
3399     kfree(cpr->cp_desc_ring);
3400     cpr->cp_desc_ring = NULL;
3401     ring->ring_mem.pg_arr = NULL;
3402     kfree(cpr->cp_desc_mapping);
3403     cpr->cp_desc_mapping = NULL;
3404     ring->ring_mem.dma_arr = NULL;
3405 }
3406 
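     /* Allocate the arrays holding one completion ring's per-page
      * descriptor pointers and their DMA addresses; n is the number of
      * completion ring pages.
      */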
3407 static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n)
3408 {
3409     cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL);
3410     if (!cpr->cp_desc_ring)
3411         return -ENOMEM;
3412     cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping),
3413                        GFP_KERNEL);
3414     if (!cpr->cp_desc_mapping)
3415         return -ENOMEM;
3416     return 0;
3417 }
3418 
3419 static void bnxt_free_all_cp_arrays(struct bnxt *bp)
3420 {
3421     int i;
3422 
3423     if (!bp->bnapi)
3424         return;
3425     for (i = 0; i < bp->cp_nr_rings; i++) {
3426         struct bnxt_napi *bnapi = bp->bnapi[i];
3427 
3428         if (!bnapi)
3429             continue;
3430         bnxt_free_cp_arrays(&bnapi->cp_ring);
3431     }
3432 }
3433 
3434 static int bnxt_alloc_all_cp_arrays(struct bnxt *bp)
3435 {
3436     int i, n = bp->cp_nr_pages;
3437 
3438     for (i = 0; i < bp->cp_nr_rings; i++) {
3439         struct bnxt_napi *bnapi = bp->bnapi[i];
3440         int rc;
3441 
3442         if (!bnapi)
3443             continue;
3444         rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n);
3445         if (rc)
3446             return rc;
3447     }
3448     return 0;
3449 }
3450 
3451 static void bnxt_free_cp_rings(struct bnxt *bp)
3452 {
3453     int i;
3454 
3455     if (!bp->bnapi)
3456         return;
3457 
3458     for (i = 0; i < bp->cp_nr_rings; i++) {
3459         struct bnxt_napi *bnapi = bp->bnapi[i];
3460         struct bnxt_cp_ring_info *cpr;
3461         struct bnxt_ring_struct *ring;
3462         int j;
3463 
3464         if (!bnapi)
3465             continue;
3466 
3467         cpr = &bnapi->cp_ring;
3468         ring = &cpr->cp_ring_struct;
3469 
3470         bnxt_free_ring(bp, &ring->ring_mem);
3471 
3472         for (j = 0; j < 2; j++) {
3473             struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3474 
3475             if (cpr2) {
3476                 ring = &cpr2->cp_ring_struct;
3477                 bnxt_free_ring(bp, &ring->ring_mem);
3478                 bnxt_free_cp_arrays(cpr2);
3479                 kfree(cpr2);
3480                 cpr->cp_ring_arr[j] = NULL;
3481             }
3482         }
3483     }
3484 }
3485 
3486 static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
3487 {
3488     struct bnxt_ring_mem_info *rmem;
3489     struct bnxt_ring_struct *ring;
3490     struct bnxt_cp_ring_info *cpr;
3491     int rc;
3492 
3493     cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
3494     if (!cpr)
3495         return NULL;
3496 
3497     rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages);
3498     if (rc) {
3499         bnxt_free_cp_arrays(cpr);
3500         kfree(cpr);
3501         return NULL;
3502     }
3503     ring = &cpr->cp_ring_struct;
3504     rmem = &ring->ring_mem;
3505     rmem->nr_pages = bp->cp_nr_pages;
3506     rmem->page_size = HW_CMPD_RING_SIZE;
3507     rmem->pg_arr = (void **)cpr->cp_desc_ring;
3508     rmem->dma_arr = cpr->cp_desc_mapping;
3509     rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
3510     rc = bnxt_alloc_ring(bp, rmem);
3511     if (rc) {
3512         bnxt_free_ring(bp, rmem);
3513         bnxt_free_cp_arrays(cpr);
3514         kfree(cpr);
3515         cpr = NULL;
3516     }
3517     return cpr;
3518 }
3519 
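     /* Allocate the completion/notification ring for each NAPI instance
      * and assign its MSI-X map index, accounting for vectors reserved by
      * the ULP.  On P5 chips, dedicated RX and TX completion sub-rings
      * are hung off the main ring via cp_ring_arr[].
      */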
3520 static int bnxt_alloc_cp_rings(struct bnxt *bp)
3521 {
3522     bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
3523     int i, rc, ulp_base_vec, ulp_msix;
3524 
3525     ulp_msix = bnxt_get_ulp_msix_num(bp);
3526     ulp_base_vec = bnxt_get_ulp_msix_base(bp);
3527     for (i = 0; i < bp->cp_nr_rings; i++) {
3528         struct bnxt_napi *bnapi = bp->bnapi[i];
3529         struct bnxt_cp_ring_info *cpr;
3530         struct bnxt_ring_struct *ring;
3531 
3532         if (!bnapi)
3533             continue;
3534 
3535         cpr = &bnapi->cp_ring;
3536         cpr->bnapi = bnapi;
3537         ring = &cpr->cp_ring_struct;
3538 
3539         rc = bnxt_alloc_ring(bp, &ring->ring_mem);
3540         if (rc)
3541             return rc;
3542 
3543         if (ulp_msix && i >= ulp_base_vec)
3544             ring->map_idx = i + ulp_msix;
3545         else
3546             ring->map_idx = i;
3547 
3548         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
3549             continue;
3550 
3551         if (i < bp->rx_nr_rings) {
3552             struct bnxt_cp_ring_info *cpr2 =
3553                 bnxt_alloc_cp_sub_ring(bp);
3554 
3555             cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
3556             if (!cpr2)
3557                 return -ENOMEM;
3558             cpr2->bnapi = bnapi;
3559         }
3560         if ((sh && i < bp->tx_nr_rings) ||
3561             (!sh && i >= bp->rx_nr_rings)) {
3562             struct bnxt_cp_ring_info *cpr2 =
3563                 bnxt_alloc_cp_sub_ring(bp);
3564 
3565             cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
3566             if (!cpr2)
3567                 return -ENOMEM;
3568             cpr2->bnapi = bnapi;
3569         }
3570     }
3571     return 0;
3572 }
3573 
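     /* Point every ring's bnxt_ring_mem_info at its descriptor pages, DMA
      * address array and SW buffer ring so that bnxt_alloc_ring() and
      * bnxt_free_ring() can handle CP, RX, RX agg and TX rings uniformly.
      */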
3574 static void bnxt_init_ring_struct(struct bnxt *bp)
3575 {
3576     int i;
3577 
3578     for (i = 0; i < bp->cp_nr_rings; i++) {
3579         struct bnxt_napi *bnapi = bp->bnapi[i];
3580         struct bnxt_ring_mem_info *rmem;
3581         struct bnxt_cp_ring_info *cpr;
3582         struct bnxt_rx_ring_info *rxr;
3583         struct bnxt_tx_ring_info *txr;
3584         struct bnxt_ring_struct *ring;
3585 
3586         if (!bnapi)
3587             continue;
3588 
3589         cpr = &bnapi->cp_ring;
3590         ring = &cpr->cp_ring_struct;
3591         rmem = &ring->ring_mem;
3592         rmem->nr_pages = bp->cp_nr_pages;
3593         rmem->page_size = HW_CMPD_RING_SIZE;
3594         rmem->pg_arr = (void **)cpr->cp_desc_ring;
3595         rmem->dma_arr = cpr->cp_desc_mapping;
3596         rmem->vmem_size = 0;
3597 
3598         rxr = bnapi->rx_ring;
3599         if (!rxr)
3600             goto skip_rx;
3601 
3602         ring = &rxr->rx_ring_struct;
3603         rmem = &ring->ring_mem;
3604         rmem->nr_pages = bp->rx_nr_pages;
3605         rmem->page_size = HW_RXBD_RING_SIZE;
3606         rmem->pg_arr = (void **)rxr->rx_desc_ring;
3607         rmem->dma_arr = rxr->rx_desc_mapping;
3608         rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
3609         rmem->vmem = (void **)&rxr->rx_buf_ring;
3610 
3611         ring = &rxr->rx_agg_ring_struct;
3612         rmem = &ring->ring_mem;
3613         rmem->nr_pages = bp->rx_agg_nr_pages;
3614         rmem->page_size = HW_RXBD_RING_SIZE;
3615         rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
3616         rmem->dma_arr = rxr->rx_agg_desc_mapping;
3617         rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
3618         rmem->vmem = (void **)&rxr->rx_agg_ring;
3619 
3620 skip_rx:
3621         txr = bnapi->tx_ring;
3622         if (!txr)
3623             continue;
3624 
3625         ring = &txr->tx_ring_struct;
3626         rmem = &ring->ring_mem;
3627         rmem->nr_pages = bp->tx_nr_pages;
3628         rmem->page_size = HW_RXBD_RING_SIZE;
3629         rmem->pg_arr = (void **)txr->tx_desc_ring;
3630         rmem->dma_arr = txr->tx_desc_mapping;
3631         rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
3632         rmem->vmem = (void **)&txr->tx_buf_ring;
3633     }
3634 }
3635 
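     /* Stamp every RX buffer descriptor on the ring pages with the given
      * type/flags word and a sequential opaque index.
      */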
3636 static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
3637 {
3638     int i;
3639     u32 prod;
3640     struct rx_bd **rx_buf_ring;
3641 
3642     rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
3643     for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
3644         int j;
3645         struct rx_bd *rxbd;
3646 
3647         rxbd = rx_buf_ring[i];
3648         if (!rxbd)
3649             continue;
3650 
3651         for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
3652             rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
3653             rxbd->rx_bd_opaque = prod;
3654         }
3655     }
3656 }
3657 
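     /* Fill one RX ring with newly allocated buffers: data buffers for
      * the main ring, pages for the aggregation ring (if enabled) and the
      * TPA buffers.  A partial fill is tolerated and only logged.
      */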
3658 static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
3659 {
3660     struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
3661     struct net_device *dev = bp->dev;
3662     u32 prod;
3663     int i;
3664 
3665     prod = rxr->rx_prod;
3666     for (i = 0; i < bp->rx_ring_size; i++) {
3667         if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
3668             netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
3669                     ring_nr, i, bp->rx_ring_size);
3670             break;
3671         }
3672         prod = NEXT_RX(prod);
3673     }
3674     rxr->rx_prod = prod;
3675 
3676     if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
3677         return 0;
3678 
3679     prod = rxr->rx_agg_prod;
3680     for (i = 0; i < bp->rx_agg_ring_size; i++) {
3681         if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
3682             netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
3683                     ring_nr, i, bp->rx_ring_size);
3684             break;
3685         }
3686         prod = NEXT_RX_AGG(prod);
3687     }
3688     rxr->rx_agg_prod = prod;
3689 
3690     if (rxr->rx_tpa) {
3691         dma_addr_t mapping;
3692         u8 *data;
3693 
3694         for (i = 0; i < bp->max_tpa; i++) {
3695             data = __bnxt_alloc_rx_frag(bp, &mapping, GFP_KERNEL);
3696             if (!data)
3697                 return -ENOMEM;
3698 
3699             rxr->rx_tpa[i].data = data;
3700             rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
3701             rxr->rx_tpa[i].mapping = mapping;
3702         }
3703     }
3704     return 0;
3705 }
3706 
3707 static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
3708 {
3709     struct bnxt_rx_ring_info *rxr;
3710     struct bnxt_ring_struct *ring;
3711     u32 type;
3712 
3713     type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
3714         RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
3715 
3716     if (NET_IP_ALIGN == 2)
3717         type |= RX_BD_FLAGS_SOP;
3718 
3719     rxr = &bp->rx_ring[ring_nr];
3720     ring = &rxr->rx_ring_struct;
3721     bnxt_init_rxbd_pages(ring, type);
3722 
3723     if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
3724         bpf_prog_add(bp->xdp_prog, 1);
3725         rxr->xdp_prog = bp->xdp_prog;
3726     }
3727     ring->fw_ring_id = INVALID_HW_RING_ID;
3728 
3729     ring = &rxr->rx_agg_ring_struct;
3730     ring->fw_ring_id = INVALID_HW_RING_ID;
3731 
3732     if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
3733         type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
3734             RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
3735 
3736         bnxt_init_rxbd_pages(ring, type);
3737     }
3738 
3739     return bnxt_alloc_one_rx_ring(bp, ring_nr);
3740 }
3741 
3742 static void bnxt_init_cp_rings(struct bnxt *bp)
3743 {
3744     int i, j;
3745 
3746     for (i = 0; i < bp->cp_nr_rings; i++) {
3747         struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
3748         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3749 
3750         ring->fw_ring_id = INVALID_HW_RING_ID;
3751         cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3752         cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3753         for (j = 0; j < 2; j++) {
3754             struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
3755 
3756             if (!cpr2)
3757                 continue;
3758 
3759             ring = &cpr2->cp_ring_struct;
3760             ring->fw_ring_id = INVALID_HW_RING_ID;
3761             cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
3762             cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
3763         }
3764     }
3765 }
3766 
3767 static int bnxt_init_rx_rings(struct bnxt *bp)
3768 {
3769     int i, rc = 0;
3770 
3771     if (BNXT_RX_PAGE_MODE(bp)) {
3772         bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
3773         bp->rx_dma_offset = XDP_PACKET_HEADROOM;
3774     } else {
3775         bp->rx_offset = BNXT_RX_OFFSET;
3776         bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
3777     }
3778 
3779     for (i = 0; i < bp->rx_nr_rings; i++) {
3780         rc = bnxt_init_one_rx_ring(bp, i);
3781         if (rc)
3782             break;
3783     }
3784 
3785     return rc;
3786 }
3787 
3788 static int bnxt_init_tx_rings(struct bnxt *bp)
3789 {
3790     u16 i;
3791 
3792     bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
3793                    BNXT_MIN_TX_DESC_CNT);
3794 
3795     for (i = 0; i < bp->tx_nr_rings; i++) {
3796         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3797         struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3798 
3799         ring->fw_ring_id = INVALID_HW_RING_ID;
3800     }
3801 
3802     return 0;
3803 }
3804 
3805 static void bnxt_free_ring_grps(struct bnxt *bp)
3806 {
3807     kfree(bp->grp_info);
3808     bp->grp_info = NULL;
3809 }
3810 
3811 static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
3812 {
3813     int i;
3814 
3815     if (irq_re_init) {
3816         bp->grp_info = kcalloc(bp->cp_nr_rings,
3817                        sizeof(struct bnxt_ring_grp_info),
3818                        GFP_KERNEL);
3819         if (!bp->grp_info)
3820             return -ENOMEM;
3821     }
3822     for (i = 0; i < bp->cp_nr_rings; i++) {
3823         if (irq_re_init)
3824             bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
3825         bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3826         bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
3827         bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
3828         bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
3829     }
3830     return 0;
3831 }
3832 
3833 static void bnxt_free_vnics(struct bnxt *bp)
3834 {
3835     kfree(bp->vnic_info);
3836     bp->vnic_info = NULL;
3837     bp->nr_vnics = 0;
3838 }
3839 
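     /* One default VNIC is always used; with aRFS enabled (and not on P5
      * chips) one extra VNIC per RX ring is added, and Nitro A0 parts
      * need one more.
      */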
3840 static int bnxt_alloc_vnics(struct bnxt *bp)
3841 {
3842     int num_vnics = 1;
3843 
3844 #ifdef CONFIG_RFS_ACCEL
3845     if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
3846         num_vnics += bp->rx_nr_rings;
3847 #endif
3848 
3849     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
3850         num_vnics++;
3851 
3852     bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
3853                 GFP_KERNEL);
3854     if (!bp->vnic_info)
3855         return -ENOMEM;
3856 
3857     bp->nr_vnics = num_vnics;
3858     return 0;
3859 }
3860 
3861 static void bnxt_init_vnics(struct bnxt *bp)
3862 {
3863     int i;
3864 
3865     for (i = 0; i < bp->nr_vnics; i++) {
3866         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3867         int j;
3868 
3869         vnic->fw_vnic_id = INVALID_HW_RING_ID;
3870         for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3871             vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3872 
3873         vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3874 
3875         if (bp->vnic_info[i].rss_hash_key) {
3876             if (i == 0)
3877                 prandom_bytes(vnic->rss_hash_key,
3878                           HW_HASH_KEY_SIZE);
3879             else
3880                 memcpy(vnic->rss_hash_key,
3881                        bp->vnic_info[0].rss_hash_key,
3882                        HW_HASH_KEY_SIZE);
3883         }
3884     }
3885 }
3886 
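     /* Return the number of descriptor pages for a ring of ring_size
      * entries: one page of headroom is added and the result is rounded
      * up to a power of two.  Illustrative example: ring_size = 2048 and
      * desc_per_pg = 256 gives 8 pages, bumped to 9 and rounded up to 16.
      */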
3887 static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3888 {
3889     int pages;
3890 
3891     pages = ring_size / desc_per_pg;
3892 
3893     if (!pages)
3894         return 1;
3895 
3896     pages++;
3897 
3898     while (pages & (pages - 1))
3899         pages++;
3900 
3901     return pages;
3902 }
3903 
3904 void bnxt_set_tpa_flags(struct bnxt *bp)
3905 {
3906     bp->flags &= ~BNXT_FLAG_TPA;
3907     if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3908         return;
3909     if (bp->dev->features & NETIF_F_LRO)
3910         bp->flags |= BNXT_FLAG_LRO;
3911     else if (bp->dev->features & NETIF_F_GRO_HW)
3912         bp->flags |= BNXT_FLAG_GRO;
3913 }
3914 
3915 /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3916  * be set on entry.
3917  */
3918 void bnxt_set_ring_params(struct bnxt *bp)
3919 {
3920     u32 ring_size, rx_size, rx_space, max_rx_cmpl;
3921     u32 agg_factor = 0, agg_ring_size = 0;
3922 
3923     /* 8 for CRC and VLAN */
3924     rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3925 
3926     rx_space = rx_size + ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) +
3927         SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3928 
3929     bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3930     ring_size = bp->rx_ring_size;
3931     bp->rx_agg_ring_size = 0;
3932     bp->rx_agg_nr_pages = 0;
3933 
3934     if (bp->flags & BNXT_FLAG_TPA)
3935         agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
3936 
3937     bp->flags &= ~BNXT_FLAG_JUMBO;
3938     if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
3939         u32 jumbo_factor;
3940 
3941         bp->flags |= BNXT_FLAG_JUMBO;
3942         jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3943         if (jumbo_factor > agg_factor)
3944             agg_factor = jumbo_factor;
3945     }
3946     if (agg_factor) {
3947         if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) {
3948             ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA;
3949             netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n",
3950                     bp->rx_ring_size, ring_size);
3951             bp->rx_ring_size = ring_size;
3952         }
3953         agg_ring_size = ring_size * agg_factor;
3954 
3955         bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3956                             RX_DESC_CNT);
3957         if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3958             u32 tmp = agg_ring_size;
3959 
3960             bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3961             agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3962             netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3963                     tmp, agg_ring_size);
3964         }
3965         bp->rx_agg_ring_size = agg_ring_size;
3966         bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3967 
3968         if (BNXT_RX_PAGE_MODE(bp)) {
3969             rx_space = BNXT_PAGE_MODE_BUF_SIZE;
3970             rx_size = BNXT_MAX_PAGE_MODE_MTU;
3971         } else {
3972             rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3973             rx_space = rx_size + NET_SKB_PAD +
3974                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3975         }
3976     }
3977 
3978     bp->rx_buf_use_size = rx_size;
3979     bp->rx_buf_size = rx_space;
3980 
3981     bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3982     bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3983 
3984     ring_size = bp->tx_ring_size;
3985     bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3986     bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3987 
3988     max_rx_cmpl = bp->rx_ring_size;
3989     /* MAX TPA needs to be added because TPA_START completions are
3990      * immediately recycled, so the TPA completions are not bound by
3991      * the RX ring size.
3992      */
3993     if (bp->flags & BNXT_FLAG_TPA)
3994         max_rx_cmpl += bp->max_tpa;
3995     /* RX and TPA completions are 32-byte, all others are 16-byte */
3996     ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
3997     bp->cp_ring_size = ring_size;
3998 
3999     bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
4000     if (bp->cp_nr_pages > MAX_CP_PAGES) {
4001         bp->cp_nr_pages = MAX_CP_PAGES;
4002         bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
4003         netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
4004                 ring_size, bp->cp_ring_size);
4005     }
4006     bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
4007     bp->cp_ring_mask = bp->cp_bit - 1;
4008 }
4009 
4010 /* Changing allocation mode of RX rings.
4011  * TODO: Update when extending xdp_rxq_info to support allocation modes.
4012  */
4013 int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
4014 {
4015     if (page_mode) {
4016         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
4017         bp->flags |= BNXT_FLAG_RX_PAGE_MODE;
4018 
4019         if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
4020             bp->flags |= BNXT_FLAG_JUMBO;
4021             bp->rx_skb_func = bnxt_rx_multi_page_skb;
4022             bp->dev->max_mtu =
4023                 min_t(u16, bp->max_mtu, BNXT_MAX_MTU);
4024         } else {
4025             bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
4026             bp->rx_skb_func = bnxt_rx_page_skb;
4027             bp->dev->max_mtu =
4028                 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
4029         }
4030         bp->rx_dir = DMA_BIDIRECTIONAL;
4031         /* Disable LRO or GRO_HW */
4032         netdev_update_features(bp->dev);
4033     } else {
4034         bp->dev->max_mtu = bp->max_mtu;
4035         bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
4036         bp->rx_dir = DMA_FROM_DEVICE;
4037         bp->rx_skb_func = bnxt_rx_skb;
4038     }
4039     return 0;
4040 }
4041 
4042 static void bnxt_free_vnic_attributes(struct bnxt *bp)
4043 {
4044     int i;
4045     struct bnxt_vnic_info *vnic;
4046     struct pci_dev *pdev = bp->pdev;
4047 
4048     if (!bp->vnic_info)
4049         return;
4050 
4051     for (i = 0; i < bp->nr_vnics; i++) {
4052         vnic = &bp->vnic_info[i];
4053 
4054         kfree(vnic->fw_grp_ids);
4055         vnic->fw_grp_ids = NULL;
4056 
4057         kfree(vnic->uc_list);
4058         vnic->uc_list = NULL;
4059 
4060         if (vnic->mc_list) {
4061             dma_free_coherent(&pdev->dev, vnic->mc_list_size,
4062                       vnic->mc_list, vnic->mc_list_mapping);
4063             vnic->mc_list = NULL;
4064         }
4065 
4066         if (vnic->rss_table) {
4067             dma_free_coherent(&pdev->dev, vnic->rss_table_size,
4068                       vnic->rss_table,
4069                       vnic->rss_table_dma_addr);
4070             vnic->rss_table = NULL;
4071         }
4072 
4073         vnic->rss_hash_key = NULL;
4074         vnic->flags = 0;
4075     }
4076 }
4077 
4078 static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
4079 {
4080     int i, rc = 0, size;
4081     struct bnxt_vnic_info *vnic;
4082     struct pci_dev *pdev = bp->pdev;
4083     int max_rings;
4084 
4085     for (i = 0; i < bp->nr_vnics; i++) {
4086         vnic = &bp->vnic_info[i];
4087 
4088         if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
4089             int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
4090 
4091             if (mem_size > 0) {
4092                 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
4093                 if (!vnic->uc_list) {
4094                     rc = -ENOMEM;
4095                     goto out;
4096                 }
4097             }
4098         }
4099 
4100         if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
4101             vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
4102             vnic->mc_list =
4103                 dma_alloc_coherent(&pdev->dev,
4104                            vnic->mc_list_size,
4105                            &vnic->mc_list_mapping,
4106                            GFP_KERNEL);
4107             if (!vnic->mc_list) {
4108                 rc = -ENOMEM;
4109                 goto out;
4110             }
4111         }
4112 
4113         if (bp->flags & BNXT_FLAG_CHIP_P5)
4114             goto vnic_skip_grps;
4115 
4116         if (vnic->flags & BNXT_VNIC_RSS_FLAG)
4117             max_rings = bp->rx_nr_rings;
4118         else
4119             max_rings = 1;
4120 
4121         vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
4122         if (!vnic->fw_grp_ids) {
4123             rc = -ENOMEM;
4124             goto out;
4125         }
4126 vnic_skip_grps:
4127         if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
4128             !(vnic->flags & BNXT_VNIC_RSS_FLAG))
4129             continue;
4130 
4131         /* Allocate rss table and hash key */
4132         size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
4133         if (bp->flags & BNXT_FLAG_CHIP_P5)
4134             size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
4135 
4136         vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
4137         vnic->rss_table = dma_alloc_coherent(&pdev->dev,
4138                              vnic->rss_table_size,
4139                              &vnic->rss_table_dma_addr,
4140                              GFP_KERNEL);
4141         if (!vnic->rss_table) {
4142             rc = -ENOMEM;
4143             goto out;
4144         }
4145 
4146         vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
4147         vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
4148     }
4149     return 0;
4150 
4151 out:
4152     return rc;
4153 }
4154 
4155 static void bnxt_free_hwrm_resources(struct bnxt *bp)
4156 {
4157     struct bnxt_hwrm_wait_token *token;
4158 
4159     dma_pool_destroy(bp->hwrm_dma_pool);
4160     bp->hwrm_dma_pool = NULL;
4161 
4162     rcu_read_lock();
4163     hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node)
4164         WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED);
4165     rcu_read_unlock();
4166 }
4167 
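     /* Create the DMA pool used for HWRM command buffers and the list
      * that tracks requests still waiting on a firmware response.
      */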
4168 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
4169 {
4170     bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev,
4171                         BNXT_HWRM_DMA_SIZE,
4172                         BNXT_HWRM_DMA_ALIGN, 0);
4173     if (!bp->hwrm_dma_pool)
4174         return -ENOMEM;
4175 
4176     INIT_HLIST_HEAD(&bp->hwrm_pending_list);
4177 
4178     return 0;
4179 }
4180 
4181 static void bnxt_free_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats)
4182 {
4183     kfree(stats->hw_masks);
4184     stats->hw_masks = NULL;
4185     kfree(stats->sw_stats);
4186     stats->sw_stats = NULL;
4187     if (stats->hw_stats) {
4188         dma_free_coherent(&bp->pdev->dev, stats->len, stats->hw_stats,
4189                   stats->hw_stats_map);
4190         stats->hw_stats = NULL;
4191     }
4192 }
4193 
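     /* Allocate one statistics block: the DMA-coherent buffer that the
      * firmware writes into, a SW shadow buffer of the same size, and
      * optionally the per-counter mask array.
      */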
4194 static int bnxt_alloc_stats_mem(struct bnxt *bp, struct bnxt_stats_mem *stats,
4195                 bool alloc_masks)
4196 {
4197     stats->hw_stats = dma_alloc_coherent(&bp->pdev->dev, stats->len,
4198                          &stats->hw_stats_map, GFP_KERNEL);
4199     if (!stats->hw_stats)
4200         return -ENOMEM;
4201 
4202     stats->sw_stats = kzalloc(stats->len, GFP_KERNEL);
4203     if (!stats->sw_stats)
4204         goto stats_mem_err;
4205 
4206     if (alloc_masks) {
4207         stats->hw_masks = kzalloc(stats->len, GFP_KERNEL);
4208         if (!stats->hw_masks)
4209             goto stats_mem_err;
4210     }
4211     return 0;
4212 
4213 stats_mem_err:
4214     bnxt_free_stats_mem(bp, stats);
4215     return -ENOMEM;
4216 }
4217 
4218 static void bnxt_fill_masks(u64 *mask_arr, u64 mask, int count)
4219 {
4220     int i;
4221 
4222     for (i = 0; i < count; i++)
4223         mask_arr[i] = mask;
4224 }
4225 
4226 static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count)
4227 {
4228     int i;
4229 
4230     for (i = 0; i < count; i++)
4231         mask_arr[i] = le64_to_cpu(hw_mask_arr[i]);
4232 }
4233 
4234 static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp,
4235                     struct bnxt_stats_mem *stats)
4236 {
4237     struct hwrm_func_qstats_ext_output *resp;
4238     struct hwrm_func_qstats_ext_input *req;
4239     __le64 *hw_masks;
4240     int rc;
4241 
4242     if (!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED) ||
4243         !(bp->flags & BNXT_FLAG_CHIP_P5))
4244         return -EOPNOTSUPP;
4245 
4246     rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT);
4247     if (rc)
4248         return rc;
4249 
4250     req->fid = cpu_to_le16(0xffff);
4251     req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4252 
4253     resp = hwrm_req_hold(bp, req);
4254     rc = hwrm_req_send(bp, req);
4255     if (!rc) {
4256         hw_masks = &resp->rx_ucast_pkts;
4257         bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8);
4258     }
4259     hwrm_req_drop(bp, req);
4260     return rc;
4261 }
4262 
4263 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags);
4264 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags);
4265 
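     /* Initialize the counter-width masks for the ring and port stats.
      * The exact widths are queried from the firmware when supported;
      * otherwise fixed defaults are used (48-bit ring counters on P5,
      * 40-bit port counters, 64-bit everywhere else).
      */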
4266 static void bnxt_init_stats(struct bnxt *bp)
4267 {
4268     struct bnxt_napi *bnapi = bp->bnapi[0];
4269     struct bnxt_cp_ring_info *cpr;
4270     struct bnxt_stats_mem *stats;
4271     __le64 *rx_stats, *tx_stats;
4272     int rc, rx_count, tx_count;
4273     u64 *rx_masks, *tx_masks;
4274     u64 mask;
4275     u8 flags;
4276 
4277     cpr = &bnapi->cp_ring;
4278     stats = &cpr->stats;
4279     rc = bnxt_hwrm_func_qstat_ext(bp, stats);
4280     if (rc) {
4281         if (bp->flags & BNXT_FLAG_CHIP_P5)
4282             mask = (1ULL << 48) - 1;
4283         else
4284             mask = -1ULL;
4285         bnxt_fill_masks(stats->hw_masks, mask, stats->len / 8);
4286     }
4287     if (bp->flags & BNXT_FLAG_PORT_STATS) {
4288         stats = &bp->port_stats;
4289         rx_stats = stats->hw_stats;
4290         rx_masks = stats->hw_masks;
4291         rx_count = sizeof(struct rx_port_stats) / 8;
4292         tx_stats = rx_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4293         tx_masks = rx_masks + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
4294         tx_count = sizeof(struct tx_port_stats) / 8;
4295 
4296         flags = PORT_QSTATS_REQ_FLAGS_COUNTER_MASK;
4297         rc = bnxt_hwrm_port_qstats(bp, flags);
4298         if (rc) {
4299             mask = (1ULL << 40) - 1;
4300 
4301             bnxt_fill_masks(rx_masks, mask, rx_count);
4302             bnxt_fill_masks(tx_masks, mask, tx_count);
4303         } else {
4304             bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4305             bnxt_copy_hw_masks(tx_masks, tx_stats, tx_count);
4306             bnxt_hwrm_port_qstats(bp, 0);
4307         }
4308     }
4309     if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
4310         stats = &bp->rx_port_stats_ext;
4311         rx_stats = stats->hw_stats;
4312         rx_masks = stats->hw_masks;
4313         rx_count = sizeof(struct rx_port_stats_ext) / 8;
4314         stats = &bp->tx_port_stats_ext;
4315         tx_stats = stats->hw_stats;
4316         tx_masks = stats->hw_masks;
4317         tx_count = sizeof(struct tx_port_stats_ext) / 8;
4318 
4319         flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK;
4320         rc = bnxt_hwrm_port_qstats_ext(bp, flags);
4321         if (rc) {
4322             mask = (1ULL << 40) - 1;
4323 
4324             bnxt_fill_masks(rx_masks, mask, rx_count);
4325             if (tx_stats)
4326                 bnxt_fill_masks(tx_masks, mask, tx_count);
4327         } else {
4328             bnxt_copy_hw_masks(rx_masks, rx_stats, rx_count);
4329             if (tx_stats)
4330                 bnxt_copy_hw_masks(tx_masks, tx_stats,
4331                            tx_count);
4332             bnxt_hwrm_port_qstats_ext(bp, 0);
4333         }
4334     }
4335 }
4336 
4337 static void bnxt_free_port_stats(struct bnxt *bp)
4338 {
4339     bp->flags &= ~BNXT_FLAG_PORT_STATS;
4340     bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
4341 
4342     bnxt_free_stats_mem(bp, &bp->port_stats);
4343     bnxt_free_stats_mem(bp, &bp->rx_port_stats_ext);
4344     bnxt_free_stats_mem(bp, &bp->tx_port_stats_ext);
4345 }
4346 
4347 static void bnxt_free_ring_stats(struct bnxt *bp)
4348 {
4349     int i;
4350 
4351     if (!bp->bnapi)
4352         return;
4353 
4354     for (i = 0; i < bp->cp_nr_rings; i++) {
4355         struct bnxt_napi *bnapi = bp->bnapi[i];
4356         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4357 
4358         bnxt_free_stats_mem(bp, &cpr->stats);
4359     }
4360 }
4361 
4362 static int bnxt_alloc_stats(struct bnxt *bp)
4363 {
4364     u32 size, i;
4365     int rc;
4366 
4367     size = bp->hw_ring_stats_size;
4368 
4369     for (i = 0; i < bp->cp_nr_rings; i++) {
4370         struct bnxt_napi *bnapi = bp->bnapi[i];
4371         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4372 
4373         cpr->stats.len = size;
4374         rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
4375         if (rc)
4376             return rc;
4377 
4378         cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
4379     }
4380 
4381     if (BNXT_VF(bp) || bp->chip_num == CHIP_NUM_58700)
4382         return 0;
4383 
4384     if (bp->port_stats.hw_stats)
4385         goto alloc_ext_stats;
4386 
4387     bp->port_stats.len = BNXT_PORT_STATS_SIZE;
4388     rc = bnxt_alloc_stats_mem(bp, &bp->port_stats, true);
4389     if (rc)
4390         return rc;
4391 
4392     bp->flags |= BNXT_FLAG_PORT_STATS;
4393 
4394 alloc_ext_stats:
4395     /* Display extended statistics only if FW supports it */
4396     if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900)
4397         if (!(bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED))
4398             return 0;
4399 
4400     if (bp->rx_port_stats_ext.hw_stats)
4401         goto alloc_tx_ext_stats;
4402 
4403     bp->rx_port_stats_ext.len = sizeof(struct rx_port_stats_ext);
4404     rc = bnxt_alloc_stats_mem(bp, &bp->rx_port_stats_ext, true);
4405     /* Extended stats are optional */
4406     if (rc)
4407         return 0;
4408 
4409 alloc_tx_ext_stats:
4410     if (bp->tx_port_stats_ext.hw_stats)
4411         return 0;
4412 
4413     if (bp->hwrm_spec_code >= 0x10902 ||
4414         (bp->fw_cap & BNXT_FW_CAP_EXT_STATS_SUPPORTED)) {
4415         bp->tx_port_stats_ext.len = sizeof(struct tx_port_stats_ext);
4416         rc = bnxt_alloc_stats_mem(bp, &bp->tx_port_stats_ext, true);
4417         /* Extended stats are optional */
4418         if (rc)
4419             return 0;
4420     }
4421     bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
4422     return 0;
4423 }
4424 
4425 static void bnxt_clear_ring_indices(struct bnxt *bp)
4426 {
4427     int i;
4428 
4429     if (!bp->bnapi)
4430         return;
4431 
4432     for (i = 0; i < bp->cp_nr_rings; i++) {
4433         struct bnxt_napi *bnapi = bp->bnapi[i];
4434         struct bnxt_cp_ring_info *cpr;
4435         struct bnxt_rx_ring_info *rxr;
4436         struct bnxt_tx_ring_info *txr;
4437 
4438         if (!bnapi)
4439             continue;
4440 
4441         cpr = &bnapi->cp_ring;
4442         cpr->cp_raw_cons = 0;
4443 
4444         txr = bnapi->tx_ring;
4445         if (txr) {
4446             txr->tx_prod = 0;
4447             txr->tx_cons = 0;
4448         }
4449 
4450         rxr = bnapi->rx_ring;
4451         if (rxr) {
4452             rxr->rx_prod = 0;
4453             rxr->rx_agg_prod = 0;
4454             rxr->rx_sw_agg_prod = 0;
4455             rxr->rx_next_cons = 0;
4456         }
4457     }
4458 }
4459 
4460 static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
4461 {
4462 #ifdef CONFIG_RFS_ACCEL
4463     int i;
4464 
4465     /* We are under rtnl_lock and all our NAPIs have been disabled,
4466      * so it's safe to delete the hash table.
4467      */
4468     for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
4469         struct hlist_head *head;
4470         struct hlist_node *tmp;
4471         struct bnxt_ntuple_filter *fltr;
4472 
4473         head = &bp->ntp_fltr_hash_tbl[i];
4474         hlist_for_each_entry_safe(fltr, tmp, head, hash) {
4475             hlist_del(&fltr->hash);
4476             kfree(fltr);
4477         }
4478     }
4479     if (irq_reinit) {
4480         bitmap_free(bp->ntp_fltr_bmap);
4481         bp->ntp_fltr_bmap = NULL;
4482     }
4483     bp->ntp_fltr_count = 0;
4484 #endif
4485 }
4486 
4487 static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
4488 {
4489 #ifdef CONFIG_RFS_ACCEL
4490     int i, rc = 0;
4491 
4492     if (!(bp->flags & BNXT_FLAG_RFS))
4493         return 0;
4494 
4495     for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
4496         INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
4497 
4498     bp->ntp_fltr_count = 0;
4499     bp->ntp_fltr_bmap = bitmap_zalloc(BNXT_NTP_FLTR_MAX_FLTR, GFP_KERNEL);
4500 
4501     if (!bp->ntp_fltr_bmap)
4502         rc = -ENOMEM;
4503 
4504     return rc;
4505 #else
4506     return 0;
4507 #endif
4508 }
4509 
4510 static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
4511 {
4512     bnxt_free_vnic_attributes(bp);
4513     bnxt_free_tx_rings(bp);
4514     bnxt_free_rx_rings(bp);
4515     bnxt_free_cp_rings(bp);
4516     bnxt_free_all_cp_arrays(bp);
4517     bnxt_free_ntp_fltrs(bp, irq_re_init);
4518     if (irq_re_init) {
4519         bnxt_free_ring_stats(bp);
4520         if (!(bp->phy_flags & BNXT_PHY_FL_PORT_STATS_NO_RESET) ||
4521             test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
4522             bnxt_free_port_stats(bp);
4523         bnxt_free_ring_grps(bp);
4524         bnxt_free_vnics(bp);
4525         kfree(bp->tx_ring_map);
4526         bp->tx_ring_map = NULL;
4527         kfree(bp->tx_ring);
4528         bp->tx_ring = NULL;
4529         kfree(bp->rx_ring);
4530         bp->rx_ring = NULL;
4531         kfree(bp->bnapi);
4532         bp->bnapi = NULL;
4533     } else {
4534         bnxt_clear_ring_indices(bp);
4535     }
4536 }
4537 
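     /* Allocate all SW and HW ring state for the device.  When
      * irq_re_init is set, the NAPI array, RX/TX ring arrays, statistics,
      * ntuple filter table and VNICs are (re)allocated too; otherwise
      * only the per-ring memory and VNIC attributes are rebuilt.
      */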
4538 static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
4539 {
4540     int i, j, rc, size, arr_size;
4541     void *bnapi;
4542 
4543     if (irq_re_init) {
4544         /* Allocate bnapi mem pointer array and mem block for
4545          * all queues
4546          */
4547         arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
4548                 bp->cp_nr_rings);
4549         size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
4550         bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
4551         if (!bnapi)
4552             return -ENOMEM;
4553 
4554         bp->bnapi = bnapi;
4555         bnapi += arr_size;
4556         for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
4557             bp->bnapi[i] = bnapi;
4558             bp->bnapi[i]->index = i;
4559             bp->bnapi[i]->bp = bp;
4560             if (bp->flags & BNXT_FLAG_CHIP_P5) {
4561                 struct bnxt_cp_ring_info *cpr =
4562                     &bp->bnapi[i]->cp_ring;
4563 
4564                 cpr->cp_ring_struct.ring_mem.flags =
4565                     BNXT_RMEM_RING_PTE_FLAG;
4566             }
4567         }
4568 
4569         bp->rx_ring = kcalloc(bp->rx_nr_rings,
4570                       sizeof(struct bnxt_rx_ring_info),
4571                       GFP_KERNEL);
4572         if (!bp->rx_ring)
4573             return -ENOMEM;
4574 
4575         for (i = 0; i < bp->rx_nr_rings; i++) {
4576             struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
4577 
4578             if (bp->flags & BNXT_FLAG_CHIP_P5) {
4579                 rxr->rx_ring_struct.ring_mem.flags =
4580                     BNXT_RMEM_RING_PTE_FLAG;
4581                 rxr->rx_agg_ring_struct.ring_mem.flags =
4582                     BNXT_RMEM_RING_PTE_FLAG;
4583             }
4584             rxr->bnapi = bp->bnapi[i];
4585             bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
4586         }
4587 
4588         bp->tx_ring = kcalloc(bp->tx_nr_rings,
4589                       sizeof(struct bnxt_tx_ring_info),
4590                       GFP_KERNEL);
4591         if (!bp->tx_ring)
4592             return -ENOMEM;
4593 
4594         bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
4595                       GFP_KERNEL);
4596 
4597         if (!bp->tx_ring_map)
4598             return -ENOMEM;
4599 
4600         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
4601             j = 0;
4602         else
4603             j = bp->rx_nr_rings;
4604 
4605         for (i = 0; i < bp->tx_nr_rings; i++, j++) {
4606             struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
4607 
4608             if (bp->flags & BNXT_FLAG_CHIP_P5)
4609                 txr->tx_ring_struct.ring_mem.flags =
4610                     BNXT_RMEM_RING_PTE_FLAG;
4611             txr->bnapi = bp->bnapi[j];
4612             bp->bnapi[j]->tx_ring = txr;
4613             bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
4614             if (i >= bp->tx_nr_rings_xdp) {
4615                 txr->txq_index = i - bp->tx_nr_rings_xdp;
4616                 bp->bnapi[j]->tx_int = bnxt_tx_int;
4617             } else {
4618                 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
4619                 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
4620             }
4621         }
4622 
4623         rc = bnxt_alloc_stats(bp);
4624         if (rc)
4625             goto alloc_mem_err;
4626         bnxt_init_stats(bp);
4627 
4628         rc = bnxt_alloc_ntp_fltrs(bp);
4629         if (rc)
4630             goto alloc_mem_err;
4631 
4632         rc = bnxt_alloc_vnics(bp);
4633         if (rc)
4634             goto alloc_mem_err;
4635     }
4636 
4637     rc = bnxt_alloc_all_cp_arrays(bp);
4638     if (rc)
4639         goto alloc_mem_err;
4640 
4641     bnxt_init_ring_struct(bp);
4642 
4643     rc = bnxt_alloc_rx_rings(bp);
4644     if (rc)
4645         goto alloc_mem_err;
4646 
4647     rc = bnxt_alloc_tx_rings(bp);
4648     if (rc)
4649         goto alloc_mem_err;
4650 
4651     rc = bnxt_alloc_cp_rings(bp);
4652     if (rc)
4653         goto alloc_mem_err;
4654 
4655     bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
4656                   BNXT_VNIC_UCAST_FLAG;
4657     rc = bnxt_alloc_vnic_attributes(bp);
4658     if (rc)
4659         goto alloc_mem_err;
4660     return 0;
4661 
4662 alloc_mem_err:
4663     bnxt_free_mem(bp, true);
4664     return rc;
4665 }
4666 
4667 static void bnxt_disable_int(struct bnxt *bp)
4668 {
4669     int i;
4670 
4671     if (!bp->bnapi)
4672         return;
4673 
4674     for (i = 0; i < bp->cp_nr_rings; i++) {
4675         struct bnxt_napi *bnapi = bp->bnapi[i];
4676         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4677         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
4678 
4679         if (ring->fw_ring_id != INVALID_HW_RING_ID)
4680             bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
4681     }
4682 }
4683 
4684 static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
4685 {
4686     struct bnxt_napi *bnapi = bp->bnapi[n];
4687     struct bnxt_cp_ring_info *cpr;
4688 
4689     cpr = &bnapi->cp_ring;
4690     return cpr->cp_ring_struct.map_idx;
4691 }
4692 
4693 static void bnxt_disable_int_sync(struct bnxt *bp)
4694 {
4695     int i;
4696 
4697     if (!bp->irq_tbl)
4698         return;
4699 
4700     atomic_inc(&bp->intr_sem);
4701 
4702     bnxt_disable_int(bp);
4703     for (i = 0; i < bp->cp_nr_rings; i++) {
4704         int map_idx = bnxt_cp_num_to_irq_num(bp, i);
4705 
4706         synchronize_irq(bp->irq_tbl[map_idx].vector);
4707     }
4708 }
4709 
4710 static void bnxt_enable_int(struct bnxt *bp)
4711 {
4712     int i;
4713 
4714     atomic_set(&bp->intr_sem, 0);
4715     for (i = 0; i < bp->cp_nr_rings; i++) {
4716         struct bnxt_napi *bnapi = bp->bnapi[i];
4717         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4718 
4719         bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
4720     }
4721 }
4722 
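     /* Register the driver with the firmware: advertise OS type and
      * driver version, capability flags (hot reset, error recovery), the
      * VF commands the PF will forward, and the async events this
      * function wants to receive.
      */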
4723 int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
4724                 bool async_only)
4725 {
4726     DECLARE_BITMAP(async_events_bmap, 256);
4727     u32 *events = (u32 *)async_events_bmap;
4728     struct hwrm_func_drv_rgtr_output *resp;
4729     struct hwrm_func_drv_rgtr_input *req;
4730     u32 flags;
4731     int rc, i;
4732 
4733     rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR);
4734     if (rc)
4735         return rc;
4736 
4737     req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
4738                    FUNC_DRV_RGTR_REQ_ENABLES_VER |
4739                    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4740 
4741     req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
4742     flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE;
4743     if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
4744         flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT;
4745     if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
4746         flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT |
4747              FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT;
4748     req->flags = cpu_to_le32(flags);
4749     req->ver_maj_8b = DRV_VER_MAJ;
4750     req->ver_min_8b = DRV_VER_MIN;
4751     req->ver_upd_8b = DRV_VER_UPD;
4752     req->ver_maj = cpu_to_le16(DRV_VER_MAJ);
4753     req->ver_min = cpu_to_le16(DRV_VER_MIN);
4754     req->ver_upd = cpu_to_le16(DRV_VER_UPD);
4755 
4756     if (BNXT_PF(bp)) {
4757         u32 data[8];
4758         int i;
4759 
4760         memset(data, 0, sizeof(data));
4761         for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
4762             u16 cmd = bnxt_vf_req_snif[i];
4763             unsigned int bit, idx;
4764 
4765             idx = cmd / 32;
4766             bit = cmd % 32;
4767             data[idx] |= 1 << bit;
4768         }
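             /* Worked example (hypothetical command id, for illustration
              * only): a forwarded HWRM command id of 43 lands in
              * data[43 / 32] = data[1], bit 43 % 32 = 11, i.e.
              * data[1] |= 1 << 11.
              */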
4769 
4770         for (i = 0; i < 8; i++)
4771             req->vf_req_fwd[i] = cpu_to_le32(data[i]);
4772 
4773         req->enables |=
4774             cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
4775     }
4776 
4777     if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
4778         req->flags |= cpu_to_le32(
4779             FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
4780 
4781     memset(async_events_bmap, 0, sizeof(async_events_bmap));
4782     for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
4783         u16 event_id = bnxt_async_events_arr[i];
4784 
4785         if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
4786             !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
4787             continue;
4788         __set_bit(bnxt_async_events_arr[i], async_events_bmap);
4789     }
4790     if (bmap && bmap_size) {
4791         for (i = 0; i < bmap_size; i++) {
4792             if (test_bit(i, bmap))
4793                 __set_bit(i, async_events_bmap);
4794         }
4795     }
4796     for (i = 0; i < 8; i++)
4797         req->async_event_fwd[i] |= cpu_to_le32(events[i]);
4798 
4799     if (async_only)
4800         req->enables =
4801             cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
4802 
4803     resp = hwrm_req_hold(bp, req);
4804     rc = hwrm_req_send(bp, req);
4805     if (!rc) {
4806         set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state);
4807         if (resp->flags &
4808             cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
4809             bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4810     }
4811     hwrm_req_drop(bp, req);
4812     return rc;
4813 }
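     /* Minimal usage sketch (illustrative only; mirrors how a ULP/aux caller
      * might re-register for one extra async event, not a verbatim call from
      * this file):
      *
      *     DECLARE_BITMAP(events, 256);
      *
      *     bitmap_zero(events, 256);
      *     __set_bit(ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE, events);
      *     rc = bnxt_hwrm_func_drv_rgtr(bp, events, 256, true);
      *
      * With @async_only set, only the async_event_fwd words are flagged in
      * the request enables, so firmware updates just that part of the
      * registration.
      */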
4814 
4815 int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4816 {
4817     struct hwrm_func_drv_unrgtr_input *req;
4818     int rc;
4819 
4820     if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state))
4821         return 0;
4822 
4823     rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR);
4824     if (rc)
4825         return rc;
4826     return hwrm_req_send(bp, req);
4827 }
4828 
4829 static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4830 {
4831     struct hwrm_tunnel_dst_port_free_input *req;
4832     int rc;
4833 
4834     if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN &&
4835         bp->vxlan_fw_dst_port_id == INVALID_HW_RING_ID)
4836         return 0;
4837     if (tunnel_type == TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE &&
4838         bp->nge_fw_dst_port_id == INVALID_HW_RING_ID)
4839         return 0;
4840 
4841     rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE);
4842     if (rc)
4843         return rc;
4844 
4845     req->tunnel_type = tunnel_type;
4846 
4847     switch (tunnel_type) {
4848     case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4849         req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id);
4850         bp->vxlan_port = 0;
4851         bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
4852         break;
4853     case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4854         req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id);
4855         bp->nge_port = 0;
4856         bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
4857         break;
4858     default:
4859         break;
4860     }
4861 
4862     rc = hwrm_req_send(bp, req);
4863     if (rc)
4864         netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4865                rc);
4866     return rc;
4867 }
4868 
4869 static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4870                        u8 tunnel_type)
4871 {
4872     struct hwrm_tunnel_dst_port_alloc_output *resp;
4873     struct hwrm_tunnel_dst_port_alloc_input *req;
4874     int rc;
4875 
4876     rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC);
4877     if (rc)
4878         return rc;
4879 
4880     req->tunnel_type = tunnel_type;
4881     req->tunnel_dst_port_val = port;
4882 
4883     resp = hwrm_req_hold(bp, req);
4884     rc = hwrm_req_send(bp, req);
4885     if (rc) {
4886         netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4887                rc);
4888         goto err_out;
4889     }
4890 
4891     switch (tunnel_type) {
4892     case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
4893         bp->vxlan_port = port;
4894         bp->vxlan_fw_dst_port_id =
4895             le16_to_cpu(resp->tunnel_dst_port_id);
4896         break;
4897     case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
4898         bp->nge_port = port;
4899         bp->nge_fw_dst_port_id = le16_to_cpu(resp->tunnel_dst_port_id);
4900         break;
4901     default:
4902         break;
4903     }
4904 
4905 err_out:
4906     hwrm_req_drop(bp, req);
4907     return rc;
4908 }
4909 
4910 static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4911 {
4912     struct hwrm_cfa_l2_set_rx_mask_input *req;
4913     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4914     int rc;
4915 
4916     rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK);
4917     if (rc)
4918         return rc;
4919 
4920     req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
4921     if (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST) {
4922         req->num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4923         req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4924     }
4925     req->mask = cpu_to_le32(vnic->rx_mask);
4926     return hwrm_req_send_silent(bp, req);
4927 }
4928 
4929 #ifdef CONFIG_RFS_ACCEL
4930 static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4931                         struct bnxt_ntuple_filter *fltr)
4932 {
4933     struct hwrm_cfa_ntuple_filter_free_input *req;
4934     int rc;
4935 
4936     rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE);
4937     if (rc)
4938         return rc;
4939 
4940     req->ntuple_filter_id = fltr->filter_id;
4941     return hwrm_req_send(bp, req);
4942 }
4943 
4944 #define BNXT_NTP_FLTR_FLAGS                 \
4945     (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4946      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |    \
4947      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |  \
4948      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |  \
4949      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |   \
4950      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |  \
4951      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |   \
4952      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |  \
4953      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |  \
4954      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |     \
4955      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |    \
4956      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |     \
4957      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |    \
4958      CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
4959 
4960 #define BNXT_NTP_TUNNEL_FLTR_FLAG               \
4961         CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4962 
4963 static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4964                          struct bnxt_ntuple_filter *fltr)
4965 {
4966     struct hwrm_cfa_ntuple_filter_alloc_output *resp;
4967     struct hwrm_cfa_ntuple_filter_alloc_input *req;
4968     struct flow_keys *keys = &fltr->fkeys;
4969     struct bnxt_vnic_info *vnic;
4970     u32 flags = 0;
4971     int rc;
4972 
4973     rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC);
4974     if (rc)
4975         return rc;
4976 
4977     req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
4978 
4979     if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
4980         flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX;
4981         req->dst_id = cpu_to_le16(fltr->rxq);
4982     } else {
4983         vnic = &bp->vnic_info[fltr->rxq + 1];
4984         req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
4985     }
4986     req->flags = cpu_to_le32(flags);
4987     req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4988 
4989     req->ethertype = htons(ETH_P_IP);
4990     memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN);
4991     req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
4992     req->ip_protocol = keys->basic.ip_proto;
4993 
4994     if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4995         int i;
4996 
4997         req->ethertype = htons(ETH_P_IPV6);
4998         req->ip_addr_type =
4999             CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
5000         *(struct in6_addr *)&req->src_ipaddr[0] =
5001             keys->addrs.v6addrs.src;
5002         *(struct in6_addr *)&req->dst_ipaddr[0] =
5003             keys->addrs.v6addrs.dst;
5004         for (i = 0; i < 4; i++) {
5005             req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5006             req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
5007         }
5008     } else {
5009         req->src_ipaddr[0] = keys->addrs.v4addrs.src;
5010         req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5011         req->dst_ipaddr[0] = keys->addrs.v4addrs.dst;
5012         req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
5013     }
5014     if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
5015         req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
5016         req->tunnel_type =
5017             CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
5018     }
5019 
5020     req->src_port = keys->ports.src;
5021     req->src_port_mask = cpu_to_be16(0xffff);
5022     req->dst_port = keys->ports.dst;
5023     req->dst_port_mask = cpu_to_be16(0xffff);
5024 
5025     resp = hwrm_req_hold(bp, req);
5026     rc = hwrm_req_send(bp, req);
5027     if (!rc)
5028         fltr->filter_id = resp->ntuple_filter_id;
5029     hwrm_req_drop(bp, req);
5030     return rc;
5031 }
5032 #endif
5033 
5034 static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
5035                      const u8 *mac_addr)
5036 {
5037     struct hwrm_cfa_l2_filter_alloc_output *resp;
5038     struct hwrm_cfa_l2_filter_alloc_input *req;
5039     int rc;
5040 
5041     rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC);
5042     if (rc)
5043         return rc;
5044 
5045     req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
5046     if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
5047         req->flags |=
5048             cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
5049     req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
5050     req->enables =
5051         cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
5052                 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
5053                 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
5054     memcpy(req->l2_addr, mac_addr, ETH_ALEN);
5055     req->l2_addr_mask[0] = 0xff;
5056     req->l2_addr_mask[1] = 0xff;
5057     req->l2_addr_mask[2] = 0xff;
5058     req->l2_addr_mask[3] = 0xff;
5059     req->l2_addr_mask[4] = 0xff;
5060     req->l2_addr_mask[5] = 0xff;
5061 
5062     resp = hwrm_req_hold(bp, req);
5063     rc = hwrm_req_send(bp, req);
5064     if (!rc)
5065         bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
5066                             resp->l2_filter_id;
5067     hwrm_req_drop(bp, req);
5068     return rc;
5069 }
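     /* Typical usage sketch (illustrative): the default unicast MAC address
      * is installed on vnic 0 at filter index 0, e.g.
      *
      *     rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
      */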
5070 
5071 static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
5072 {
5073     struct hwrm_cfa_l2_filter_free_input *req;
5074     u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
5075     int rc;
5076 
5077     /* Any associated ntuple filters will also be cleared by firmware. */
5078     rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
5079     if (rc)
5080         return rc;
5081     hwrm_req_hold(bp, req);
5082     for (i = 0; i < num_of_vnics; i++) {
5083         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5084 
5085         for (j = 0; j < vnic->uc_filter_count; j++) {
5086             req->l2_filter_id = vnic->fw_l2_filter_id[j];
5087 
5088             rc = hwrm_req_send(bp, req);
5089         }
5090         vnic->uc_filter_count = 0;
5091     }
5092     hwrm_req_drop(bp, req);
5093     return rc;
5094 }
5095 
5096 static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
5097 {
5098     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5099     u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
5100     struct hwrm_vnic_tpa_cfg_input *req;
5101     int rc;
5102 
5103     if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
5104         return 0;
5105 
5106     rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG);
5107     if (rc)
5108         return rc;
5109 
5110     if (tpa_flags) {
5111         u16 mss = bp->dev->mtu - 40;
5112         u32 nsegs, n, segs = 0, flags;
5113 
5114         flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
5115             VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
5116             VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
5117             VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
5118             VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
5119         if (tpa_flags & BNXT_FLAG_GRO)
5120             flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
5121 
5122         req->flags = cpu_to_le32(flags);
5123 
5124         req->enables =
5125             cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
5126                     VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
5127                     VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
5128 
5129         /* The number of segs is in log2 units, and the first packet is
5130          * not counted as part of these units.
5131          */
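             /* Worked example (illustrative, assuming 4K rx pages and a
              * 1500-byte MTU): mss = 1460 <= BNXT_RX_PAGE_SIZE, so n =
              * 4096 / 1460 = 2 full MSS segments fit in each aggregation
              * page and nsegs = (MAX_SKB_FRAGS - 1) * 2; pre-P5 chips then
              * program ilog2(nsegs) as max_agg_segs below.
              */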
5132         if (mss <= BNXT_RX_PAGE_SIZE) {
5133             n = BNXT_RX_PAGE_SIZE / mss;
5134             nsegs = (MAX_SKB_FRAGS - 1) * n;
5135         } else {
5136             n = mss / BNXT_RX_PAGE_SIZE;
5137             if (mss & (BNXT_RX_PAGE_SIZE - 1))
5138                 n++;
5139             nsegs = (MAX_SKB_FRAGS - n) / n;
5140         }
5141 
5142         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5143             segs = MAX_TPA_SEGS_P5;
5144             max_aggs = bp->max_tpa;
5145         } else {
5146             segs = ilog2(nsegs);
5147         }
5148         req->max_agg_segs = cpu_to_le16(segs);
5149         req->max_aggs = cpu_to_le16(max_aggs);
5150 
5151         req->min_agg_len = cpu_to_le32(512);
5152     }
5153     req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5154 
5155     return hwrm_req_send(bp, req);
5156 }
5157 
5158 static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
5159 {
5160     struct bnxt_ring_grp_info *grp_info;
5161 
5162     grp_info = &bp->grp_info[ring->grp_idx];
5163     return grp_info->cp_fw_ring_id;
5164 }
5165 
5166 static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
5167 {
5168     if (bp->flags & BNXT_FLAG_CHIP_P5) {
5169         struct bnxt_napi *bnapi = rxr->bnapi;
5170         struct bnxt_cp_ring_info *cpr;
5171 
5172         cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
5173         return cpr->cp_ring_struct.fw_ring_id;
5174     } else {
5175         return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
5176     }
5177 }
5178 
5179 static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
5180 {
5181     if (bp->flags & BNXT_FLAG_CHIP_P5) {
5182         struct bnxt_napi *bnapi = txr->bnapi;
5183         struct bnxt_cp_ring_info *cpr;
5184 
5185         cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
5186         return cpr->cp_ring_struct.fw_ring_id;
5187     } else {
5188         return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
5189     }
5190 }
5191 
5192 static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
5193 {
5194     int entries;
5195 
5196     if (bp->flags & BNXT_FLAG_CHIP_P5)
5197         entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5;
5198     else
5199         entries = HW_HASH_INDEX_SIZE;
5200 
5201     bp->rss_indir_tbl_entries = entries;
5202     bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
5203                       GFP_KERNEL);
5204     if (!bp->rss_indir_tbl)
5205         return -ENOMEM;
5206     return 0;
5207 }
5208 
5209 static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
5210 {
5211     u16 max_rings, max_entries, pad, i;
5212 
5213     if (!bp->rx_nr_rings)
5214         return;
5215 
5216     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5217         max_rings = bp->rx_nr_rings - 1;
5218     else
5219         max_rings = bp->rx_nr_rings;
5220 
5221     max_entries = bnxt_get_rxfh_indir_size(bp->dev);
5222 
5223     for (i = 0; i < max_entries; i++)
5224         bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
5225 
5226     pad = bp->rss_indir_tbl_entries - max_entries;
5227     if (pad)
5228         memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
5229 }
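     /* Illustrative note: ethtool_rxfh_indir_default(i, n) reduces to i % n,
      * so with e.g. 4 rx rings the default table cycles 0, 1, 2, 3, 0, 1, ...
      * and spreads the RSS hash buckets evenly across the rings.
      */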
5230 
5231 static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
5232 {
5233     u16 i, tbl_size, max_ring = 0;
5234 
5235     if (!bp->rss_indir_tbl)
5236         return 0;
5237 
5238     tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5239     for (i = 0; i < tbl_size; i++)
5240         max_ring = max(max_ring, bp->rss_indir_tbl[i]);
5241     return max_ring;
5242 }
5243 
5244 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
5245 {
5246     if (bp->flags & BNXT_FLAG_CHIP_P5)
5247         return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
5248     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5249         return 2;
5250     return 1;
5251 }
5252 
5253 static void __bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5254 {
5255     bool no_rss = !(vnic->flags & BNXT_VNIC_RSS_FLAG);
5256     u16 i, j;
5257 
5258     /* Fill the RSS indirection table with ring group ids */
5259     for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
5260         if (!no_rss)
5261             j = bp->rss_indir_tbl[i];
5262         vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
5263     }
5264 }
5265 
5266 static void __bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
5267                       struct bnxt_vnic_info *vnic)
5268 {
5269     __le16 *ring_tbl = vnic->rss_table;
5270     struct bnxt_rx_ring_info *rxr;
5271     u16 tbl_size, i;
5272 
5273     tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
5274 
5275     for (i = 0; i < tbl_size; i++) {
5276         u16 ring_id, j;
5277 
5278         j = bp->rss_indir_tbl[i];
5279         rxr = &bp->rx_ring[j];
5280 
5281         ring_id = rxr->rx_ring_struct.fw_ring_id;
5282         *ring_tbl++ = cpu_to_le16(ring_id);
5283         ring_id = bnxt_cp_ring_for_rx(bp, rxr);
5284         *ring_tbl++ = cpu_to_le16(ring_id);
5285     }
5286 }
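     /* Note on the P5 layout above: each indirection table slot is written as
      * a (rx ring id, companion completion ring id) pair, so the table handed
      * to firmware carries two 16-bit entries per logical rss_indir_tbl[]
      * slot.
      */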
5287 
5288 static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic)
5289 {
5290     if (bp->flags & BNXT_FLAG_CHIP_P5)
5291         __bnxt_fill_hw_rss_tbl_p5(bp, vnic);
5292     else
5293         __bnxt_fill_hw_rss_tbl(bp, vnic);
5294 }
5295 
5296 static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
5297 {
5298     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5299     struct hwrm_vnic_rss_cfg_input *req;
5300     int rc;
5301 
5302     if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
5303         vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
5304         return 0;
5305 
5306     rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5307     if (rc)
5308         return rc;
5309 
5310     if (set_rss) {
5311         bnxt_fill_hw_rss_tbl(bp, vnic);
5312         req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5313         req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5314         req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
5315         req->hash_key_tbl_addr =
5316             cpu_to_le64(vnic->rss_hash_key_dma_addr);
5317     }
5318     req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5319     return hwrm_req_send(bp, req);
5320 }
5321 
5322 static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
5323 {
5324     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5325     struct hwrm_vnic_rss_cfg_input *req;
5326     dma_addr_t ring_tbl_map;
5327     u32 i, nr_ctxs;
5328     int rc;
5329 
5330     rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG);
5331     if (rc)
5332         return rc;
5333 
5334     req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5335     if (!set_rss)
5336         return hwrm_req_send(bp, req);
5337 
5338     bnxt_fill_hw_rss_tbl(bp, vnic);
5339     req->hash_type = cpu_to_le32(bp->rss_hash_cfg);
5340     req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
5341     req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
5342     ring_tbl_map = vnic->rss_table_dma_addr;
5343     nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
5344 
5345     hwrm_req_hold(bp, req);
5346     for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) {
5347         req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map);
5348         req->ring_table_pair_index = i;
5349         req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
5350         rc = hwrm_req_send(bp, req);
5351         if (rc)
5352             goto exit;
5353     }
5354 
5355 exit:
5356     hwrm_req_drop(bp, req);
5357     return rc;
5358 }
5359 
5360 static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
5361 {
5362     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5363     struct hwrm_vnic_plcmodes_cfg_input *req;
5364     int rc;
5365 
5366     rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG);
5367     if (rc)
5368         return rc;
5369 
5370     req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
5371     req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
5372 
5373     if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
5374         req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
5375                       VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
5376         req->enables |=
5377             cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
5378     }
5379     /* thresholds not implemented in firmware yet */
5380     req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
5381     req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
5382     req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
5383     return hwrm_req_send(bp, req);
5384 }
5385 
5386 static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
5387                     u16 ctx_idx)
5388 {
5389     struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
5390 
5391     if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE))
5392         return;
5393 
5394     req->rss_cos_lb_ctx_id =
5395         cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
5396 
5397     hwrm_req_send(bp, req);
5398     bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
5399 }
5400 
5401 static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
5402 {
5403     int i, j;
5404 
5405     for (i = 0; i < bp->nr_vnics; i++) {
5406         struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
5407 
5408         for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
5409             if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
5410                 bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
5411         }
5412     }
5413     bp->rsscos_nr_ctxs = 0;
5414 }
5415 
5416 static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
5417 {
5418     struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
5419     struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
5420     int rc;
5421 
5422     rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC);
5423     if (rc)
5424         return rc;
5425 
5426     resp = hwrm_req_hold(bp, req);
5427     rc = hwrm_req_send(bp, req);
5428     if (!rc)
5429         bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
5430             le16_to_cpu(resp->rss_cos_lb_ctx_id);
5431     hwrm_req_drop(bp, req);
5432 
5433     return rc;
5434 }
5435 
5436 static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
5437 {
5438     if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
5439         return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
5440     return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
5441 }
5442 
5443 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
5444 {
5445     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5446     struct hwrm_vnic_cfg_input *req;
5447     unsigned int ring = 0, grp_idx;
5448     u16 def_vlan = 0;
5449     int rc;
5450 
5451     rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG);
5452     if (rc)
5453         return rc;
5454 
5455     if (bp->flags & BNXT_FLAG_CHIP_P5) {
5456         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
5457 
5458         req->default_rx_ring_id =
5459             cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
5460         req->default_cmpl_ring_id =
5461             cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
5462         req->enables =
5463             cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
5464                     VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
5465         goto vnic_mru;
5466     }
5467     req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
5468     /* Only RSS is supported for now; TBD: COS & LB */
5469     if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
5470         req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
5471         req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5472                        VNIC_CFG_REQ_ENABLES_MRU);
5473     } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
5474         req->rss_rule =
5475             cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
5476         req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
5477                        VNIC_CFG_REQ_ENABLES_MRU);
5478         req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
5479     } else {
5480         req->rss_rule = cpu_to_le16(0xffff);
5481     }
5482 
5483     if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
5484         (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
5485         req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
5486         req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
5487     } else {
5488         req->cos_rule = cpu_to_le16(0xffff);
5489     }
5490 
5491     if (vnic->flags & BNXT_VNIC_RSS_FLAG)
5492         ring = 0;
5493     else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
5494         ring = vnic_id - 1;
5495     else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
5496         ring = bp->rx_nr_rings - 1;
5497 
5498     grp_idx = bp->rx_ring[ring].bnapi->index;
5499     req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
5500     req->lb_rule = cpu_to_le16(0xffff);
5501 vnic_mru:
5502     req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN);
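         /* e.g. a 1500-byte MTU gives an MRU of 1500 + ETH_HLEN (14) +
          * VLAN_HLEN (4) = 1518 bytes.
          */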
5503 
5504     req->vnic_id = cpu_to_le16(vnic->fw_vnic_id);
5505 #ifdef CONFIG_BNXT_SRIOV
5506     if (BNXT_VF(bp))
5507         def_vlan = bp->vf.vlan;
5508 #endif
5509     if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
5510         req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
5511     if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
5512         req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
5513 
5514     return hwrm_req_send(bp, req);
5515 }
5516 
5517 static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
5518 {
5519     if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
5520         struct hwrm_vnic_free_input *req;
5521 
5522         if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
5523             return;
5524 
5525         req->vnic_id =
5526             cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
5527 
5528         hwrm_req_send(bp, req);
5529         bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
5530     }
5531 }
5532 
5533 static void bnxt_hwrm_vnic_free(struct bnxt *bp)
5534 {
5535     u16 i;
5536 
5537     for (i = 0; i < bp->nr_vnics; i++)
5538         bnxt_hwrm_vnic_free_one(bp, i);
5539 }
5540 
5541 static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
5542                 unsigned int start_rx_ring_idx,
5543                 unsigned int nr_rings)
5544 {
5545     unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
5546     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
5547     struct hwrm_vnic_alloc_output *resp;
5548     struct hwrm_vnic_alloc_input *req;
5549     int rc;
5550 
5551     rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC);
5552     if (rc)
5553         return rc;
5554 
5555     if (bp->flags & BNXT_FLAG_CHIP_P5)
5556         goto vnic_no_ring_grps;
5557 
5558     /* map ring groups to this vnic */
5559     for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
5560         grp_idx = bp->rx_ring[i].bnapi->index;
5561         if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
5562             netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
5563                    j, nr_rings);
5564             break;
5565         }
5566         vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
5567     }
5568 
5569 vnic_no_ring_grps:
5570     for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
5571         vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
5572     if (vnic_id == 0)
5573         req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
5574 
5575     resp = hwrm_req_hold(bp, req);
5576     rc = hwrm_req_send(bp, req);
5577     if (!rc)
5578         vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
5579     hwrm_req_drop(bp, req);
5580     return rc;
5581 }
5582 
5583 static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
5584 {
5585     struct hwrm_vnic_qcaps_output *resp;
5586     struct hwrm_vnic_qcaps_input *req;
5587     int rc;
5588 
5589     bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats);
5590     bp->flags &= ~(BNXT_FLAG_NEW_RSS_CAP | BNXT_FLAG_ROCE_MIRROR_CAP);
5591     if (bp->hwrm_spec_code < 0x10600)
5592         return 0;
5593 
5594     rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS);
5595     if (rc)
5596         return rc;
5597 
5598     resp = hwrm_req_hold(bp, req);
5599     rc = hwrm_req_send(bp, req);
5600     if (!rc) {
5601         u32 flags = le32_to_cpu(resp->flags);
5602 
5603         if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
5604             (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
5605             bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
5606         if (flags &
5607             VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
5608             bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
5609 
5610         /* Older P5 fw before EXT_HW_STATS support did not set
5611          * VLAN_STRIP_CAP properly.
5612          */
5613         if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
5614             (BNXT_CHIP_P5_THOR(bp) &&
5615              !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
5616             bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
5617         bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
5618         if (bp->max_tpa_v2) {
5619             if (BNXT_CHIP_P5_THOR(bp))
5620                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
5621             else
5622                 bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
5623         }
5624     }
5625     hwrm_req_drop(bp, req);
5626     return rc;
5627 }
5628 
5629 static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
5630 {
5631     struct hwrm_ring_grp_alloc_output *resp;
5632     struct hwrm_ring_grp_alloc_input *req;
5633     int rc;
5634     u16 i;
5635 
5636     if (bp->flags & BNXT_FLAG_CHIP_P5)
5637         return 0;
5638 
5639     rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC);
5640     if (rc)
5641         return rc;
5642 
5643     resp = hwrm_req_hold(bp, req);
5644     for (i = 0; i < bp->rx_nr_rings; i++) {
5645         unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
5646 
5647         req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
5648         req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
5649         req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
5650         req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
5651 
5652         rc = hwrm_req_send(bp, req);
5653 
5654         if (rc)
5655             break;
5656 
5657         bp->grp_info[grp_idx].fw_grp_id =
5658             le32_to_cpu(resp->ring_group_id);
5659     }
5660     hwrm_req_drop(bp, req);
5661     return rc;
5662 }
5663 
5664 static void bnxt_hwrm_ring_grp_free(struct bnxt *bp)
5665 {
5666     struct hwrm_ring_grp_free_input *req;
5667     u16 i;
5668 
5669     if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
5670         return;
5671 
5672     if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE))
5673         return;
5674 
5675     hwrm_req_hold(bp, req);
5676     for (i = 0; i < bp->cp_nr_rings; i++) {
5677         if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
5678             continue;
5679         req->ring_group_id =
5680             cpu_to_le32(bp->grp_info[i].fw_grp_id);
5681 
5682         hwrm_req_send(bp, req);
5683         bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
5684     }
5685     hwrm_req_drop(bp, req);
5686 }
5687 
5688 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
5689                     struct bnxt_ring_struct *ring,
5690                     u32 ring_type, u32 map_index)
5691 {
5692     struct hwrm_ring_alloc_output *resp;
5693     struct hwrm_ring_alloc_input *req;
5694     struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
5695     struct bnxt_ring_grp_info *grp_info;
5696     int rc, err = 0;
5697     u16 ring_id;
5698 
5699     rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC);
5700     if (rc)
5701         goto exit;
5702 
5703     req->enables = 0;
5704     if (rmem->nr_pages > 1) {
5705         req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
5706         /* Page size is in log2 units */
5707         req->page_size = BNXT_PAGE_SHIFT;
5708         req->page_tbl_depth = 1;
5709     } else {
5710         req->page_tbl_addr =  cpu_to_le64(rmem->dma_arr[0]);
5711     }
5712     req->fbo = 0;
5713     /* Association of ring index with doorbell index and MSIX number */
5714     req->logical_id = cpu_to_le16(map_index);
5715 
5716     switch (ring_type) {
5717     case HWRM_RING_ALLOC_TX: {
5718         struct bnxt_tx_ring_info *txr;
5719 
5720         txr = container_of(ring, struct bnxt_tx_ring_info,
5721                    tx_ring_struct);
5722         req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
5723         /* Association of transmit ring with completion ring */
5724         grp_info = &bp->grp_info[ring->grp_idx];
5725         req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
5726         req->length = cpu_to_le32(bp->tx_ring_mask + 1);
5727         req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5728         req->queue_id = cpu_to_le16(ring->queue_id);
5729         break;
5730     }
5731     case HWRM_RING_ALLOC_RX:
5732         req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5733         req->length = cpu_to_le32(bp->rx_ring_mask + 1);
5734         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5735             u16 flags = 0;
5736 
5737             /* Association of rx ring with stats context */
5738             grp_info = &bp->grp_info[ring->grp_idx];
5739             req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
5740             req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5741             req->enables |= cpu_to_le32(
5742                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5743             if (NET_IP_ALIGN == 2)
5744                 flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
5745             req->flags = cpu_to_le16(flags);
5746         }
5747         break;
5748     case HWRM_RING_ALLOC_AGG:
5749         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5750             req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
5751             /* Association of agg ring with rx ring */
5752             grp_info = &bp->grp_info[ring->grp_idx];
5753             req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
5754             req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
5755             req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
5756             req->enables |= cpu_to_le32(
5757                 RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
5758                 RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
5759         } else {
5760             req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
5761         }
5762         req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
5763         break;
5764     case HWRM_RING_ALLOC_CMPL:
5765         req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
5766         req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5767         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5768             /* Association of cp ring with nq */
5769             grp_info = &bp->grp_info[map_index];
5770             req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
5771             req->cq_handle = cpu_to_le64(ring->handle);
5772             req->enables |= cpu_to_le32(
5773                 RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
5774         } else if (bp->flags & BNXT_FLAG_USING_MSIX) {
5775             req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5776         }
5777         break;
5778     case HWRM_RING_ALLOC_NQ:
5779         req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
5780         req->length = cpu_to_le32(bp->cp_ring_mask + 1);
5781         if (bp->flags & BNXT_FLAG_USING_MSIX)
5782             req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
5783         break;
5784     default:
5785         netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
5786                ring_type);
5787         return -1;
5788     }
5789 
5790     resp = hwrm_req_hold(bp, req);
5791     rc = hwrm_req_send(bp, req);
5792     err = le16_to_cpu(resp->error_code);
5793     ring_id = le16_to_cpu(resp->ring_id);
5794     hwrm_req_drop(bp, req);
5795 
5796 exit:
5797     if (rc || err) {
5798         netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
5799                ring_type, rc, err);
5800         return -EIO;
5801     }
5802     ring->fw_ring_id = ring_id;
5803     return rc;
5804 }
5805 
5806 static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
5807 {
5808     int rc;
5809 
5810     if (BNXT_PF(bp)) {
5811         struct hwrm_func_cfg_input *req;
5812 
5813         rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
5814         if (rc)
5815             return rc;
5816 
5817         req->fid = cpu_to_le16(0xffff);
5818         req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5819         req->async_event_cr = cpu_to_le16(idx);
5820         return hwrm_req_send(bp, req);
5821     } else {
5822         struct hwrm_func_vf_cfg_input *req;
5823 
5824         rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG);
5825         if (rc)
5826             return rc;
5827 
5828         req->enables =
5829             cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
5830         req->async_event_cr = cpu_to_le16(idx);
5831         return hwrm_req_send(bp, req);
5832     }
5833 }
5834 
5835 static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
5836             u32 map_idx, u32 xid)
5837 {
5838     if (bp->flags & BNXT_FLAG_CHIP_P5) {
5839         if (BNXT_PF(bp))
5840             db->doorbell = bp->bar1 + DB_PF_OFFSET_P5;
5841         else
5842             db->doorbell = bp->bar1 + DB_VF_OFFSET_P5;
5843         switch (ring_type) {
5844         case HWRM_RING_ALLOC_TX:
5845             db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
5846             break;
5847         case HWRM_RING_ALLOC_RX:
5848         case HWRM_RING_ALLOC_AGG:
5849             db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
5850             break;
5851         case HWRM_RING_ALLOC_CMPL:
5852             db->db_key64 = DBR_PATH_L2;
5853             break;
5854         case HWRM_RING_ALLOC_NQ:
5855             db->db_key64 = DBR_PATH_L2;
5856             break;
5857         }
5858         db->db_key64 |= (u64)xid << DBR_XID_SFT;
5859     } else {
5860         db->doorbell = bp->bar1 + map_idx * 0x80;
5861         switch (ring_type) {
5862         case HWRM_RING_ALLOC_TX:
5863             db->db_key32 = DB_KEY_TX;
5864             break;
5865         case HWRM_RING_ALLOC_RX:
5866         case HWRM_RING_ALLOC_AGG:
5867             db->db_key32 = DB_KEY_RX;
5868             break;
5869         case HWRM_RING_ALLOC_CMPL:
5870             db->db_key32 = DB_KEY_CP;
5871             break;
5872         }
5873     }
5874 }
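     /* Illustrative note: on P5 chips the 64-bit doorbell key is composed
      * once here (path, doorbell type and the firmware ring id in the XID
      * field); later doorbell writes only OR the current ring index into
      * db_key64.
      */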
5875 
5876 static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
5877 {
5878     bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
5879     int i, rc = 0;
5880     u32 type;
5881 
5882     if (bp->flags & BNXT_FLAG_CHIP_P5)
5883         type = HWRM_RING_ALLOC_NQ;
5884     else
5885         type = HWRM_RING_ALLOC_CMPL;
5886     for (i = 0; i < bp->cp_nr_rings; i++) {
5887         struct bnxt_napi *bnapi = bp->bnapi[i];
5888         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5889         struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
5890         u32 map_idx = ring->map_idx;
5891         unsigned int vector;
5892 
5893         vector = bp->irq_tbl[map_idx].vector;
5894         disable_irq_nosync(vector);
5895         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5896         if (rc) {
5897             enable_irq(vector);
5898             goto err_out;
5899         }
5900         bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
5901         bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
5902         enable_irq(vector);
5903         bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
5904 
5905         if (!i) {
5906             rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
5907             if (rc)
5908                 netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
5909         }
5910     }
5911 
5912     type = HWRM_RING_ALLOC_TX;
5913     for (i = 0; i < bp->tx_nr_rings; i++) {
5914         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
5915         struct bnxt_ring_struct *ring;
5916         u32 map_idx;
5917 
5918         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5919             struct bnxt_napi *bnapi = txr->bnapi;
5920             struct bnxt_cp_ring_info *cpr, *cpr2;
5921             u32 type2 = HWRM_RING_ALLOC_CMPL;
5922 
5923             cpr = &bnapi->cp_ring;
5924             cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
5925             ring = &cpr2->cp_ring_struct;
5926             ring->handle = BNXT_TX_HDL;
5927             map_idx = bnapi->index;
5928             rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5929             if (rc)
5930                 goto err_out;
5931             bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5932                     ring->fw_ring_id);
5933             bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5934         }
5935         ring = &txr->tx_ring_struct;
5936         map_idx = i;
5937         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5938         if (rc)
5939             goto err_out;
5940         bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
5941     }
5942 
5943     type = HWRM_RING_ALLOC_RX;
5944     for (i = 0; i < bp->rx_nr_rings; i++) {
5945         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5946         struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
5947         struct bnxt_napi *bnapi = rxr->bnapi;
5948         u32 map_idx = bnapi->index;
5949 
5950         rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5951         if (rc)
5952             goto err_out;
5953         bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
5954         /* If we have agg rings, post agg buffers first. */
5955         if (!agg_rings)
5956             bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5957         bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
5958         if (bp->flags & BNXT_FLAG_CHIP_P5) {
5959             struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5960             u32 type2 = HWRM_RING_ALLOC_CMPL;
5961             struct bnxt_cp_ring_info *cpr2;
5962 
5963             cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
5964             ring = &cpr2->cp_ring_struct;
5965             ring->handle = BNXT_RX_HDL;
5966             rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
5967             if (rc)
5968                 goto err_out;
5969             bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
5970                     ring->fw_ring_id);
5971             bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
5972         }
5973     }
5974 
5975     if (agg_rings) {
5976         type = HWRM_RING_ALLOC_AGG;
5977         for (i = 0; i < bp->rx_nr_rings; i++) {
5978             struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
5979             struct bnxt_ring_struct *ring =
5980                         &rxr->rx_agg_ring_struct;
5981             u32 grp_idx = ring->grp_idx;
5982             u32 map_idx = grp_idx + bp->rx_nr_rings;
5983 
5984             rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
5985             if (rc)
5986                 goto err_out;
5987 
5988             bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
5989                     ring->fw_ring_id);
5990             bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
5991             bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
5992             bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
5993         }
5994     }
5995 err_out:
5996     return rc;
5997 }
5998 
5999 static int hwrm_ring_free_send_msg(struct bnxt *bp,
6000                    struct bnxt_ring_struct *ring,
6001                    u32 ring_type, int cmpl_ring_id)
6002 {
6003     struct hwrm_ring_free_output *resp;
6004     struct hwrm_ring_free_input *req;
6005     u16 error_code = 0;
6006     int rc;
6007 
6008     if (BNXT_NO_FW_ACCESS(bp))
6009         return 0;
6010 
6011     rc = hwrm_req_init(bp, req, HWRM_RING_FREE);
6012     if (rc)
6013         goto exit;
6014 
6015     req->cmpl_ring = cpu_to_le16(cmpl_ring_id);
6016     req->ring_type = ring_type;
6017     req->ring_id = cpu_to_le16(ring->fw_ring_id);
6018 
6019     resp = hwrm_req_hold(bp, req);
6020     rc = hwrm_req_send(bp, req);
6021     error_code = le16_to_cpu(resp->error_code);
6022     hwrm_req_drop(bp, req);
6023 exit:
6024     if (rc || error_code) {
6025         netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
6026                ring_type, rc, error_code);
6027         return -EIO;
6028     }
6029     return 0;
6030 }
6031 
6032 static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
6033 {
6034     u32 type;
6035     int i;
6036 
6037     if (!bp->bnapi)
6038         return;
6039 
6040     for (i = 0; i < bp->tx_nr_rings; i++) {
6041         struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
6042         struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
6043 
6044         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6045             u32 cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
6046 
6047             hwrm_ring_free_send_msg(bp, ring,
6048                         RING_FREE_REQ_RING_TYPE_TX,
6049                         close_path ? cmpl_ring_id :
6050                         INVALID_HW_RING_ID);
6051             ring->fw_ring_id = INVALID_HW_RING_ID;
6052         }
6053     }
6054 
6055     for (i = 0; i < bp->rx_nr_rings; i++) {
6056         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6057         struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
6058         u32 grp_idx = rxr->bnapi->index;
6059 
6060         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6061             u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6062 
6063             hwrm_ring_free_send_msg(bp, ring,
6064                         RING_FREE_REQ_RING_TYPE_RX,
6065                         close_path ? cmpl_ring_id :
6066                         INVALID_HW_RING_ID);
6067             ring->fw_ring_id = INVALID_HW_RING_ID;
6068             bp->grp_info[grp_idx].rx_fw_ring_id =
6069                 INVALID_HW_RING_ID;
6070         }
6071     }
6072 
6073     if (bp->flags & BNXT_FLAG_CHIP_P5)
6074         type = RING_FREE_REQ_RING_TYPE_RX_AGG;
6075     else
6076         type = RING_FREE_REQ_RING_TYPE_RX;
6077     for (i = 0; i < bp->rx_nr_rings; i++) {
6078         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
6079         struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
6080         u32 grp_idx = rxr->bnapi->index;
6081 
6082         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6083             u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
6084 
6085             hwrm_ring_free_send_msg(bp, ring, type,
6086                         close_path ? cmpl_ring_id :
6087                         INVALID_HW_RING_ID);
6088             ring->fw_ring_id = INVALID_HW_RING_ID;
6089             bp->grp_info[grp_idx].agg_fw_ring_id =
6090                 INVALID_HW_RING_ID;
6091         }
6092     }
6093 
6094     /* The completion rings are about to be freed.  After that the
6095      * IRQ doorbell will not work anymore.  So we need to disable
6096      * IRQ here.
6097      */
6098     bnxt_disable_int_sync(bp);
6099 
6100     if (bp->flags & BNXT_FLAG_CHIP_P5)
6101         type = RING_FREE_REQ_RING_TYPE_NQ;
6102     else
6103         type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
6104     for (i = 0; i < bp->cp_nr_rings; i++) {
6105         struct bnxt_napi *bnapi = bp->bnapi[i];
6106         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6107         struct bnxt_ring_struct *ring;
6108         int j;
6109 
6110         for (j = 0; j < 2; j++) {
6111             struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
6112 
6113             if (cpr2) {
6114                 ring = &cpr2->cp_ring_struct;
6115                 if (ring->fw_ring_id == INVALID_HW_RING_ID)
6116                     continue;
6117                 hwrm_ring_free_send_msg(bp, ring,
6118                     RING_FREE_REQ_RING_TYPE_L2_CMPL,
6119                     INVALID_HW_RING_ID);
6120                 ring->fw_ring_id = INVALID_HW_RING_ID;
6121             }
6122         }
6123         ring = &cpr->cp_ring_struct;
6124         if (ring->fw_ring_id != INVALID_HW_RING_ID) {
6125             hwrm_ring_free_send_msg(bp, ring, type,
6126                         INVALID_HW_RING_ID);
6127             ring->fw_ring_id = INVALID_HW_RING_ID;
6128             bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
6129         }
6130     }
6131 }
6132 
6133 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6134                bool shared);
6135 
6136 static int bnxt_hwrm_get_rings(struct bnxt *bp)
6137 {
6138     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6139     struct hwrm_func_qcfg_output *resp;
6140     struct hwrm_func_qcfg_input *req;
6141     int rc;
6142 
6143     if (bp->hwrm_spec_code < 0x10601)
6144         return 0;
6145 
6146     rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6147     if (rc)
6148         return rc;
6149 
6150     req->fid = cpu_to_le16(0xffff);
6151     resp = hwrm_req_hold(bp, req);
6152     rc = hwrm_req_send(bp, req);
6153     if (rc) {
6154         hwrm_req_drop(bp, req);
6155         return rc;
6156     }
6157 
6158     hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6159     if (BNXT_NEW_RM(bp)) {
6160         u16 cp, stats;
6161 
6162         hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
6163         hw_resc->resv_hw_ring_grps =
6164             le32_to_cpu(resp->alloc_hw_ring_grps);
6165         hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
6166         cp = le16_to_cpu(resp->alloc_cmpl_rings);
6167         stats = le16_to_cpu(resp->alloc_stat_ctx);
6168         hw_resc->resv_irqs = cp;
6169         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6170             int rx = hw_resc->resv_rx_rings;
6171             int tx = hw_resc->resv_tx_rings;
6172 
6173             if (bp->flags & BNXT_FLAG_AGG_RINGS)
6174                 rx >>= 1;
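                 /* With aggregation rings, each rx ring has an agg companion
                  * and the reserved rx count covers both, so halve it before
                  * trimming and double it back afterwards.
                  */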
6175             if (cp < (rx + tx)) {
6176                 bnxt_trim_rings(bp, &rx, &tx, cp, false);
6177                 if (bp->flags & BNXT_FLAG_AGG_RINGS)
6178                     rx <<= 1;
6179                 hw_resc->resv_rx_rings = rx;
6180                 hw_resc->resv_tx_rings = tx;
6181             }
6182             hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
6183             hw_resc->resv_hw_ring_grps = rx;
6184         }
6185         hw_resc->resv_cp_rings = cp;
6186         hw_resc->resv_stat_ctxs = stats;
6187     }
6188     hwrm_req_drop(bp, req);
6189     return 0;
6190 }
6191 
6192 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
6193 {
6194     struct hwrm_func_qcfg_output *resp;
6195     struct hwrm_func_qcfg_input *req;
6196     int rc;
6197 
6198     if (bp->hwrm_spec_code < 0x10601)
6199         return 0;
6200 
6201     rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6202     if (rc)
6203         return rc;
6204 
6205     req->fid = cpu_to_le16(fid);
6206     resp = hwrm_req_hold(bp, req);
6207     rc = hwrm_req_send(bp, req);
6208     if (!rc)
6209         *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
6210 
6211     hwrm_req_drop(bp, req);
6212     return rc;
6213 }
6214 
6215 static bool bnxt_rfs_supported(struct bnxt *bp);
6216 
6217 static struct hwrm_func_cfg_input *
6218 __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6219                  int ring_grps, int cp_rings, int stats, int vnics)
6220 {
6221     struct hwrm_func_cfg_input *req;
6222     u32 enables = 0;
6223 
6224     if (hwrm_req_init(bp, req, HWRM_FUNC_CFG))
6225         return NULL;
6226 
6227     req->fid = cpu_to_le16(0xffff);
6228     enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6229     req->num_tx_rings = cpu_to_le16(tx_rings);
6230     if (BNXT_NEW_RM(bp)) {
6231         enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
6232         enables |= stats ? FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6233         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6234             enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
6235             enables |= tx_rings + ring_grps ?
6236                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6237             enables |= rx_rings ?
6238                 FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6239         } else {
6240             enables |= cp_rings ?
6241                    FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6242             enables |= ring_grps ?
6243                    FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
6244                    FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6245         }
6246         enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
6247 
6248         req->num_rx_rings = cpu_to_le16(rx_rings);
6249         if (bp->flags & BNXT_FLAG_CHIP_P5) {
6250             req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6251             req->num_msix = cpu_to_le16(cp_rings);
6252             req->num_rsscos_ctxs =
6253                 cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6254         } else {
6255             req->num_cmpl_rings = cpu_to_le16(cp_rings);
6256             req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6257             req->num_rsscos_ctxs = cpu_to_le16(1);
6258             if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
6259                 bnxt_rfs_supported(bp))
6260                 req->num_rsscos_ctxs =
6261                     cpu_to_le16(ring_grps + 1);
6262         }
6263         req->num_stat_ctxs = cpu_to_le16(stats);
6264         req->num_vnics = cpu_to_le16(vnics);
6265     }
6266     req->enables = cpu_to_le32(enables);
6267     return req;
6268 }
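     /* Note on the P5 accounting above: each tx ring and each rx ring
      * (passed in via ring_grps here) gets its own hardware completion
      * queue, so num_cmpl_rings is requested as tx_rings + ring_grps while
      * the NQ/MSI-X count goes into num_msix.
      */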
6269 
6270 static struct hwrm_func_vf_cfg_input *
6271 __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6272                  int ring_grps, int cp_rings, int stats, int vnics)
6273 {
6274     struct hwrm_func_vf_cfg_input *req;
6275     u32 enables = 0;
6276 
6277     if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG))
6278         return NULL;
6279 
6280     enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
6281     enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
6282                   FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
6283     enables |= stats ? FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
6284     if (bp->flags & BNXT_FLAG_CHIP_P5) {
6285         enables |= tx_rings + ring_grps ?
6286                FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6287     } else {
6288         enables |= cp_rings ?
6289                FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS : 0;
6290         enables |= ring_grps ?
6291                FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
6292     }
6293     enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
6294     enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;
6295 
6296     req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
6297     req->num_tx_rings = cpu_to_le16(tx_rings);
6298     req->num_rx_rings = cpu_to_le16(rx_rings);
6299     if (bp->flags & BNXT_FLAG_CHIP_P5) {
6300         req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
6301         req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
6302     } else {
6303         req->num_cmpl_rings = cpu_to_le16(cp_rings);
6304         req->num_hw_ring_grps = cpu_to_le16(ring_grps);
6305         req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
6306     }
6307     req->num_stat_ctxs = cpu_to_le16(stats);
6308     req->num_vnics = cpu_to_le16(vnics);
6309 
6310     req->enables = cpu_to_le32(enables);
6311     return req;
6312 }
6313 
6314 static int
6315 bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6316                int ring_grps, int cp_rings, int stats, int vnics)
6317 {
6318     struct hwrm_func_cfg_input *req;
6319     int rc;
6320 
6321     req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6322                        cp_rings, stats, vnics);
6323     if (!req)
6324         return -ENOMEM;
6325 
6326     if (!req->enables) {
6327         hwrm_req_drop(bp, req);
6328         return 0;
6329     }
6330 
6331     rc = hwrm_req_send(bp, req);
6332     if (rc)
6333         return rc;
6334 
6335     if (bp->hwrm_spec_code < 0x10601)
6336         bp->hw_resc.resv_tx_rings = tx_rings;
6337 
6338     return bnxt_hwrm_get_rings(bp);
6339 }
6340 
6341 static int
6342 bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6343                int ring_grps, int cp_rings, int stats, int vnics)
6344 {
6345     struct hwrm_func_vf_cfg_input *req;
6346     int rc;
6347 
6348     if (!BNXT_NEW_RM(bp)) {
6349         bp->hw_resc.resv_tx_rings = tx_rings;
6350         return 0;
6351     }
6352 
6353     req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6354                        cp_rings, stats, vnics);
6355     if (!req)
6356         return -ENOMEM;
6357 
6358     rc = hwrm_req_send(bp, req);
6359     if (rc)
6360         return rc;
6361 
6362     return bnxt_hwrm_get_rings(bp);
6363 }
6364 
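/* Reserve rings and related resources with firmware, dispatching to the PF
 * or VF variant as appropriate.
 */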
6365 static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
6366                    int cp, int stat, int vnic)
6367 {
6368     if (BNXT_PF(bp))
6369         return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat,
6370                           vnic);
6371     else
6372         return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat,
6373                           vnic);
6374 }
6375 
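/* Number of NQ/completion ring vectors in use, including any MSI-X vectors
 * set aside for the RDMA ULP.
 */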
6376 int bnxt_nq_rings_in_use(struct bnxt *bp)
6377 {
6378     int cp = bp->cp_nr_rings;
6379     int ulp_msix, ulp_base;
6380 
6381     ulp_msix = bnxt_get_ulp_msix_num(bp);
6382     if (ulp_msix) {
6383         ulp_base = bnxt_get_ulp_msix_base(bp);
6384         cp += ulp_msix;
6385         if ((ulp_base + ulp_msix) > cp)
6386             cp = ulp_base + ulp_msix;
6387     }
6388     return cp;
6389 }
6390 
6391 static int bnxt_cp_rings_in_use(struct bnxt *bp)
6392 {
6393     int cp;
6394 
6395     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6396         return bnxt_nq_rings_in_use(bp);
6397 
6398     cp = bp->tx_nr_rings + bp->rx_nr_rings;
6399     return cp;
6400 }
6401 
6402 static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
6403 {
6404     int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
6405     int cp = bp->cp_nr_rings;
6406 
6407     if (!ulp_stat)
6408         return cp;
6409 
6410     if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
6411         return bnxt_get_ulp_msix_base(bp) + ulp_stat;
6412 
6413     return cp + ulp_stat;
6414 }
6415 
6416 /* Check if a default RSS map needs to be set up.  This function is only
6417  * used on older firmware that does not require reserving RX rings.
6418  */
6419 static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
6420 {
6421     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6422 
6423     /* The RSS map is valid for RX rings set to resv_rx_rings */
6424     if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
6425         hw_resc->resv_rx_rings = bp->rx_nr_rings;
6426         if (!netif_is_rxfh_configured(bp->dev))
6427             bnxt_set_dflt_rss_indir_tbl(bp);
6428     }
6429 }
6430 
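/* Return true if the resources currently reserved with firmware no longer
 * match what the driver needs, i.e. a new reservation is required.
 */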
6431 static bool bnxt_need_reserve_rings(struct bnxt *bp)
6432 {
6433     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6434     int cp = bnxt_cp_rings_in_use(bp);
6435     int nq = bnxt_nq_rings_in_use(bp);
6436     int rx = bp->rx_nr_rings, stat;
6437     int vnic = 1, grp = rx;
6438 
6439     if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
6440         bp->hwrm_spec_code >= 0x10601)
6441         return true;
6442 
6443     /* Old firmware does not need RX ring reservations but we still
6444      * need to set up a default RSS map when needed.  With new firmware
6445      * we go through RX ring reservations first and then set up the
6446      * RSS map for the successfully reserved RX rings when needed.
6447      */
6448     if (!BNXT_NEW_RM(bp)) {
6449         bnxt_check_rss_tbl_no_rmgr(bp);
6450         return false;
6451     }
6452     if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6453         vnic = rx + 1;
6454     if (bp->flags & BNXT_FLAG_AGG_RINGS)
6455         rx <<= 1;
6456     stat = bnxt_get_func_stat_ctxs(bp);
6457     if (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
6458         hw_resc->resv_vnics != vnic || hw_resc->resv_stat_ctxs != stat ||
6459         (hw_resc->resv_hw_ring_grps != grp &&
6460          !(bp->flags & BNXT_FLAG_CHIP_P5)))
6461         return true;
6462     if ((bp->flags & BNXT_FLAG_CHIP_P5) && BNXT_PF(bp) &&
6463         hw_resc->resv_irqs != nq)
6464         return true;
6465     return false;
6466 }
6467 
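/* Reserve rings and related resources with firmware, then trim the driver's
 * ring counts down to what was actually granted.  Returns -ENOMEM if any
 * required resource could not be reserved.
 */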
6468 static int __bnxt_reserve_rings(struct bnxt *bp)
6469 {
6470     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6471     int cp = bnxt_nq_rings_in_use(bp);
6472     int tx = bp->tx_nr_rings;
6473     int rx = bp->rx_nr_rings;
6474     int grp, rx_rings, rc;
6475     int vnic = 1, stat;
6476     bool sh = false;
6477 
6478     if (!bnxt_need_reserve_rings(bp))
6479         return 0;
6480 
6481     if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6482         sh = true;
6483     if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
6484         vnic = rx + 1;
6485     if (bp->flags & BNXT_FLAG_AGG_RINGS)
6486         rx <<= 1;
6487     grp = bp->rx_nr_rings;
6488     stat = bnxt_get_func_stat_ctxs(bp);
6489 
6490     rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic);
6491     if (rc)
6492         return rc;
6493 
6494     tx = hw_resc->resv_tx_rings;
6495     if (BNXT_NEW_RM(bp)) {
6496         rx = hw_resc->resv_rx_rings;
6497         cp = hw_resc->resv_irqs;
6498         grp = hw_resc->resv_hw_ring_grps;
6499         vnic = hw_resc->resv_vnics;
6500         stat = hw_resc->resv_stat_ctxs;
6501     }
6502 
6503     rx_rings = rx;
6504     if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6505         if (rx >= 2) {
6506             rx_rings = rx >> 1;
6507         } else {
6508             if (netif_running(bp->dev))
6509                 return -ENOMEM;
6510 
6511             bp->flags &= ~BNXT_FLAG_AGG_RINGS;
6512             bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
6513             bp->dev->hw_features &= ~NETIF_F_LRO;
6514             bp->dev->features &= ~NETIF_F_LRO;
6515             bnxt_set_ring_params(bp);
6516         }
6517     }
6518     rx_rings = min_t(int, rx_rings, grp);
6519     cp = min_t(int, cp, bp->cp_nr_rings);
6520     if (stat > bnxt_get_ulp_stat_ctxs(bp))
6521         stat -= bnxt_get_ulp_stat_ctxs(bp);
6522     cp = min_t(int, cp, stat);
6523     rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
6524     if (bp->flags & BNXT_FLAG_AGG_RINGS)
6525         rx = rx_rings << 1;
6526     cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
6527     bp->tx_nr_rings = tx;
6528 
6529     /* If we cannot reserve all the RX rings, reset the RSS map only
6530      * if absolutely necessary.
6531      */
6532     if (rx_rings != bp->rx_nr_rings) {
6533         netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
6534                 rx_rings, bp->rx_nr_rings);
6535         if (netif_is_rxfh_configured(bp->dev) &&
6536             (bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
6537              bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
6538              bnxt_get_max_rss_ring(bp) >= rx_rings)) {
6539             netdev_warn(bp->dev, "RSS table entries reverting to default\n");
6540             bp->dev->priv_flags &= ~IFF_RXFH_CONFIGURED;
6541         }
6542     }
6543     bp->rx_nr_rings = rx_rings;
6544     bp->cp_nr_rings = cp;
6545 
6546     if (!tx || !rx || !cp || !grp || !vnic || !stat)
6547         return -ENOMEM;
6548 
6549     if (!netif_is_rxfh_configured(bp->dev))
6550         bnxt_set_dflt_rss_indir_tbl(bp);
6551 
6552     return rc;
6553 }
6554 
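/* The "check" helpers below build the same reservation requests as the
 * reserve helpers, but set the *_ASSETS_TEST flags so firmware only
 * verifies that the resources are available without reserving them.
 */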
6555 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6556                     int ring_grps, int cp_rings, int stats,
6557                     int vnics)
6558 {
6559     struct hwrm_func_vf_cfg_input *req;
6560     u32 flags;
6561 
6562     if (!BNXT_NEW_RM(bp))
6563         return 0;
6564 
6565     req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6566                        cp_rings, stats, vnics);
         if (!req)
             return -ENOMEM;
6567     flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
6568         FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6569         FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6570         FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6571         FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
6572         FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
6573     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6574         flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6575 
6576     req->flags = cpu_to_le32(flags);
6577     return hwrm_req_send_silent(bp, req);
6578 }
6579 
6580 static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6581                     int ring_grps, int cp_rings, int stats,
6582                     int vnics)
6583 {
6584     struct hwrm_func_cfg_input *req;
6585     u32 flags;
6586 
6587     req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps,
6588                        cp_rings, stats, vnics);
         if (!req)
             return -ENOMEM;
6589     flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
6590     if (BNXT_NEW_RM(bp)) {
6591         flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
6592              FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
6593              FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
6594              FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
6595         if (bp->flags & BNXT_FLAG_CHIP_P5)
6596             flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST |
6597                  FUNC_CFG_REQ_FLAGS_NQ_ASSETS_TEST;
6598         else
6599             flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
6600     }
6601 
6602     req->flags = cpu_to_le32(flags);
6603     return hwrm_req_send_silent(bp, req);
6604 }
6605 
6606 static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
6607                  int ring_grps, int cp_rings, int stats,
6608                  int vnics)
6609 {
6610     if (bp->hwrm_spec_code < 0x10801)
6611         return 0;
6612 
6613     if (BNXT_PF(bp))
6614         return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
6615                         ring_grps, cp_rings, stats,
6616                         vnics);
6617 
6618     return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
6619                     cp_rings, stats, vnics);
6620 }
6621 
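/* Query the interrupt coalescing parameters supported by firmware.  Legacy
 * defaults are filled in first so that older firmware (pre-1.9.2) that does
 * not implement HWRM_RING_AGGINT_QCAPS still gets sane limits.
 */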
6622 static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
6623 {
6624     struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6625     struct hwrm_ring_aggint_qcaps_output *resp;
6626     struct hwrm_ring_aggint_qcaps_input *req;
6627     int rc;
6628 
6629     coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
6630     coal_cap->num_cmpl_dma_aggr_max = 63;
6631     coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
6632     coal_cap->cmpl_aggr_dma_tmr_max = 65535;
6633     coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
6634     coal_cap->int_lat_tmr_min_max = 65535;
6635     coal_cap->int_lat_tmr_max_max = 65535;
6636     coal_cap->num_cmpl_aggr_int_max = 65535;
6637     coal_cap->timer_units = 80;
6638 
6639     if (bp->hwrm_spec_code < 0x10902)
6640         return;
6641 
6642     if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS))
6643         return;
6644 
6645     resp = hwrm_req_hold(bp, req);
6646     rc = hwrm_req_send_silent(bp, req);
6647     if (!rc) {
6648         coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
6649         coal_cap->nq_params = le32_to_cpu(resp->nq_params);
6650         coal_cap->num_cmpl_dma_aggr_max =
6651             le16_to_cpu(resp->num_cmpl_dma_aggr_max);
6652         coal_cap->num_cmpl_dma_aggr_during_int_max =
6653             le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
6654         coal_cap->cmpl_aggr_dma_tmr_max =
6655             le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
6656         coal_cap->cmpl_aggr_dma_tmr_during_int_max =
6657             le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
6658         coal_cap->int_lat_tmr_min_max =
6659             le16_to_cpu(resp->int_lat_tmr_min_max);
6660         coal_cap->int_lat_tmr_max_max =
6661             le16_to_cpu(resp->int_lat_tmr_max_max);
6662         coal_cap->num_cmpl_aggr_int_max =
6663             le16_to_cpu(resp->num_cmpl_aggr_int_max);
6664         coal_cap->timer_units = le16_to_cpu(resp->timer_units);
6665     }
6666     hwrm_req_drop(bp, req);
6667 }
6668 
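/* Convert a microsecond value to coalescing timer ticks using the
 * firmware-reported timer_units (nanoseconds per tick).
 */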
6669 static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
6670 {
6671     struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6672 
6673     return usec * 1000 / coal_cap->timer_units;
6674 }
6675 
6676 static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
6677     struct bnxt_coal *hw_coal,
6678     struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
6679 {
6680     struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6681     u16 val, tmr, max, flags = hw_coal->flags;
6682     u32 cmpl_params = coal_cap->cmpl_params;
6683 
6684     max = hw_coal->bufs_per_record * 128;
6685     if (hw_coal->budget)
6686         max = hw_coal->bufs_per_record * hw_coal->budget;
6687     max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);
6688 
6689     val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
6690     req->num_cmpl_aggr_int = cpu_to_le16(val);
6691 
6692     val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
6693     req->num_cmpl_dma_aggr = cpu_to_le16(val);
6694 
6695     val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
6696               coal_cap->num_cmpl_dma_aggr_during_int_max);
6697     req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
6698 
6699     tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
6700     tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
6701     req->int_lat_tmr_max = cpu_to_le16(tmr);
6702 
6703     /* min timer set to 1/2 of interrupt timer */
6704     if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
6705         val = tmr / 2;
6706         val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
6707         req->int_lat_tmr_min = cpu_to_le16(val);
6708         req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6709     }
6710 
6711     /* buf timer set to 1/4 of interrupt timer */
6712     val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
6713     req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
6714 
6715     if (cmpl_params &
6716         RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
6717         tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
6718         val = clamp_t(u16, tmr, 1,
6719                   coal_cap->cmpl_aggr_dma_tmr_during_int_max);
6720         req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
6721         req->enables |=
6722             cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
6723     }
6724 
6725     if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
6726         hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
6727         flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
6728     req->flags = cpu_to_le16(flags);
6729     req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
6730 }
6731 
6732 static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
6733                    struct bnxt_coal *hw_coal)
6734 {
6735     struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req;
6736     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6737     struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
6738     u32 nq_params = coal_cap->nq_params;
6739     u16 tmr;
6740     int rc;
6741 
6742     if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
6743         return 0;
6744 
6745     rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6746     if (rc)
6747         return rc;
6748 
6749     req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
6750     req->flags =
6751         cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);
6752 
6753     tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
6754     tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
6755     req->int_lat_tmr_min = cpu_to_le16(tmr);
6756     req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
6757     return hwrm_req_send(bp, req);
6758 }
6759 
6760 int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
6761 {
6762     struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx;
6763     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6764     struct bnxt_coal coal;
6765     int rc;
6766 
6767     /* Tick values in microseconds.
6768      * 1 coal_buf x bufs_per_record = 1 completion record.
6769      */
6770     memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
6771 
6772     coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
6773     coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
6774 
6775     if (!bnapi->rx_ring)
6776         return -ENODEV;
6777 
6778     rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6779     if (rc)
6780         return rc;
6781 
6782     bnxt_hwrm_set_coal_params(bp, &coal, req_rx);
6783 
6784     req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
6785 
6786     return hwrm_req_send(bp, req_rx);
6787 }
6788 
6789 int bnxt_hwrm_set_coal(struct bnxt *bp)
6790 {
6791     struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx,
6792                                *req;
6793     int i, rc;
6794 
6795     rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6796     if (rc)
6797         return rc;
6798 
6799     rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
6800     if (rc) {
6801         hwrm_req_drop(bp, req_rx);
6802         return rc;
6803     }
6804 
6805     bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx);
6806     bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx);
6807 
6808     hwrm_req_hold(bp, req_rx);
6809     hwrm_req_hold(bp, req_tx);
6810     for (i = 0; i < bp->cp_nr_rings; i++) {
6811         struct bnxt_napi *bnapi = bp->bnapi[i];
6812         struct bnxt_coal *hw_coal;
6813         u16 ring_id;
6814 
6815         req = req_rx;
6816         if (!bnapi->rx_ring) {
6817             ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6818             req = req_tx;
6819         } else {
6820             ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
6821         }
6822         req->ring_id = cpu_to_le16(ring_id);
6823 
6824         rc = hwrm_req_send(bp, req);
6825         if (rc)
6826             break;
6827 
6828         if (!(bp->flags & BNXT_FLAG_CHIP_P5))
6829             continue;
6830 
6831         if (bnapi->rx_ring && bnapi->tx_ring) {
6832             req = req_tx;
6833             ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
6834             req->ring_id = cpu_to_le16(ring_id);
6835             rc = hwrm_req_send(bp, req);
6836             if (rc)
6837                 break;
6838         }
6839         if (bnapi->rx_ring)
6840             hw_coal = &bp->rx_coal;
6841         else
6842             hw_coal = &bp->tx_coal;
6843         __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
6844     }
6845     hwrm_req_drop(bp, req_rx);
6846     hwrm_req_drop(bp, req_tx);
6847     return rc;
6848 }
6849 
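/* Free all statistics contexts.  On firmware with major version <= 20 the
 * counters are explicitly cleared with HWRM_STAT_CTX_CLR_STATS before each
 * context is freed.
 */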
6850 static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
6851 {
6852     struct hwrm_stat_ctx_clr_stats_input *req0 = NULL;
6853     struct hwrm_stat_ctx_free_input *req;
6854     int i;
6855 
6856     if (!bp->bnapi)
6857         return;
6858 
6859     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6860         return;
6861 
6862     if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE))
6863         return;
6864     if (BNXT_FW_MAJ(bp) <= 20) {
6865         if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) {
6866             hwrm_req_drop(bp, req);
6867             return;
6868         }
6869         hwrm_req_hold(bp, req0);
6870     }
6871     hwrm_req_hold(bp, req);
6872     for (i = 0; i < bp->cp_nr_rings; i++) {
6873         struct bnxt_napi *bnapi = bp->bnapi[i];
6874         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6875 
6876         if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
6877             req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
6878             if (req0) {
6879                 req0->stat_ctx_id = req->stat_ctx_id;
6880                 hwrm_req_send(bp, req0);
6881             }
6882             hwrm_req_send(bp, req);
6883 
6884             cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
6885         }
6886     }
6887     hwrm_req_drop(bp, req);
6888     if (req0)
6889         hwrm_req_drop(bp, req0);
6890 }
6891 
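/* Allocate one statistics context per completion ring and record the
 * firmware-assigned context IDs in the per-ring and group info structures.
 */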
6892 static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
6893 {
6894     struct hwrm_stat_ctx_alloc_output *resp;
6895     struct hwrm_stat_ctx_alloc_input *req;
6896     int rc, i;
6897 
6898     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6899         return 0;
6900 
6901     rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC);
6902     if (rc)
6903         return rc;
6904 
6905     req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size);
6906     req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
6907 
6908     resp = hwrm_req_hold(bp, req);
6909     for (i = 0; i < bp->cp_nr_rings; i++) {
6910         struct bnxt_napi *bnapi = bp->bnapi[i];
6911         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
6912 
6913         req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map);
6914 
6915         rc = hwrm_req_send(bp, req);
6916         if (rc)
6917             break;
6918 
6919         cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
6920 
6921         bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
6922     }
6923     hwrm_req_drop(bp, req);
6924     return rc;
6925 }
6926 
6927 static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
6928 {
6929     struct hwrm_func_qcfg_output *resp;
6930     struct hwrm_func_qcfg_input *req;
6931     u32 min_db_offset = 0;
6932     u16 flags;
6933     int rc;
6934 
6935     rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
6936     if (rc)
6937         return rc;
6938 
6939     req->fid = cpu_to_le16(0xffff);
6940     resp = hwrm_req_hold(bp, req);
6941     rc = hwrm_req_send(bp, req);
6942     if (rc)
6943         goto func_qcfg_exit;
6944 
6945 #ifdef CONFIG_BNXT_SRIOV
6946     if (BNXT_VF(bp)) {
6947         struct bnxt_vf_info *vf = &bp->vf;
6948 
6949         vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
6950     } else {
6951         bp->pf.registered_vfs = le16_to_cpu(resp->registered_vfs);
6952     }
6953 #endif
6954     flags = le16_to_cpu(resp->flags);
6955     if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
6956              FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
6957         bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
6958         if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
6959             bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
6960     }
6961     if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
6962         bp->flags |= BNXT_FLAG_MULTI_HOST;
6963     if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
6964         bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
6965 
6966     switch (resp->port_partition_type) {
6967     case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
6968     case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
6969     case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
6970         bp->port_partition_type = resp->port_partition_type;
6971         break;
6972     }
6973     if (bp->hwrm_spec_code < 0x10707 ||
6974         resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
6975         bp->br_mode = BRIDGE_MODE_VEB;
6976     else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
6977         bp->br_mode = BRIDGE_MODE_VEPA;
6978     else
6979         bp->br_mode = BRIDGE_MODE_UNDEF;
6980 
6981     bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
6982     if (!bp->max_mtu)
6983         bp->max_mtu = BNXT_MAX_MTU;
6984 
6985     if (bp->db_size)
6986         goto func_qcfg_exit;
6987 
6988     if (bp->flags & BNXT_FLAG_CHIP_P5) {
6989         if (BNXT_PF(bp))
6990             min_db_offset = DB_PF_OFFSET_P5;
6991         else
6992             min_db_offset = DB_VF_OFFSET_P5;
6993     }
6994     bp->db_size = PAGE_ALIGN(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
6995                  1024);
6996     if (!bp->db_size || bp->db_size > pci_resource_len(bp->pdev, 2) ||
6997         bp->db_size <= min_db_offset)
6998         bp->db_size = pci_resource_len(bp->pdev, 2);
6999 
7000 func_qcfg_exit:
7001     hwrm_req_drop(bp, req);
7002     return rc;
7003 }
7004 
7005 static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
7006             struct hwrm_func_backing_store_qcaps_output *resp)
7007 {
7008     struct bnxt_mem_init *mem_init;
7009     u16 init_mask;
7010     u8 init_val;
7011     u8 *offset;
7012     int i;
7013 
7014     init_val = resp->ctx_kind_initializer;
7015     init_mask = le16_to_cpu(resp->ctx_init_mask);
7016     offset = &resp->qp_init_offset;
7017     mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7018     for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
7019         mem_init->init_val = init_val;
7020         mem_init->offset = BNXT_MEM_INVALID_OFFSET;
7021         if (!init_mask)
7022             continue;
7023         if (i == BNXT_CTX_MEM_INIT_STAT)
7024             offset = &resp->stat_init_offset;
7025         if (init_mask & (1 << i))
7026             mem_init->offset = *offset * 4;
7027         else
7028             mem_init->init_val = 0;
7029     }
7030     ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
7031     ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
7032     ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
7033     ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
7034     ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
7035     ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
7036 }
7037 
7038 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
7039 {
7040     struct hwrm_func_backing_store_qcaps_output *resp;
7041     struct hwrm_func_backing_store_qcaps_input *req;
7042     int rc;
7043 
7044     if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
7045         return 0;
7046 
7047     rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS);
7048     if (rc)
7049         return rc;
7050 
7051     resp = hwrm_req_hold(bp, req);
7052     rc = hwrm_req_send_silent(bp, req);
7053     if (!rc) {
7054         struct bnxt_ctx_pg_info *ctx_pg;
7055         struct bnxt_ctx_mem_info *ctx;
7056         int i, tqm_rings;
7057 
7058         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
7059         if (!ctx) {
7060             rc = -ENOMEM;
7061             goto ctx_err;
7062         }
7063         ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
7064         ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
7065         ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
7066         ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
7067         ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
7068         ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
7069         ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
7070         ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
7071         ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
7072         ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
7073         ctx->vnic_max_vnic_entries =
7074             le16_to_cpu(resp->vnic_max_vnic_entries);
7075         ctx->vnic_max_ring_table_entries =
7076             le16_to_cpu(resp->vnic_max_ring_table_entries);
7077         ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
7078         ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
7079         ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
7080         ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
7081         ctx->tqm_min_entries_per_ring =
7082             le32_to_cpu(resp->tqm_min_entries_per_ring);
7083         ctx->tqm_max_entries_per_ring =
7084             le32_to_cpu(resp->tqm_max_entries_per_ring);
7085         ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
7086         if (!ctx->tqm_entries_multiple)
7087             ctx->tqm_entries_multiple = 1;
7088         ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
7089         ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
7090         ctx->mrav_num_entries_units =
7091             le16_to_cpu(resp->mrav_num_entries_units);
7092         ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
7093         ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
7094 
7095         bnxt_init_ctx_initializer(ctx, resp);
7096 
7097         ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
7098         if (!ctx->tqm_fp_rings_count)
7099             ctx->tqm_fp_rings_count = bp->max_q;
7100         else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
7101             ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
7102 
7103         tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
7104         ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
7105         if (!ctx_pg) {
7106             kfree(ctx);
7107             rc = -ENOMEM;
7108             goto ctx_err;
7109         }
7110         for (i = 0; i < tqm_rings; i++, ctx_pg++)
7111             ctx->tqm_mem[i] = ctx_pg;
7112         bp->ctx = ctx;
7113     } else {
7114         rc = 0;
7115     }
7116 ctx_err:
7117     hwrm_req_drop(bp, req);
7118     return rc;
7119 }
7120 
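/* Encode the page size and indirection level of a context memory table into
 * the request's page attribute field, and point the page directory at either
 * the page table or the single data page.
 */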
7121 static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
7122                   __le64 *pg_dir)
7123 {
7124     if (!rmem->nr_pages)
7125         return;
7126 
7127     BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
7128     if (rmem->depth >= 1) {
7129         if (rmem->depth == 2)
7130             *pg_attr |= 2;
7131         else
7132             *pg_attr |= 1;
7133         *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
7134     } else {
7135         *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
7136     }
7137 }
7138 
7139 #define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES         \
7140     (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP |        \
7141      FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ |       \
7142      FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ |        \
7143      FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC |      \
7144      FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
7145 
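/* Pass the backing store (context memory) page tables allocated by
 * bnxt_alloc_ctx_mem() to firmware.  @enables selects which context types
 * (QP, SRQ, CQ, VNIC, STAT, MRAV, TIM, TQM) are included in the request.
 */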
7146 static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
7147 {
7148     struct hwrm_func_backing_store_cfg_input *req;
7149     struct bnxt_ctx_mem_info *ctx = bp->ctx;
7150     struct bnxt_ctx_pg_info *ctx_pg;
7151     void **__req = (void **)&req;
7152     u32 req_len = sizeof(*req);
7153     __le32 *num_entries;
7154     __le64 *pg_dir;
7155     u32 flags = 0;
7156     u8 *pg_attr;
7157     u32 ena;
7158     int rc;
7159     int i;
7160 
7161     if (!ctx)
7162         return 0;
7163 
7164     if (req_len > bp->hwrm_max_ext_req_len)
7165         req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
7166     rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len);
7167     if (rc)
7168         return rc;
7169 
7170     req->enables = cpu_to_le32(enables);
7171     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
7172         ctx_pg = &ctx->qp_mem;
7173         req->qp_num_entries = cpu_to_le32(ctx_pg->entries);
7174         req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
7175         req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
7176         req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
7177         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7178                       &req->qpc_pg_size_qpc_lvl,
7179                       &req->qpc_page_dir);
7180     }
7181     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
7182         ctx_pg = &ctx->srq_mem;
7183         req->srq_num_entries = cpu_to_le32(ctx_pg->entries);
7184         req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
7185         req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
7186         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7187                       &req->srq_pg_size_srq_lvl,
7188                       &req->srq_page_dir);
7189     }
7190     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
7191         ctx_pg = &ctx->cq_mem;
7192         req->cq_num_entries = cpu_to_le32(ctx_pg->entries);
7193         req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
7194         req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
7195         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7196                       &req->cq_pg_size_cq_lvl,
7197                       &req->cq_page_dir);
7198     }
7199     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
7200         ctx_pg = &ctx->vnic_mem;
7201         req->vnic_num_vnic_entries =
7202             cpu_to_le16(ctx->vnic_max_vnic_entries);
7203         req->vnic_num_ring_table_entries =
7204             cpu_to_le16(ctx->vnic_max_ring_table_entries);
7205         req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
7206         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7207                       &req->vnic_pg_size_vnic_lvl,
7208                       &req->vnic_page_dir);
7209     }
7210     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
7211         ctx_pg = &ctx->stat_mem;
7212         req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
7213         req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
7214         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7215                       &req->stat_pg_size_stat_lvl,
7216                       &req->stat_page_dir);
7217     }
7218     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) {
7219         ctx_pg = &ctx->mrav_mem;
7220         req->mrav_num_entries = cpu_to_le32(ctx_pg->entries);
7221         if (ctx->mrav_num_entries_units)
7222             flags |=
7223             FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT;
7224         req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size);
7225         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7226                       &req->mrav_pg_size_mrav_lvl,
7227                       &req->mrav_page_dir);
7228     }
7229     if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) {
7230         ctx_pg = &ctx->tim_mem;
7231         req->tim_num_entries = cpu_to_le32(ctx_pg->entries);
7232         req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size);
7233         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
7234                       &req->tim_pg_size_tim_lvl,
7235                       &req->tim_page_dir);
7236     }
7237     for (i = 0, num_entries = &req->tqm_sp_num_entries,
7238          pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl,
7239          pg_dir = &req->tqm_sp_page_dir,
7240          ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
7241          i < BNXT_MAX_TQM_RINGS;
7242          i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
7243         if (!(enables & ena))
7244             continue;
7245 
7246         req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
7247         ctx_pg = ctx->tqm_mem[i];
7248         *num_entries = cpu_to_le32(ctx_pg->entries);
7249         bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
7250     }
7251     req->flags = cpu_to_le32(flags);
7252     return hwrm_req_send(bp, req);
7253 }
7254 
7255 static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
7256                   struct bnxt_ctx_pg_info *ctx_pg)
7257 {
7258     struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7259 
7260     rmem->page_size = BNXT_PAGE_SIZE;
7261     rmem->pg_arr = ctx_pg->ctx_pg_arr;
7262     rmem->dma_arr = ctx_pg->ctx_dma_arr;
7263     rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
7264     if (rmem->depth >= 1)
7265         rmem->flags |= BNXT_RMEM_USE_FULL_PAGE_FLAG;
7266     return bnxt_alloc_ring(bp, rmem);
7267 }
7268 
7269 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
7270                   struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
7271                   u8 depth, struct bnxt_mem_init *mem_init)
7272 {
7273     struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7274     int rc;
7275 
7276     if (!mem_size)
7277         return -EINVAL;
7278 
7279     ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7280     if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
7281         ctx_pg->nr_pages = 0;
7282         return -EINVAL;
7283     }
7284     if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) {
7285         int nr_tbls, i;
7286 
7287         rmem->depth = 2;
7288         ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg),
7289                          GFP_KERNEL);
7290         if (!ctx_pg->ctx_pg_tbl)
7291             return -ENOMEM;
7292         nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
7293         rmem->nr_pages = nr_tbls;
7294         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7295         if (rc)
7296             return rc;
7297         for (i = 0; i < nr_tbls; i++) {
7298             struct bnxt_ctx_pg_info *pg_tbl;
7299 
7300             pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
7301             if (!pg_tbl)
7302                 return -ENOMEM;
7303             ctx_pg->ctx_pg_tbl[i] = pg_tbl;
7304             rmem = &pg_tbl->ring_mem;
7305             rmem->pg_tbl = ctx_pg->ctx_pg_arr[i];
7306             rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
7307             rmem->depth = 1;
7308             rmem->nr_pages = MAX_CTX_PAGES;
7309             rmem->mem_init = mem_init;
7310             if (i == (nr_tbls - 1)) {
7311                 int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
7312 
7313                 if (rem)
7314                     rmem->nr_pages = rem;
7315             }
7316             rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl);
7317             if (rc)
7318                 break;
7319         }
7320     } else {
7321         rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
7322         if (rmem->nr_pages > 1 || depth)
7323             rmem->depth = 1;
7324         rmem->mem_init = mem_init;
7325         rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
7326     }
7327     return rc;
7328 }
7329 
7330 static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
7331                   struct bnxt_ctx_pg_info *ctx_pg)
7332 {
7333     struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
7334 
7335     if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES ||
7336         ctx_pg->ctx_pg_tbl) {
7337         int i, nr_tbls = rmem->nr_pages;
7338 
7339         for (i = 0; i < nr_tbls; i++) {
7340             struct bnxt_ctx_pg_info *pg_tbl;
7341             struct bnxt_ring_mem_info *rmem2;
7342 
7343             pg_tbl = ctx_pg->ctx_pg_tbl[i];
7344             if (!pg_tbl)
7345                 continue;
7346             rmem2 = &pg_tbl->ring_mem;
7347             bnxt_free_ring(bp, rmem2);
7348             ctx_pg->ctx_pg_arr[i] = NULL;
7349             kfree(pg_tbl);
7350             ctx_pg->ctx_pg_tbl[i] = NULL;
7351         }
7352         kfree(ctx_pg->ctx_pg_tbl);
7353         ctx_pg->ctx_pg_tbl = NULL;
7354     }
7355     bnxt_free_ring(bp, rmem);
7356     ctx_pg->nr_pages = 0;
7357 }
7358 
7359 void bnxt_free_ctx_mem(struct bnxt *bp)
7360 {
7361     struct bnxt_ctx_mem_info *ctx = bp->ctx;
7362     int i;
7363 
7364     if (!ctx)
7365         return;
7366 
7367     if (ctx->tqm_mem[0]) {
7368         for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++)
7369             bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]);
7370         kfree(ctx->tqm_mem[0]);
7371         ctx->tqm_mem[0] = NULL;
7372     }
7373 
7374     bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem);
7375     bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem);
7376     bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem);
7377     bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem);
7378     bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem);
7379     bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem);
7380     bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem);
7381     ctx->flags &= ~BNXT_CTX_FLAG_INITED;
7382 }
7383 
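/* Query the firmware's backing store (context memory) requirements,
 * allocate the host memory for each context type (QP, SRQ, CQ, VNIC, STAT,
 * TQM and, when RoCE is supported, MRAV and TIM), and pass the resulting
 * page tables to firmware.
 */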
7384 static int bnxt_alloc_ctx_mem(struct bnxt *bp)
7385 {
7386     struct bnxt_ctx_pg_info *ctx_pg;
7387     struct bnxt_ctx_mem_info *ctx;
7388     struct bnxt_mem_init *init;
7389     u32 mem_size, ena, entries;
7390     u32 entries_sp, min;
7391     u32 num_mr, num_ah;
7392     u32 extra_srqs = 0;
7393     u32 extra_qps = 0;
7394     u8 pg_lvl = 1;
7395     int i, rc;
7396 
7397     rc = bnxt_hwrm_func_backing_store_qcaps(bp);
7398     if (rc) {
7399         netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
7400                rc);
7401         return rc;
7402     }
7403     ctx = bp->ctx;
7404     if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
7405         return 0;
7406 
7407     if ((bp->flags & BNXT_FLAG_ROCE_CAP) && !is_kdump_kernel()) {
7408         pg_lvl = 2;
7409         extra_qps = 65536;
7410         extra_srqs = 8192;
7411     }
7412 
7413     ctx_pg = &ctx->qp_mem;
7414     ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
7415               extra_qps;
7416     if (ctx->qp_entry_size) {
7417         mem_size = ctx->qp_entry_size * ctx_pg->entries;
7418         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
7419         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7420         if (rc)
7421             return rc;
7422     }
7423 
7424     ctx_pg = &ctx->srq_mem;
7425     ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
7426     if (ctx->srq_entry_size) {
7427         mem_size = ctx->srq_entry_size * ctx_pg->entries;
7428         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
7429         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7430         if (rc)
7431             return rc;
7432     }
7433 
7434     ctx_pg = &ctx->cq_mem;
7435     ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
7436     if (ctx->cq_entry_size) {
7437         mem_size = ctx->cq_entry_size * ctx_pg->entries;
7438         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
7439         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
7440         if (rc)
7441             return rc;
7442     }
7443 
7444     ctx_pg = &ctx->vnic_mem;
7445     ctx_pg->entries = ctx->vnic_max_vnic_entries +
7446               ctx->vnic_max_ring_table_entries;
7447     if (ctx->vnic_entry_size) {
7448         mem_size = ctx->vnic_entry_size * ctx_pg->entries;
7449         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
7450         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7451         if (rc)
7452             return rc;
7453     }
7454 
7455     ctx_pg = &ctx->stat_mem;
7456     ctx_pg->entries = ctx->stat_max_entries;
7457     if (ctx->stat_entry_size) {
7458         mem_size = ctx->stat_entry_size * ctx_pg->entries;
7459         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
7460         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
7461         if (rc)
7462             return rc;
7463     }
7464 
7465     ena = 0;
7466     if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
7467         goto skip_rdma;
7468 
7469     ctx_pg = &ctx->mrav_mem;
7470     /* 128K extra is needed to accommodate static AH context
7471      * allocation by f/w.
7472      */
7473     num_mr = 1024 * 256;
7474     num_ah = 1024 * 128;
7475     ctx_pg->entries = num_mr + num_ah;
7476     if (ctx->mrav_entry_size) {
7477         mem_size = ctx->mrav_entry_size * ctx_pg->entries;
7478         init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
7479         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
7480         if (rc)
7481             return rc;
7482     }
7483     ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
7484     if (ctx->mrav_num_entries_units)
7485         ctx_pg->entries =
7486             ((num_mr / ctx->mrav_num_entries_units) << 16) |
7487              (num_ah / ctx->mrav_num_entries_units);
7488 
7489     ctx_pg = &ctx->tim_mem;
7490     ctx_pg->entries = ctx->qp_mem.entries;
7491     if (ctx->tim_entry_size) {
7492         mem_size = ctx->tim_entry_size * ctx_pg->entries;
7493         rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
7494         if (rc)
7495             return rc;
7496     }
7497     ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
7498 
7499 skip_rdma:
7500     min = ctx->tqm_min_entries_per_ring;
7501     entries_sp = ctx->vnic_max_vnic_entries + ctx->qp_max_l2_entries +
7502              2 * (extra_qps + ctx->qp_min_qp1_entries) + min;
7503     entries_sp = roundup(entries_sp, ctx->tqm_entries_multiple);
7504     entries = ctx->qp_max_l2_entries + 2 * (extra_qps + ctx->qp_min_qp1_entries);
7505     entries = roundup(entries, ctx->tqm_entries_multiple);
7506     entries = clamp_t(u32, entries, min, ctx->tqm_max_entries_per_ring);
7507     for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
7508         ctx_pg = ctx->tqm_mem[i];
7509         ctx_pg->entries = i ? entries : entries_sp;
7510         if (ctx->tqm_entry_size) {
7511             mem_size = ctx->tqm_entry_size * ctx_pg->entries;
7512             rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
7513                             NULL);
7514             if (rc)
7515                 return rc;
7516         }
7517         ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
7518     }
7519     ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
7520     rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
7521     if (rc) {
7522         netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
7523                rc);
7524         return rc;
7525     }
7526     ctx->flags |= BNXT_CTX_FLAG_INITED;
7527     return 0;
7528 }
7529 
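/* Query the min/max resource counts (rings, ring groups, VNICs, stat
 * contexts, etc.) that firmware can assign to this function.  When @all is
 * false, only the TX scheduler input limit is updated.
 */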
7530 int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
7531 {
7532     struct hwrm_func_resource_qcaps_output *resp;
7533     struct hwrm_func_resource_qcaps_input *req;
7534     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7535     int rc;
7536 
7537     rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS);
7538     if (rc)
7539         return rc;
7540 
7541     req->fid = cpu_to_le16(0xffff);
7542     resp = hwrm_req_hold(bp, req);
7543     rc = hwrm_req_send_silent(bp, req);
7544     if (rc)
7545         goto hwrm_func_resc_qcaps_exit;
7546 
7547     hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
7548     if (!all)
7549         goto hwrm_func_resc_qcaps_exit;
7550 
7551     hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
7552     hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7553     hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
7554     hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7555     hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
7556     hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7557     hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
7558     hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7559     hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
7560     hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
7561     hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
7562     hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7563     hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
7564     hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7565     hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
7566     hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7567 
7568     if (bp->flags & BNXT_FLAG_CHIP_P5) {
7569         u16 max_msix = le16_to_cpu(resp->max_msix);
7570 
7571         hw_resc->max_nqs = max_msix;
7572         hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
7573     }
7574 
7575     if (BNXT_PF(bp)) {
7576         struct bnxt_pf_info *pf = &bp->pf;
7577 
7578         pf->vf_resv_strategy =
7579             le16_to_cpu(resp->vf_reservation_strategy);
7580         if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
7581             pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
7582     }
7583 hwrm_func_resc_qcaps_exit:
7584     hwrm_req_drop(bp, req);
7585     return rc;
7586 }
7587 
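/* Query PTP support for this port and, if timestamp access is available,
 * allocate bp->ptp_cfg and initialize the PTP clock.  On any failure the
 * existing PTP state is torn down.
 */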
7588 static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
7589 {
7590     struct hwrm_port_mac_ptp_qcfg_output *resp;
7591     struct hwrm_port_mac_ptp_qcfg_input *req;
7592     struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
7593     bool phc_cfg;
7594     u8 flags;
7595     int rc;
7596 
7597     if (bp->hwrm_spec_code < 0x10801) {
7598         rc = -ENODEV;
7599         goto no_ptp;
7600     }
7601 
7602     rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG);
7603     if (rc)
7604         goto no_ptp;
7605 
7606     req->port_id = cpu_to_le16(bp->pf.port_id);
7607     resp = hwrm_req_hold(bp, req);
7608     rc = hwrm_req_send(bp, req);
7609     if (rc)
7610         goto exit;
7611 
7612     flags = resp->flags;
7613     if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
7614         rc = -ENODEV;
7615         goto exit;
7616     }
7617     if (!ptp) {
7618         ptp = kzalloc(sizeof(*ptp), GFP_KERNEL);
7619         if (!ptp) {
7620             rc = -ENOMEM;
7621             goto exit;
7622         }
7623         ptp->bp = bp;
7624         bp->ptp_cfg = ptp;
7625     }
7626     if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
7627         ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
7628         ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
7629     } else if (bp->flags & BNXT_FLAG_CHIP_P5) {
7630         ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
7631         ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
7632     } else {
7633         rc = -ENODEV;
7634         goto exit;
7635     }
7636     phc_cfg = (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_RTC_CONFIGURED) != 0;
7637     rc = bnxt_ptp_init(bp, phc_cfg);
7638     if (rc)
7639         netdev_warn(bp->dev, "PTP initialization failed.\n");
7640 exit:
7641     hwrm_req_drop(bp, req);
7642     if (!rc)
7643         return 0;
7644 
7645 no_ptp:
7646     bnxt_ptp_clear(bp);
7647     kfree(ptp);
7648     bp->ptp_cfg = NULL;
7649     return rc;
7650 }
7651 
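/* Query function capabilities and resource maximums from firmware and cache
 * them in bp->flags, bp->fw_cap and bp->hw_resc.  For the PF this also
 * retrieves the MAC address, port ID and SR-IOV/flow limits, and probes PTP
 * support.
 */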
7652 static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
7653 {
7654     struct hwrm_func_qcaps_output *resp;
7655     struct hwrm_func_qcaps_input *req;
7656     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7657     u32 flags, flags_ext, flags_ext2;
7658     int rc;
7659 
7660     rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS);
7661     if (rc)
7662         return rc;
7663 
7664     req->fid = cpu_to_le16(0xffff);
7665     resp = hwrm_req_hold(bp, req);
7666     rc = hwrm_req_send(bp, req);
7667     if (rc)
7668         goto hwrm_func_qcaps_exit;
7669 
7670     flags = le32_to_cpu(resp->flags);
7671     if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
7672         bp->flags |= BNXT_FLAG_ROCEV1_CAP;
7673     if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
7674         bp->flags |= BNXT_FLAG_ROCEV2_CAP;
7675     if (flags & FUNC_QCAPS_RESP_FLAGS_PCIE_STATS_SUPPORTED)
7676         bp->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
7677     if (flags & FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE)
7678         bp->fw_cap |= BNXT_FW_CAP_HOT_RESET;
7679     if (flags & FUNC_QCAPS_RESP_FLAGS_EXT_STATS_SUPPORTED)
7680         bp->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
7681     if (flags & FUNC_QCAPS_RESP_FLAGS_ERROR_RECOVERY_CAPABLE)
7682         bp->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
7683     if (flags & FUNC_QCAPS_RESP_FLAGS_ERR_RECOVER_RELOAD)
7684         bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
7685     if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
7686         bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
7687     if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
7688         bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
7689 
7690     flags_ext = le32_to_cpu(resp->flags_ext);
7691     if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
7692         bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
7693     if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
7694         bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
7695     if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
7696         bp->fw_cap |= BNXT_FW_CAP_PTP_RTC;
7697     if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
7698         bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
7699     if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
7700         bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
7701 
7702     flags_ext2 = le32_to_cpu(resp->flags_ext2);
7703     if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
7704         bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
7705 
7706     bp->tx_push_thresh = 0;
7707     if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
7708         BNXT_FW_MAJ(bp) > 217)
7709         bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
7710 
7711     hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
7712     hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
7713     hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
7714     hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
7715     hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
7716     if (!hw_resc->max_hw_ring_grps)
7717         hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
7718     hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
7719     hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
7720     hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
7721 
7722     if (BNXT_PF(bp)) {
7723         struct bnxt_pf_info *pf = &bp->pf;
7724 
7725         pf->fw_fid = le16_to_cpu(resp->fid);
7726         pf->port_id = le16_to_cpu(resp->port_id);
7727         memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
7728         pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
7729         pf->max_vfs = le16_to_cpu(resp->max_vfs);
7730         pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
7731         pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
7732         pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
7733         pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
7734         pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
7735         pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
7736         bp->flags &= ~BNXT_FLAG_WOL_CAP;
7737         if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
7738             bp->flags |= BNXT_FLAG_WOL_CAP;
7739         if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) {
7740             __bnxt_hwrm_ptp_qcfg(bp);
7741         } else {
7742             bnxt_ptp_clear(bp);
7743             kfree(bp->ptp_cfg);
7744             bp->ptp_cfg = NULL;
7745         }
7746     } else {
7747 #ifdef CONFIG_BNXT_SRIOV
7748         struct bnxt_vf_info *vf = &bp->vf;
7749 
7750         vf->fw_fid = le16_to_cpu(resp->fid);
7751         memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
7752 #endif
7753     }
7754 
7755 hwrm_func_qcaps_exit:
7756     hwrm_req_drop(bp, req);
7757     return rc;
7758 }
7759 
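/* Query the firmware debug capabilities (HWRM_DBG_QCAPS) and cache the
 * returned flags in bp->fw_dbg_cap.  Skipped if the firmware did not
 * advertise the DBG_QCAPS command in its function capabilities.
 */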
7760 static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
7761 {
7762     struct hwrm_dbg_qcaps_output *resp;
7763     struct hwrm_dbg_qcaps_input *req;
7764     int rc;
7765 
7766     bp->fw_dbg_cap = 0;
7767     if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
7768         return;
7769 
7770     rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
7771     if (rc)
7772         return;
7773 
7774     req->fid = cpu_to_le16(0xffff);
7775     resp = hwrm_req_hold(bp, req);
7776     rc = hwrm_req_send(bp, req);
7777     if (rc)
7778         goto hwrm_dbg_qcaps_exit;
7779 
7780     bp->fw_dbg_cap = le32_to_cpu(resp->flags);
7781 
7782 hwrm_dbg_qcaps_exit:
7783     hwrm_req_drop(bp, req);
7784 }
7785 
7786 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
7787 
7788 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
7789 {
7790     int rc;
7791 
7792     rc = __bnxt_hwrm_func_qcaps(bp);
7793     if (rc)
7794         return rc;
7795 
7796     bnxt_hwrm_dbg_qcaps(bp);
7797 
7798     rc = bnxt_hwrm_queue_qportcfg(bp);
7799     if (rc) {
7800         netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
7801         return rc;
7802     }
7803     if (bp->hwrm_spec_code >= 0x10803) {
7804         rc = bnxt_alloc_ctx_mem(bp);
7805         if (rc)
7806             return rc;
7807         rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7808         if (!rc)
7809             bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
7810     }
7811     return 0;
7812 }
7813 
7814 static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp)
7815 {
7816     struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp;
7817     struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req;
7818     u32 flags;
7819     int rc;
7820 
7821     if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW))
7822         return 0;
7823 
7824     rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS);
7825     if (rc)
7826         return rc;
7827 
7828     resp = hwrm_req_hold(bp, req);
7829     rc = hwrm_req_send(bp, req);
7830     if (rc)
7831         goto hwrm_cfa_adv_qcaps_exit;
7832 
7833     flags = le32_to_cpu(resp->flags);
7834     if (flags &
7835         CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_RFS_RING_TBL_IDX_V2_SUPPORTED)
7836         bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2;
7837 
7838 hwrm_cfa_adv_qcaps_exit:
7839     hwrm_req_drop(bp, req);
7840     return rc;
7841 }
7842 
7843 static int __bnxt_alloc_fw_health(struct bnxt *bp)
7844 {
7845     if (bp->fw_health)
7846         return 0;
7847 
7848     bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
7849     if (!bp->fw_health)
7850         return -ENOMEM;
7851 
7852     mutex_init(&bp->fw_health->lock);
7853     return 0;
7854 }
7855 
7856 static int bnxt_alloc_fw_health(struct bnxt *bp)
7857 {
7858     int rc;
7859 
7860     if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
7861         !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7862         return 0;
7863 
7864     rc = __bnxt_alloc_fw_health(bp);
7865     if (rc) {
7866         bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
7867         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
7868         return rc;
7869     }
7870 
7871     return 0;
7872 }
7873 
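/* Point the BAR0 window reserved for firmware health monitoring at the
 * GRC region containing @reg so the register can be read directly.
 */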
7874 static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
7875 {
7876     writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
7877                      BNXT_GRCPF_REG_WINDOW_BASE_OUT +
7878                      BNXT_FW_HEALTH_WIN_MAP_OFF);
7879 }
7880 
7881 static void bnxt_inv_fw_health_reg(struct bnxt *bp)
7882 {
7883     struct bnxt_fw_health *fw_health = bp->fw_health;
7884     u32 reg_type;
7885 
7886     if (!fw_health)
7887         return;
7888 
7889     reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
7890     if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7891         fw_health->status_reliable = false;
7892 
7893     reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
7894     if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
7895         fw_health->resets_reliable = false;
7896 }
7897 
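/* Locate the firmware health status register, either from the hcomm_status
 * structure published by the firmware or from the fixed P5 GRC location,
 * and record it in bp->fw_health so firmware status checks can be made.
 */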
7898 static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
7899 {
7900     void __iomem *hs;
7901     u32 status_loc;
7902     u32 reg_type;
7903     u32 sig;
7904 
7905     if (bp->fw_health)
7906         bp->fw_health->status_reliable = false;
7907 
7908     __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
7909     hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
7910 
7911     sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
7912     if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
7913         if (!bp->chip_num) {
7914             __bnxt_map_fw_health_reg(bp, BNXT_GRC_REG_BASE);
7915             bp->chip_num = readl(bp->bar0 +
7916                          BNXT_FW_HEALTH_WIN_BASE +
7917                          BNXT_GRC_REG_CHIP_NUM);
7918         }
7919         if (!BNXT_CHIP_P5(bp))
7920             return;
7921 
7922         status_loc = BNXT_GRC_REG_STATUS_P5 |
7923                  BNXT_FW_HEALTH_REG_TYPE_BAR0;
7924     } else {
7925         status_loc = readl(hs + offsetof(struct hcomm_status,
7926                          fw_status_loc));
7927     }
7928 
7929     if (__bnxt_alloc_fw_health(bp)) {
7930         netdev_warn(bp->dev, "no memory for firmware status checks\n");
7931         return;
7932     }
7933 
7934     bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
7935     reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
7936     if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
7937         __bnxt_map_fw_health_reg(bp, status_loc);
7938         bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
7939             BNXT_FW_HEALTH_WIN_OFF(status_loc);
7940     }
7941 
7942     bp->fw_health->status_reliable = true;
7943 }
7944 
7945 static int bnxt_map_fw_health_regs(struct bnxt *bp)
7946 {
7947     struct bnxt_fw_health *fw_health = bp->fw_health;
7948     u32 reg_base = 0xffffffff;
7949     int i;
7950 
7951     bp->fw_health->status_reliable = false;
7952     bp->fw_health->resets_reliable = false;
7953     /* Only pre-map the monitoring GRC registers using window 3 */
7954     for (i = 0; i < 4; i++) {
7955         u32 reg = fw_health->regs[i];
7956 
7957         if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
7958             continue;
7959         if (reg_base == 0xffffffff)
7960             reg_base = reg & BNXT_GRC_BASE_MASK;
7961         if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
7962             return -ERANGE;
7963         fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
7964     }
7965     bp->fw_health->status_reliable = true;
7966     bp->fw_health->resets_reliable = true;
7967     if (reg_base == 0xffffffff)
7968         return 0;
7969 
7970     __bnxt_map_fw_health_reg(bp, reg_base);
7971     return 0;
7972 }
7973 
7974 static void bnxt_remap_fw_health_regs(struct bnxt *bp)
7975 {
7976     if (!bp->fw_health)
7977         return;
7978 
7979     if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) {
7980         bp->fw_health->status_reliable = true;
7981         bp->fw_health->resets_reliable = true;
7982     } else {
7983         bnxt_try_map_fw_health_reg(bp);
7984     }
7985 }
7986 
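/* Read the firmware error recovery configuration (polling intervals, health
 * and heartbeat register locations, and the reset register sequence) into
 * bp->fw_health, then map the GRC registers needed to monitor it.
 */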
7987 static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp)
7988 {
7989     struct bnxt_fw_health *fw_health = bp->fw_health;
7990     struct hwrm_error_recovery_qcfg_output *resp;
7991     struct hwrm_error_recovery_qcfg_input *req;
7992     int rc, i;
7993 
7994     if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
7995         return 0;
7996 
7997     rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG);
7998     if (rc)
7999         return rc;
8000 
8001     resp = hwrm_req_hold(bp, req);
8002     rc = hwrm_req_send(bp, req);
8003     if (rc)
8004         goto err_recovery_out;
8005     fw_health->flags = le32_to_cpu(resp->flags);
8006     if ((fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) &&
8007         !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
8008         rc = -EINVAL;
8009         goto err_recovery_out;
8010     }
8011     fw_health->polling_dsecs = le32_to_cpu(resp->driver_polling_freq);
8012     fw_health->master_func_wait_dsecs =
8013         le32_to_cpu(resp->master_func_wait_period);
8014     fw_health->normal_func_wait_dsecs =
8015         le32_to_cpu(resp->normal_func_wait_period);
8016     fw_health->post_reset_wait_dsecs =
8017         le32_to_cpu(resp->master_func_wait_period_after_reset);
8018     fw_health->post_reset_max_wait_dsecs =
8019         le32_to_cpu(resp->max_bailout_time_after_reset);
8020     fw_health->regs[BNXT_FW_HEALTH_REG] =
8021         le32_to_cpu(resp->fw_health_status_reg);
8022     fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
8023         le32_to_cpu(resp->fw_heartbeat_reg);
8024     fw_health->regs[BNXT_FW_RESET_CNT_REG] =
8025         le32_to_cpu(resp->fw_reset_cnt_reg);
8026     fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
8027         le32_to_cpu(resp->reset_inprogress_reg);
8028     fw_health->fw_reset_inprog_reg_mask =
8029         le32_to_cpu(resp->reset_inprogress_reg_mask);
8030     fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
8031     if (fw_health->fw_reset_seq_cnt >= 16) {
8032         rc = -EINVAL;
8033         goto err_recovery_out;
8034     }
8035     for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
8036         fw_health->fw_reset_seq_regs[i] =
8037             le32_to_cpu(resp->reset_reg[i]);
8038         fw_health->fw_reset_seq_vals[i] =
8039             le32_to_cpu(resp->reset_reg_val[i]);
8040         fw_health->fw_reset_seq_delay_msec[i] =
8041             resp->delay_after_reset[i];
8042     }
8043 err_recovery_out:
8044     hwrm_req_drop(bp, req);
8045     if (!rc)
8046         rc = bnxt_map_fw_health_regs(bp);
8047     if (rc)
8048         bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
8049     return rc;
8050 }
8051 
8052 static int bnxt_hwrm_func_reset(struct bnxt *bp)
8053 {
8054     struct hwrm_func_reset_input *req;
8055     int rc;
8056 
8057     rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET);
8058     if (rc)
8059         return rc;
8060 
8061     req->enables = 0;
8062     hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT);
8063     return hwrm_req_send(bp, req);
8064 }
8065 
8066 static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
8067 {
8068     struct hwrm_nvm_get_dev_info_output nvm_info;
8069 
8070     if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
8071         snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
8072              nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
8073              nvm_info.nvm_cfg_ver_upd);
8074 }
8075 
8076 static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
8077 {
8078     struct hwrm_queue_qportcfg_output *resp;
8079     struct hwrm_queue_qportcfg_input *req;
8080     u8 i, j, *qptr;
8081     bool no_rdma;
8082     int rc = 0;
8083 
8084     rc = hwrm_req_init(bp, req, HWRM_QUEUE_QPORTCFG);
8085     if (rc)
8086         return rc;
8087 
8088     resp = hwrm_req_hold(bp, req);
8089     rc = hwrm_req_send(bp, req);
8090     if (rc)
8091         goto qportcfg_exit;
8092 
8093     if (!resp->max_configurable_queues) {
8094         rc = -EINVAL;
8095         goto qportcfg_exit;
8096     }
8097     bp->max_tc = resp->max_configurable_queues;
8098     bp->max_lltc = resp->max_configurable_lossless_queues;
8099     if (bp->max_tc > BNXT_MAX_QUEUE)
8100         bp->max_tc = BNXT_MAX_QUEUE;
8101 
8102     no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
8103     qptr = &resp->queue_id0;
8104     for (i = 0, j = 0; i < bp->max_tc; i++) {
8105         bp->q_info[j].queue_id = *qptr;
8106         bp->q_ids[i] = *qptr++;
8107         bp->q_info[j].queue_profile = *qptr++;
8108         bp->tc_to_qidx[j] = j;
8109         if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
8110             (no_rdma && BNXT_PF(bp)))
8111             j++;
8112     }
8113     bp->max_q = bp->max_tc;
8114     bp->max_tc = max_t(u8, j, 1);
8115 
8116     if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
8117         bp->max_tc = 1;
8118 
8119     if (bp->max_lltc > bp->max_tc)
8120         bp->max_lltc = bp->max_tc;
8121 
8122 qportcfg_exit:
8123     hwrm_req_drop(bp, req);
8124     return rc;
8125 }
8126 
8127 static int bnxt_hwrm_poll(struct bnxt *bp)
8128 {
8129     struct hwrm_ver_get_input *req;
8130     int rc;
8131 
8132     rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8133     if (rc)
8134         return rc;
8135 
8136     req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8137     req->hwrm_intf_min = HWRM_VERSION_MINOR;
8138     req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8139 
8140     hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT);
8141     rc = hwrm_req_send(bp, req);
8142     return rc;
8143 }
8144 
8145 static int bnxt_hwrm_ver_get(struct bnxt *bp)
8146 {
8147     struct hwrm_ver_get_output *resp;
8148     struct hwrm_ver_get_input *req;
8149     u16 fw_maj, fw_min, fw_bld, fw_rsv;
8150     u32 dev_caps_cfg, hwrm_ver;
8151     int rc, len;
8152 
8153     rc = hwrm_req_init(bp, req, HWRM_VER_GET);
8154     if (rc)
8155         return rc;
8156 
8157     hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
8158     bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
8159     req->hwrm_intf_maj = HWRM_VERSION_MAJOR;
8160     req->hwrm_intf_min = HWRM_VERSION_MINOR;
8161     req->hwrm_intf_upd = HWRM_VERSION_UPDATE;
8162 
8163     resp = hwrm_req_hold(bp, req);
8164     rc = hwrm_req_send(bp, req);
8165     if (rc)
8166         goto hwrm_ver_get_exit;
8167 
8168     memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
8169 
8170     bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
8171                  resp->hwrm_intf_min_8b << 8 |
8172                  resp->hwrm_intf_upd_8b;
8173     if (resp->hwrm_intf_maj_8b < 1) {
8174         netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
8175                 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8176                 resp->hwrm_intf_upd_8b);
8177         netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
8178     }
8179 
8180     hwrm_ver = HWRM_VERSION_MAJOR << 16 | HWRM_VERSION_MINOR << 8 |
8181             HWRM_VERSION_UPDATE;
8182 
8183     if (bp->hwrm_spec_code > hwrm_ver)
8184         snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8185              HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR,
8186              HWRM_VERSION_UPDATE);
8187     else
8188         snprintf(bp->hwrm_ver_supp, FW_VER_STR_LEN, "%d.%d.%d",
8189              resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
8190              resp->hwrm_intf_upd_8b);
8191 
8192     fw_maj = le16_to_cpu(resp->hwrm_fw_major);
8193     if (bp->hwrm_spec_code > 0x10803 && fw_maj) {
8194         fw_min = le16_to_cpu(resp->hwrm_fw_minor);
8195         fw_bld = le16_to_cpu(resp->hwrm_fw_build);
8196         fw_rsv = le16_to_cpu(resp->hwrm_fw_patch);
8197         len = FW_VER_STR_LEN;
8198     } else {
8199         fw_maj = resp->hwrm_fw_maj_8b;
8200         fw_min = resp->hwrm_fw_min_8b;
8201         fw_bld = resp->hwrm_fw_bld_8b;
8202         fw_rsv = resp->hwrm_fw_rsvd_8b;
8203         len = BC_HWRM_STR_LEN;
8204     }
8205     bp->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
8206     snprintf(bp->fw_ver_str, len, "%d.%d.%d.%d", fw_maj, fw_min, fw_bld,
8207          fw_rsv);
8208 
8209     if (strlen(resp->active_pkg_name)) {
8210         int fw_ver_len = strlen(bp->fw_ver_str);
8211 
8212         snprintf(bp->fw_ver_str + fw_ver_len,
8213              FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
8214              resp->active_pkg_name);
8215         bp->fw_cap |= BNXT_FW_CAP_PKG_VER;
8216     }
8217 
8218     bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
8219     if (!bp->hwrm_cmd_timeout)
8220         bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
8221     bp->hwrm_cmd_max_timeout = le16_to_cpu(resp->max_req_timeout) * 1000;
8222     if (!bp->hwrm_cmd_max_timeout)
8223         bp->hwrm_cmd_max_timeout = HWRM_CMD_MAX_TIMEOUT;
8224     else if (bp->hwrm_cmd_max_timeout > HWRM_CMD_MAX_TIMEOUT)
8225         netdev_warn(bp->dev, "Device requests max timeout of %d seconds, may trigger hung task watchdog\n",
8226                 bp->hwrm_cmd_max_timeout / 1000);
8227 
8228     if (resp->hwrm_intf_maj_8b >= 1) {
8229         bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
8230         bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
8231     }
8232     if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
8233         bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
8234 
8235     bp->chip_num = le16_to_cpu(resp->chip_num);
8236     bp->chip_rev = resp->chip_rev;
8237     if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
8238         !resp->chip_metal)
8239         bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
8240 
8241     dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
8242     if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
8243         (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
8244         bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
8245 
8246     if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
8247         bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
8248 
8249     if (dev_caps_cfg &
8250         VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
8251         bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
8252 
8253     if (dev_caps_cfg &
8254         VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
8255         bp->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
8256 
8257     if (dev_caps_cfg &
8258         VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
8259         bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
8260 
8261 hwrm_ver_get_exit:
8262     hwrm_req_drop(bp, req);
8263     return rc;
8264 }
8265 
8266 int bnxt_hwrm_fw_set_time(struct bnxt *bp)
8267 {
8268     struct hwrm_fw_set_time_input *req;
8269     struct tm tm;
8270     time64_t now = ktime_get_real_seconds();
8271     int rc;
8272 
8273     if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
8274         bp->hwrm_spec_code < 0x10400)
8275         return -EOPNOTSUPP;
8276 
8277     time64_to_tm(now, 0, &tm);
8278     rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME);
8279     if (rc)
8280         return rc;
8281 
8282     req->year = cpu_to_le16(1900 + tm.tm_year);
8283     req->month = 1 + tm.tm_mon;
8284     req->day = tm.tm_mday;
8285     req->hour = tm.tm_hour;
8286     req->minute = tm.tm_min;
8287     req->second = tm.tm_sec;
8288     return hwrm_req_send(bp, req);
8289 }
8290 
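/* Fold a hardware counter that is only as wide as @mask into the full-width
 * software counter *sw, adding one full wrap if the hardware value has
 * rolled over since the last accumulation.
 */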
8291 static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask)
8292 {
8293     u64 sw_tmp;
8294 
8295     hw &= mask;
8296     sw_tmp = (*sw & ~mask) | hw;
8297     if (hw < (*sw & mask))
8298         sw_tmp += mask + 1;
8299     WRITE_ONCE(*sw, sw_tmp);
8300 }
8301 
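/* Accumulate @count 64-bit hardware counters into their software mirrors,
 * widening each one according to its per-counter mask; counters whose mask
 * is all ones are copied as-is.  @ignore_zero skips counters that read zero.
 */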
8302 static void __bnxt_accumulate_stats(__le64 *hw_stats, u64 *sw_stats, u64 *masks,
8303                     int count, bool ignore_zero)
8304 {
8305     int i;
8306 
8307     for (i = 0; i < count; i++) {
8308         u64 hw = le64_to_cpu(READ_ONCE(hw_stats[i]));
8309 
8310         if (ignore_zero && !hw)
8311             continue;
8312 
8313         if (masks[i] == -1ULL)
8314             sw_stats[i] = hw;
8315         else
8316             bnxt_add_one_ctr(hw, &sw_stats[i], masks[i]);
8317     }
8318 }
8319 
8320 static void bnxt_accumulate_stats(struct bnxt_stats_mem *stats)
8321 {
8322     if (!stats->hw_stats)
8323         return;
8324 
8325     __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8326                 stats->hw_masks, stats->len / 8, false);
8327 }
8328 
8329 static void bnxt_accumulate_all_stats(struct bnxt *bp)
8330 {
8331     struct bnxt_stats_mem *ring0_stats;
8332     bool ignore_zero = false;
8333     int i;
8334 
8335     /* Chip bug.  Counter intermittently becomes 0. */
8336     if (bp->flags & BNXT_FLAG_CHIP_P5)
8337         ignore_zero = true;
8338 
8339     for (i = 0; i < bp->cp_nr_rings; i++) {
8340         struct bnxt_napi *bnapi = bp->bnapi[i];
8341         struct bnxt_cp_ring_info *cpr;
8342         struct bnxt_stats_mem *stats;
8343 
8344         cpr = &bnapi->cp_ring;
8345         stats = &cpr->stats;
8346         if (!i)
8347             ring0_stats = stats;
8348         __bnxt_accumulate_stats(stats->hw_stats, stats->sw_stats,
8349                     ring0_stats->hw_masks,
8350                     ring0_stats->len / 8, ignore_zero);
8351     }
8352     if (bp->flags & BNXT_FLAG_PORT_STATS) {
8353         struct bnxt_stats_mem *stats = &bp->port_stats;
8354         __le64 *hw_stats = stats->hw_stats;
8355         u64 *sw_stats = stats->sw_stats;
8356         u64 *masks = stats->hw_masks;
8357         int cnt;
8358 
8359         cnt = sizeof(struct rx_port_stats) / 8;
8360         __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8361 
8362         hw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8363         sw_stats += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8364         masks += BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
8365         cnt = sizeof(struct tx_port_stats) / 8;
8366         __bnxt_accumulate_stats(hw_stats, sw_stats, masks, cnt, false);
8367     }
8368     if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
8369         bnxt_accumulate_stats(&bp->rx_port_stats_ext);
8370         bnxt_accumulate_stats(&bp->tx_port_stats_ext);
8371     }
8372 }
8373 
8374 static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags)
8375 {
8376     struct hwrm_port_qstats_input *req;
8377     struct bnxt_pf_info *pf = &bp->pf;
8378     int rc;
8379 
8380     if (!(bp->flags & BNXT_FLAG_PORT_STATS))
8381         return 0;
8382 
8383     if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8384         return -EOPNOTSUPP;
8385 
8386     rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS);
8387     if (rc)
8388         return rc;
8389 
8390     req->flags = flags;
8391     req->port_id = cpu_to_le16(pf->port_id);
8392     req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map +
8393                         BNXT_TX_PORT_STATS_BYTE_OFFSET);
8394     req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map);
8395     return hwrm_req_send(bp, req);
8396 }
8397 
8398 static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
8399 {
8400     struct hwrm_queue_pri2cos_qcfg_output *resp_qc;
8401     struct hwrm_queue_pri2cos_qcfg_input *req_qc;
8402     struct hwrm_port_qstats_ext_output *resp_qs;
8403     struct hwrm_port_qstats_ext_input *req_qs;
8404     struct bnxt_pf_info *pf = &bp->pf;
8405     u32 tx_stat_size;
8406     int rc;
8407 
8408     if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
8409         return 0;
8410 
8411     if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED))
8412         return -EOPNOTSUPP;
8413 
8414     rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT);
8415     if (rc)
8416         return rc;
8417 
8418     req_qs->flags = flags;
8419     req_qs->port_id = cpu_to_le16(pf->port_id);
8420     req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
8421     req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map);
8422     tx_stat_size = bp->tx_port_stats_ext.hw_stats ?
8423                sizeof(struct tx_port_stats_ext) : 0;
8424     req_qs->tx_stat_size = cpu_to_le16(tx_stat_size);
8425     req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map);
8426     resp_qs = hwrm_req_hold(bp, req_qs);
8427     rc = hwrm_req_send(bp, req_qs);
8428     if (!rc) {
8429         bp->fw_rx_stats_ext_size =
8430             le16_to_cpu(resp_qs->rx_stat_size) / 8;
8431         if (BNXT_FW_MAJ(bp) < 220 &&
8432             bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
8433             bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
8434 
8435         bp->fw_tx_stats_ext_size = tx_stat_size ?
8436             le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
8437     } else {
8438         bp->fw_rx_stats_ext_size = 0;
8439         bp->fw_tx_stats_ext_size = 0;
8440     }
8441     hwrm_req_drop(bp, req_qs);
8442 
8443     if (flags)
8444         return rc;
8445 
8446     if (bp->fw_tx_stats_ext_size <=
8447         offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
8448         bp->pri2cos_valid = 0;
8449         return rc;
8450     }
8451 
8452     rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG);
8453     if (rc)
8454         return rc;
8455 
8456     req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
8457 
8458     resp_qc = hwrm_req_hold(bp, req_qc);
8459     rc = hwrm_req_send(bp, req_qc);
8460     if (!rc) {
8461         u8 *pri2cos;
8462         int i, j;
8463 
8464         pri2cos = &resp_qc->pri0_cos_queue_id;
8465         for (i = 0; i < 8; i++) {
8466             u8 queue_id = pri2cos[i];
8467             u8 queue_idx;
8468 
8469             /* Per port queue IDs start from 0, 10, 20, etc */
8470             queue_idx = queue_id % 10;
8471             if (queue_idx > BNXT_MAX_QUEUE) {
8472                 bp->pri2cos_valid = false;
8473                 hwrm_req_drop(bp, req_qc);
8474                 return rc;
8475             }
8476             for (j = 0; j < bp->max_q; j++) {
8477                 if (bp->q_ids[j] == queue_id)
8478                     bp->pri2cos_idx[i] = queue_idx;
8479             }
8480         }
8481         bp->pri2cos_valid = true;
8482     }
8483     hwrm_req_drop(bp, req_qc);
8484 
8485     return rc;
8486 }
8487 
8488 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
8489 {
8490     bnxt_hwrm_tunnel_dst_port_free(bp,
8491         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8492     bnxt_hwrm_tunnel_dst_port_free(bp,
8493         TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8494 }
8495 
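/* Enable or disable TPA (hardware receive aggregation) on every VNIC.
 * Disabling is skipped when firmware access is not possible.
 */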
8496 static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
8497 {
8498     int rc, i;
8499     u32 tpa_flags = 0;
8500 
8501     if (set_tpa)
8502         tpa_flags = bp->flags & BNXT_FLAG_TPA;
8503     else if (BNXT_NO_FW_ACCESS(bp))
8504         return 0;
8505     for (i = 0; i < bp->nr_vnics; i++) {
8506         rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
8507         if (rc) {
8508             netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
8509                    i, rc);
8510             return rc;
8511         }
8512     }
8513     return 0;
8514 }
8515 
8516 static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
8517 {
8518     int i;
8519 
8520     for (i = 0; i < bp->nr_vnics; i++)
8521         bnxt_hwrm_vnic_set_rss(bp, i, false);
8522 }
8523 
8524 static void bnxt_clear_vnic(struct bnxt *bp)
8525 {
8526     if (!bp->vnic_info)
8527         return;
8528 
8529     bnxt_hwrm_clear_vnic_filter(bp);
8530     if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
8531         /* clear all RSS settings before freeing the vnic ctx */
8532         bnxt_hwrm_clear_vnic_rss(bp);
8533         bnxt_hwrm_vnic_ctx_free(bp);
8534     }
8535     /* before freeing the vnic, undo the vnic tpa settings */
8536     if (bp->flags & BNXT_FLAG_TPA)
8537         bnxt_set_tpa(bp, false);
8538     bnxt_hwrm_vnic_free(bp);
8539     if (bp->flags & BNXT_FLAG_CHIP_P5)
8540         bnxt_hwrm_vnic_ctx_free(bp);
8541 }
8542 
8543 static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
8544                     bool irq_re_init)
8545 {
8546     bnxt_clear_vnic(bp);
8547     bnxt_hwrm_ring_free(bp, close_path);
8548     bnxt_hwrm_ring_grp_free(bp);
8549     if (irq_re_init) {
8550         bnxt_hwrm_stat_ctx_free(bp);
8551         bnxt_hwrm_free_tunnel_ports(bp);
8552     }
8553 }
8554 
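/* Set the embedded bridge mode (VEB or VEPA) of the function via HWRM_FUNC_CFG. */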
8555 static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
8556 {
8557     struct hwrm_func_cfg_input *req;
8558     u8 evb_mode;
8559     int rc;
8560 
8561     if (br_mode == BRIDGE_MODE_VEB)
8562         evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
8563     else if (br_mode == BRIDGE_MODE_VEPA)
8564         evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
8565     else
8566         return -EINVAL;
8567 
8568     rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8569     if (rc)
8570         return rc;
8571 
8572     req->fid = cpu_to_le16(0xffff);
8573     req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
8574     req->evb_mode = evb_mode;
8575     return hwrm_req_send(bp, req);
8576 }
8577 
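/* Report the host cache line size (64 or 128 bytes) to the firmware via
 * HWRM_FUNC_CFG.  Only done on the PF with HWRM spec 1.8.3 or newer.
 */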
8578 static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
8579 {
8580     struct hwrm_func_cfg_input *req;
8581     int rc;
8582 
8583     if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
8584         return 0;
8585 
8586     rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
8587     if (rc)
8588         return rc;
8589 
8590     req->fid = cpu_to_le16(0xffff);
8591     req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
8592     req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
8593     if (size == 128)
8594         req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
8595 
8596     return hwrm_req_send(bp, req);
8597 }
8598 
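/* Set up a VNIC on pre-P5 chips: allocate its RSS (and Nitro A0 COS)
 * contexts, configure the VNIC and its ring group, enable RSS, and set up
 * header-data split when aggregation rings are used.
 */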
8599 static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8600 {
8601     struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
8602     int rc;
8603 
8604     if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
8605         goto skip_rss_ctx;
8606 
8607     /* allocate context for vnic */
8608     rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
8609     if (rc) {
8610         netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8611                vnic_id, rc);
8612         goto vnic_setup_err;
8613     }
8614     bp->rsscos_nr_ctxs++;
8615 
8616     if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8617         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
8618         if (rc) {
8619             netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
8620                    vnic_id, rc);
8621             goto vnic_setup_err;
8622         }
8623         bp->rsscos_nr_ctxs++;
8624     }
8625 
8626 skip_rss_ctx:
8627     /* configure default vnic, ring grp */
8628     rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8629     if (rc) {
8630         netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8631                vnic_id, rc);
8632         goto vnic_setup_err;
8633     }
8634 
8635     /* Enable RSS hashing on vnic */
8636     rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
8637     if (rc) {
8638         netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
8639                vnic_id, rc);
8640         goto vnic_setup_err;
8641     }
8642 
8643     if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8644         rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8645         if (rc) {
8646             netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8647                    vnic_id, rc);
8648         }
8649     }
8650 
8651 vnic_setup_err:
8652     return rc;
8653 }
8654 
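/* P5 variant of VNIC setup: allocate one RSS context per group of RX rings,
 * program the RSS table, configure the VNIC and optionally enable
 * header-data split.
 */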
8655 static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
8656 {
8657     int rc, i, nr_ctxs;
8658 
8659     nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
8660     for (i = 0; i < nr_ctxs; i++) {
8661         rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
8662         if (rc) {
8663             netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
8664                    vnic_id, i, rc);
8665             break;
8666         }
8667         bp->rsscos_nr_ctxs++;
8668     }
8669     if (i < nr_ctxs)
8670         return -ENOMEM;
8671 
8672     rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
8673     if (rc) {
8674         netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
8675                vnic_id, rc);
8676         return rc;
8677     }
8678     rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
8679     if (rc) {
8680         netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
8681                vnic_id, rc);
8682         return rc;
8683     }
8684     if (bp->flags & BNXT_FLAG_AGG_RINGS) {
8685         rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
8686         if (rc) {
8687             netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
8688                    vnic_id, rc);
8689         }
8690     }
8691     return rc;
8692 }
8693 
8694 static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
8695 {
8696     if (bp->flags & BNXT_FLAG_CHIP_P5)
8697         return __bnxt_setup_vnic_p5(bp, vnic_id);
8698     else
8699         return __bnxt_setup_vnic(bp, vnic_id);
8700 }
8701 
8702 static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
8703 {
8704 #ifdef CONFIG_RFS_ACCEL
8705     int i, rc = 0;
8706 
8707     if (bp->flags & BNXT_FLAG_CHIP_P5)
8708         return 0;
8709 
8710     for (i = 0; i < bp->rx_nr_rings; i++) {
8711         struct bnxt_vnic_info *vnic;
8712         u16 vnic_id = i + 1;
8713         u16 ring_id = i;
8714 
8715         if (vnic_id >= bp->nr_vnics)
8716             break;
8717 
8718         vnic = &bp->vnic_info[vnic_id];
8719         vnic->flags |= BNXT_VNIC_RFS_FLAG;
8720         if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8721             vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
8722         rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
8723         if (rc) {
8724             netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
8725                    vnic_id, rc);
8726             break;
8727         }
8728         rc = bnxt_setup_vnic(bp, vnic_id);
8729         if (rc)
8730             break;
8731     }
8732     return rc;
8733 #else
8734     return 0;
8735 #endif
8736 }
8737 
8738 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
8739 static bool bnxt_promisc_ok(struct bnxt *bp)
8740 {
8741 #ifdef CONFIG_BNXT_SRIOV
8742     if (BNXT_VF(bp) && !bp->vf.vlan && !bnxt_is_trusted_vf(bp, &bp->vf))
8743         return false;
8744 #endif
8745     return true;
8746 }
8747 
8748 static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
8749 {
8750     int rc;
8751 
8752     rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
8753     if (rc) {
8754         netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8755                rc);
8756         return rc;
8757     }
8758 
8759     rc = bnxt_hwrm_vnic_cfg(bp, 1);
8760     if (rc) {
8761         netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
8762                rc);
8763         return rc;
8764     }
8765     return rc;
8766 }
8767 
8768 static int bnxt_cfg_rx_mode(struct bnxt *);
8769 static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
8770 
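/* Bring up the device data path: allocate stat contexts, rings and ring
 * groups, set up the default VNIC (plus RFS VNICs and TPA), program the
 * unicast MAC filter and RX mask, and configure interrupt coalescing.
 * All allocated resources are freed again if any step fails.
 */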
8771 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
8772 {
8773     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8774     int rc = 0;
8775     unsigned int rx_nr_rings = bp->rx_nr_rings;
8776 
8777     if (irq_re_init) {
8778         rc = bnxt_hwrm_stat_ctx_alloc(bp);
8779         if (rc) {
8780             netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
8781                    rc);
8782             goto err_out;
8783         }
8784     }
8785 
8786     rc = bnxt_hwrm_ring_alloc(bp);
8787     if (rc) {
8788         netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
8789         goto err_out;
8790     }
8791 
8792     rc = bnxt_hwrm_ring_grp_alloc(bp);
8793     if (rc) {
8794         netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
8795         goto err_out;
8796     }
8797 
8798     if (BNXT_CHIP_TYPE_NITRO_A0(bp))
8799         rx_nr_rings--;
8800 
8801     /* default vnic 0 */
8802     rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
8803     if (rc) {
8804         netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
8805         goto err_out;
8806     }
8807 
8808     rc = bnxt_setup_vnic(bp, 0);
8809     if (rc)
8810         goto err_out;
8811 
8812     if (bp->flags & BNXT_FLAG_RFS) {
8813         rc = bnxt_alloc_rfs_vnics(bp);
8814         if (rc)
8815             goto err_out;
8816     }
8817 
8818     if (bp->flags & BNXT_FLAG_TPA) {
8819         rc = bnxt_set_tpa(bp, true);
8820         if (rc)
8821             goto err_out;
8822     }
8823 
8824     if (BNXT_VF(bp))
8825         bnxt_update_vf_mac(bp);
8826 
8827     /* Filter for default vnic 0 */
8828     rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
8829     if (rc) {
8830         if (BNXT_VF(bp) && rc == -ENODEV)
8831             netdev_err(bp->dev, "Cannot configure L2 filter while PF is unavailable\n");
8832         else
8833             netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
8834         goto err_out;
8835     }
8836     vnic->uc_filter_count = 1;
8837 
8838     vnic->rx_mask = 0;
8839     if (test_bit(BNXT_STATE_HALF_OPEN, &bp->state))
8840         goto skip_rx_mask;
8841 
8842     if (bp->dev->flags & IFF_BROADCAST)
8843         vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
8844 
8845     if (bp->dev->flags & IFF_PROMISC)
8846         vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8847 
8848     if (bp->dev->flags & IFF_ALLMULTI) {
8849         vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8850         vnic->mc_list_count = 0;
8851     } else if (bp->dev->flags & IFF_MULTICAST) {
8852         u32 mask = 0;
8853 
8854         bnxt_mc_list_updated(bp, &mask);
8855         vnic->rx_mask |= mask;
8856     }
8857 
8858     rc = bnxt_cfg_rx_mode(bp);
8859     if (rc)
8860         goto err_out;
8861 
8862 skip_rx_mask:
8863     rc = bnxt_hwrm_set_coal(bp);
8864     if (rc)
8865         netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
8866                 rc);
8867 
8868     if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
8869         rc = bnxt_setup_nitroa0_vnic(bp);
8870         if (rc)
8871             netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
8872                    rc);
8873     }
8874 
8875     if (BNXT_VF(bp)) {
8876         bnxt_hwrm_func_qcfg(bp);
8877         netdev_update_features(bp->dev);
8878     }
8879 
8880     return 0;
8881 
8882 err_out:
8883     bnxt_hwrm_resource_free(bp, 0, true);
8884 
8885     return rc;
8886 }
8887 
8888 static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
8889 {
8890     bnxt_hwrm_resource_free(bp, 1, irq_re_init);
8891     return 0;
8892 }
8893 
8894 static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
8895 {
8896     bnxt_init_cp_rings(bp);
8897     bnxt_init_rx_rings(bp);
8898     bnxt_init_tx_rings(bp);
8899     bnxt_init_ring_grps(bp, irq_re_init);
8900     bnxt_init_vnics(bp);
8901 
8902     return bnxt_init_chip(bp, irq_re_init);
8903 }
8904 
8905 static int bnxt_set_real_num_queues(struct bnxt *bp)
8906 {
8907     int rc;
8908     struct net_device *dev = bp->dev;
8909 
8910     rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
8911                       bp->tx_nr_rings_xdp);
8912     if (rc)
8913         return rc;
8914 
8915     rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
8916     if (rc)
8917         return rc;
8918 
8919 #ifdef CONFIG_RFS_ACCEL
8920     if (bp->flags & BNXT_FLAG_RFS)
8921         dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
8922 #endif
8923 
8924     return rc;
8925 }
8926 
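/* Trim the requested RX and TX ring counts so they fit within @max
 * completion rings.  Shared rings are simply capped at @max; otherwise the
 * larger of the two counts is reduced until rx + tx <= max.
 */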
8927 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
8928                bool shared)
8929 {
8930     int _rx = *rx, _tx = *tx;
8931 
8932     if (shared) {
8933         *rx = min_t(int, _rx, max);
8934         *tx = min_t(int, _tx, max);
8935     } else {
8936         if (max < 2)
8937             return -ENOMEM;
8938 
8939         while (_rx + _tx > max) {
8940             if (_rx > _tx && _rx > 1)
8941                 _rx--;
8942             else if (_tx > 1)
8943                 _tx--;
8944         }
8945         *rx = _rx;
8946         *tx = _tx;
8947     }
8948     return 0;
8949 }
8950 
8951 static void bnxt_setup_msix(struct bnxt *bp)
8952 {
8953     const int len = sizeof(bp->irq_tbl[0].name);
8954     struct net_device *dev = bp->dev;
8955     int tcs, i;
8956 
8957     tcs = netdev_get_num_tc(dev);
8958     if (tcs) {
8959         int i, off, count;
8960 
8961         for (i = 0; i < tcs; i++) {
8962             count = bp->tx_nr_rings_per_tc;
8963             off = i * count;
8964             netdev_set_tc_queue(dev, i, count, off);
8965         }
8966     }
8967 
8968     for (i = 0; i < bp->cp_nr_rings; i++) {
8969         int map_idx = bnxt_cp_num_to_irq_num(bp, i);
8970         char *attr;
8971 
8972         if (bp->flags & BNXT_FLAG_SHARED_RINGS)
8973             attr = "TxRx";
8974         else if (i < bp->rx_nr_rings)
8975             attr = "rx";
8976         else
8977             attr = "tx";
8978 
8979         snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
8980              attr, i);
8981         bp->irq_tbl[map_idx].handler = bnxt_msix;
8982     }
8983 }
8984 
8985 static void bnxt_setup_inta(struct bnxt *bp)
8986 {
8987     const int len = sizeof(bp->irq_tbl[0].name);
8988 
8989     if (netdev_get_num_tc(bp->dev))
8990         netdev_reset_tc(bp->dev);
8991 
8992     snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
8993          0);
8994     bp->irq_tbl[0].handler = bnxt_inta;
8995 }
8996 
8997 static int bnxt_init_int_mode(struct bnxt *bp);
8998 
8999 static int bnxt_setup_int_mode(struct bnxt *bp)
9000 {
9001     int rc;
9002 
9003     if (!bp->irq_tbl) {
9004         rc = bnxt_init_int_mode(bp);
9005         if (rc || !bp->irq_tbl)
9006             return rc ?: -ENODEV;
9007     }
9008 
9009     if (bp->flags & BNXT_FLAG_USING_MSIX)
9010         bnxt_setup_msix(bp);
9011     else
9012         bnxt_setup_inta(bp);
9013 
9014     rc = bnxt_set_real_num_queues(bp);
9015     return rc;
9016 }
9017 
9018 #ifdef CONFIG_RFS_ACCEL
9019 static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
9020 {
9021     return bp->hw_resc.max_rsscos_ctxs;
9022 }
9023 
9024 static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
9025 {
9026     return bp->hw_resc.max_vnics;
9027 }
9028 #endif
9029 
9030 unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
9031 {
9032     return bp->hw_resc.max_stat_ctxs;
9033 }
9034 
9035 unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
9036 {
9037     return bp->hw_resc.max_cp_rings;
9038 }
9039 
9040 static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
9041 {
9042     unsigned int cp = bp->hw_resc.max_cp_rings;
9043 
9044     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9045         cp -= bnxt_get_ulp_msix_num(bp);
9046 
9047     return cp;
9048 }
9049 
9050 static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
9051 {
9052     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9053 
9054     if (bp->flags & BNXT_FLAG_CHIP_P5)
9055         return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs);
9056 
9057     return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
9058 }
9059 
9060 static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
9061 {
9062     bp->hw_resc.max_irqs = max_irqs;
9063 }
9064 
9065 unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp)
9066 {
9067     unsigned int cp;
9068 
9069     cp = bnxt_get_max_func_cp_rings_for_en(bp);
9070     if (bp->flags & BNXT_FLAG_CHIP_P5)
9071         return cp - bp->rx_nr_rings - bp->tx_nr_rings;
9072     else
9073         return cp - bp->cp_nr_rings;
9074 }
9075 
9076 unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
9077 {
9078     return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
9079 }
9080 
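/* Return the number of MSI-X vectors that can be made available beyond
 * those already used by the L2 completion rings, for a caller (such as the
 * RDMA ULP) asking for @num additional vectors.
 */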
9081 int bnxt_get_avail_msix(struct bnxt *bp, int num)
9082 {
9083     int max_cp = bnxt_get_max_func_cp_rings(bp);
9084     int max_irq = bnxt_get_max_func_irqs(bp);
9085     int total_req = bp->cp_nr_rings + num;
9086     int max_idx, avail_msix;
9087 
9088     max_idx = bp->total_irqs;
9089     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
9090         max_idx = min_t(int, bp->total_irqs, max_cp);
9091     avail_msix = max_idx - bp->cp_nr_rings;
9092     if (!BNXT_NEW_RM(bp) || avail_msix >= num)
9093         return avail_msix;
9094 
9095     if (max_irq < total_req) {
9096         num = max_irq - bp->cp_nr_rings;
9097         if (num <= 0)
9098             return 0;
9099     }
9100     return num;
9101 }
9102 
9103 static int bnxt_get_num_msix(struct bnxt *bp)
9104 {
9105     if (!BNXT_NEW_RM(bp))
9106         return bnxt_get_max_func_irqs(bp);
9107 
9108     return bnxt_nq_rings_in_use(bp);
9109 }
9110 
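/* Enable MSI-X: request one vector per completion ring (plus any ULP
 * vectors), build the IRQ table, and trim the ring counts to match the
 * number of vectors actually granted.
 */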
9111 static int bnxt_init_msix(struct bnxt *bp)
9112 {
9113     int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
9114     struct msix_entry *msix_ent;
9115 
9116     total_vecs = bnxt_get_num_msix(bp);
9117     max = bnxt_get_max_func_irqs(bp);
9118     if (total_vecs > max)
9119         total_vecs = max;
9120 
9121     if (!total_vecs)
9122         return 0;
9123 
9124     msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
9125     if (!msix_ent)
9126         return -ENOMEM;
9127 
9128     for (i = 0; i < total_vecs; i++) {
9129         msix_ent[i].entry = i;
9130         msix_ent[i].vector = 0;
9131     }
9132 
9133     if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
9134         min = 2;
9135 
9136     total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
9137     ulp_msix = bnxt_get_ulp_msix_num(bp);
9138     if (total_vecs < 0 || total_vecs < ulp_msix) {
9139         rc = -ENODEV;
9140         goto msix_setup_exit;
9141     }
9142 
9143     bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
9144     if (bp->irq_tbl) {
9145         for (i = 0; i < total_vecs; i++)
9146             bp->irq_tbl[i].vector = msix_ent[i].vector;
9147 
9148         bp->total_irqs = total_vecs;
9149         /* Trim rings based upon num of vectors allocated */
9150         rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
9151                      total_vecs - ulp_msix, min == 1);
9152         if (rc)
9153             goto msix_setup_exit;
9154 
9155         bp->cp_nr_rings = (min == 1) ?
9156                   max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9157                   bp->tx_nr_rings + bp->rx_nr_rings;
9158 
9159     } else {
9160         rc = -ENOMEM;
9161         goto msix_setup_exit;
9162     }
9163     bp->flags |= BNXT_FLAG_USING_MSIX;
9164     kfree(msix_ent);
9165     return 0;
9166 
9167 msix_setup_exit:
9168     netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
9169     kfree(bp->irq_tbl);
9170     bp->irq_tbl = NULL;
9171     pci_disable_msix(bp->pdev);
9172     kfree(msix_ent);
9173     return rc;
9174 }
9175 
9176 static int bnxt_init_inta(struct bnxt *bp)
9177 {
9178     bp->irq_tbl = kzalloc(sizeof(struct bnxt_irq), GFP_KERNEL);
9179     if (!bp->irq_tbl)
9180         return -ENOMEM;
9181 
9182     bp->total_irqs = 1;
9183     bp->rx_nr_rings = 1;
9184     bp->tx_nr_rings = 1;
9185     bp->cp_nr_rings = 1;
9186     bp->flags |= BNXT_FLAG_SHARED_RINGS;
9187     bp->irq_tbl[0].vector = bp->pdev->irq;
9188     return 0;
9189 }
9190 
9191 static int bnxt_init_int_mode(struct bnxt *bp)
9192 {
9193     int rc = -ENODEV;
9194 
9195     if (bp->flags & BNXT_FLAG_MSIX_CAP)
9196         rc = bnxt_init_msix(bp);
9197 
9198     if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
9199         /* fallback to INTA */
9200         rc = bnxt_init_inta(bp);
9201     }
9202     return rc;
9203 }
9204 
9205 static void bnxt_clear_int_mode(struct bnxt *bp)
9206 {
9207     if (bp->flags & BNXT_FLAG_USING_MSIX)
9208         pci_disable_msix(bp->pdev);
9209 
9210     kfree(bp->irq_tbl);
9211     bp->irq_tbl = NULL;
9212     bp->flags &= ~BNXT_FLAG_USING_MSIX;
9213 }
9214 
9215 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
9216 {
9217     int tcs = netdev_get_num_tc(bp->dev);
9218     bool irq_cleared = false;
9219     int rc;
9220 
9221     if (!bnxt_need_reserve_rings(bp))
9222         return 0;
9223 
9224     if (irq_re_init && BNXT_NEW_RM(bp) &&
9225         bnxt_get_num_msix(bp) != bp->total_irqs) {
9226         bnxt_ulp_irq_stop(bp);
9227         bnxt_clear_int_mode(bp);
9228         irq_cleared = true;
9229     }
9230     rc = __bnxt_reserve_rings(bp);
9231     if (irq_cleared) {
9232         if (!rc)
9233             rc = bnxt_init_int_mode(bp);
9234         bnxt_ulp_irq_restart(bp, rc);
9235     }
9236     if (rc) {
9237         netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
9238         return rc;
9239     }
9240     if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
9241         netdev_err(bp->dev, "tx ring reservation failure\n");
9242         netdev_reset_tc(bp->dev);
9243         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9244         return -ENOMEM;
9245     }
9246     return 0;
9247 }
9248 
9249 static void bnxt_free_irq(struct bnxt *bp)
9250 {
9251     struct bnxt_irq *irq;
9252     int i;
9253 
9254 #ifdef CONFIG_RFS_ACCEL
9255     free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
9256     bp->dev->rx_cpu_rmap = NULL;
9257 #endif
9258     if (!bp->irq_tbl || !bp->bnapi)
9259         return;
9260 
9261     for (i = 0; i < bp->cp_nr_rings; i++) {
9262         int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9263 
9264         irq = &bp->irq_tbl[map_idx];
9265         if (irq->requested) {
9266             if (irq->have_cpumask) {
9267                 irq_set_affinity_hint(irq->vector, NULL);
9268                 free_cpumask_var(irq->cpu_mask);
9269                 irq->have_cpumask = 0;
9270             }
9271             free_irq(irq->vector, bp->bnapi[i]);
9272         }
9273 
9274         irq->requested = 0;
9275     }
9276 }
9277 
9278 static int bnxt_request_irq(struct bnxt *bp)
9279 {
9280     int i, j, rc = 0;
9281     unsigned long flags = 0;
9282 #ifdef CONFIG_RFS_ACCEL
9283     struct cpu_rmap *rmap;
9284 #endif
9285 
9286     rc = bnxt_setup_int_mode(bp);
9287     if (rc) {
9288         netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
9289                rc);
9290         return rc;
9291     }
9292 #ifdef CONFIG_RFS_ACCEL
9293     rmap = bp->dev->rx_cpu_rmap;
9294 #endif
9295     if (!(bp->flags & BNXT_FLAG_USING_MSIX))
9296         flags = IRQF_SHARED;
9297 
9298     for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
9299         int map_idx = bnxt_cp_num_to_irq_num(bp, i);
9300         struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
9301 
9302 #ifdef CONFIG_RFS_ACCEL
9303         if (rmap && bp->bnapi[i]->rx_ring) {
9304             rc = irq_cpu_rmap_add(rmap, irq->vector);
9305             if (rc)
9306                 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
9307                         j);
9308             j++;
9309         }
9310 #endif
9311         rc = request_irq(irq->vector, irq->handler, flags, irq->name,
9312                  bp->bnapi[i]);
9313         if (rc)
9314             break;
9315 
9316         irq->requested = 1;
9317 
9318         if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
9319             int numa_node = dev_to_node(&bp->pdev->dev);
9320 
9321             irq->have_cpumask = 1;
9322             cpumask_set_cpu(cpumask_local_spread(i, numa_node),
9323                     irq->cpu_mask);
9324             rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
9325             if (rc) {
9326                 netdev_warn(bp->dev,
9327                         "Set affinity failed, IRQ = %d\n",
9328                         irq->vector);
9329                 break;
9330             }
9331         }
9332     }
9333     return rc;
9334 }
9335 
9336 static void bnxt_del_napi(struct bnxt *bp)
9337 {
9338     int i;
9339 
9340     if (!bp->bnapi)
9341         return;
9342 
9343     for (i = 0; i < bp->cp_nr_rings; i++) {
9344         struct bnxt_napi *bnapi = bp->bnapi[i];
9345 
9346         __netif_napi_del(&bnapi->napi);
9347     }
9348     /* Since we called __netif_napi_del(), we need to respect an RCU
9349      * grace period before freeing napi structures.
9350      */
9351     synchronize_net();
9352 }
9353 
9354 static void bnxt_init_napi(struct bnxt *bp)
9355 {
9356     int i;
9357     unsigned int cp_nr_rings = bp->cp_nr_rings;
9358     struct bnxt_napi *bnapi;
9359 
9360     if (bp->flags & BNXT_FLAG_USING_MSIX) {
9361         int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
9362 
9363         if (bp->flags & BNXT_FLAG_CHIP_P5)
9364             poll_fn = bnxt_poll_p5;
9365         else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
9366             cp_nr_rings--;
9367         for (i = 0; i < cp_nr_rings; i++) {
9368             bnapi = bp->bnapi[i];
9369             netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
9370         }
9371         if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9372             bnapi = bp->bnapi[cp_nr_rings];
9373             netif_napi_add(bp->dev, &bnapi->napi,
9374                        bnxt_poll_nitroa0, 64);
9375         }
9376     } else {
9377         bnapi = bp->bnapi[0];
9378         netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
9379     }
9380 }
9381 
9382 static void bnxt_disable_napi(struct bnxt *bp)
9383 {
9384     int i;
9385 
9386     if (!bp->bnapi ||
9387         test_and_set_bit(BNXT_STATE_NAPI_DISABLED, &bp->state))
9388         return;
9389 
9390     for (i = 0; i < bp->cp_nr_rings; i++) {
9391         struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
9392 
9393         napi_disable(&bp->bnapi[i]->napi);
9394         if (bp->bnapi[i]->rx_ring)
9395             cancel_work_sync(&cpr->dim.work);
9396     }
9397 }
9398 
9399 static void bnxt_enable_napi(struct bnxt *bp)
9400 {
9401     int i;
9402 
9403     clear_bit(BNXT_STATE_NAPI_DISABLED, &bp->state);
9404     for (i = 0; i < bp->cp_nr_rings; i++) {
9405         struct bnxt_napi *bnapi = bp->bnapi[i];
9406         struct bnxt_cp_ring_info *cpr;
9407 
9408         cpr = &bnapi->cp_ring;
9409         if (bnapi->in_reset)
9410             cpr->sw_stats.rx.rx_resets++;
9411         bnapi->in_reset = false;
9412 
9413         if (bnapi->rx_ring) {
9414             INIT_WORK(&cpr->dim.work, bnxt_dim_work);
9415             cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
9416         }
9417         napi_enable(&bnapi->napi);
9418     }
9419 }
9420 
9421 void bnxt_tx_disable(struct bnxt *bp)
9422 {
9423     int i;
9424     struct bnxt_tx_ring_info *txr;
9425 
9426     if (bp->tx_ring) {
9427         for (i = 0; i < bp->tx_nr_rings; i++) {
9428             txr = &bp->tx_ring[i];
9429             WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
9430         }
9431     }
9432     /* Make sure napi polls see @dev_state change */
9433     synchronize_net();
9434     /* Drop carrier first to prevent TX timeout */
9435     netif_carrier_off(bp->dev);
9436     /* Stop all TX queues */
9437     netif_tx_disable(bp->dev);
9438 }
9439 
9440 void bnxt_tx_enable(struct bnxt *bp)
9441 {
9442     int i;
9443     struct bnxt_tx_ring_info *txr;
9444 
9445     for (i = 0; i < bp->tx_nr_rings; i++) {
9446         txr = &bp->tx_ring[i];
9447         WRITE_ONCE(txr->dev_state, 0);
9448     }
9449     /* Make sure napi polls see @dev_state change */
9450     synchronize_net();
9451     netif_tx_wake_all_queues(bp->dev);
9452     if (BNXT_LINK_IS_UP(bp))
9453         netif_carrier_on(bp->dev);
9454 }
9455 
9456 static char *bnxt_report_fec(struct bnxt_link_info *link_info)
9457 {
9458     u8 active_fec = link_info->active_fec_sig_mode &
9459             PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
9460 
9461     switch (active_fec) {
9462     default:
9463     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
9464         return "None";
9465     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
9466         return "Clause 74 BaseR";
9467     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
9468         return "Clause 91 RS(528,514)";
9469     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
9470         return "Clause 91 RS544_1XN";
9471     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
9472         return "Clause 91 RS(544,514)";
9473     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
9474         return "Clause 91 RS272_1XN";
9475     case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
9476         return "Clause 91 RS(272,257)";
9477     }
9478 }
9479 
9480 void bnxt_report_link(struct bnxt *bp)
9481 {
9482     if (BNXT_LINK_IS_UP(bp)) {
9483         const char *signal = "";
9484         const char *flow_ctrl;
9485         const char *duplex;
9486         u32 speed;
9487         u16 fec;
9488 
9489         netif_carrier_on(bp->dev);
9490         speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
9491         if (speed == SPEED_UNKNOWN) {
9492             netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
9493             return;
9494         }
9495         if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
9496             duplex = "full";
9497         else
9498             duplex = "half";
9499         if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
9500             flow_ctrl = "ON - receive & transmit";
9501         else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
9502             flow_ctrl = "ON - transmit";
9503         else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
9504             flow_ctrl = "ON - receive";
9505         else
9506             flow_ctrl = "none";
9507         if (bp->link_info.phy_qcfg_resp.option_flags &
9508             PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN) {
9509             u8 sig_mode = bp->link_info.active_fec_sig_mode &
9510                       PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
9511             switch (sig_mode) {
9512             case PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ:
9513                 signal = "(NRZ) ";
9514                 break;
9515             case PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4:
9516                 signal = "(PAM4) ";
9517                 break;
9518             default:
9519                 break;
9520             }
9521         }
9522         netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s%s duplex, Flow control: %s\n",
9523                 speed, signal, duplex, flow_ctrl);
9524         if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP)
9525             netdev_info(bp->dev, "EEE is %s\n",
9526                     bp->eee.eee_active ? "active" :
9527                              "not active");
9528         fec = bp->link_info.fec_cfg;
9529         if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
9530             netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
9531                     (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
9532                     bnxt_report_fec(&bp->link_info));
9533     } else {
9534         netif_carrier_off(bp->dev);
9535         netdev_err(bp->dev, "NIC Link is Down\n");
9536     }
9537 }
9538 
9539 static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
9540 {
9541     if (!resp->supported_speeds_auto_mode &&
9542         !resp->supported_speeds_force_mode &&
9543         !resp->supported_pam4_speeds_auto_mode &&
9544         !resp->supported_pam4_speeds_force_mode)
9545         return true;
9546     return false;
9547 }
9548 
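/* Query PHY capabilities (HWRM_PORT_PHY_QCAPS) and cache them: phy_flags,
 * EEE support and LPI timer bounds, the supported NRZ/PAM4 autoneg speed
 * masks, and the port count.  Also tracks the PHY being administratively
 * disabled or re-enabled when firmware reports no supported speeds.
 */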
9549 static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
9550 {
9551     struct bnxt_link_info *link_info = &bp->link_info;
9552     struct hwrm_port_phy_qcaps_output *resp;
9553     struct hwrm_port_phy_qcaps_input *req;
9554     int rc = 0;
9555 
9556     if (bp->hwrm_spec_code < 0x10201)
9557         return 0;
9558 
9559     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS);
9560     if (rc)
9561         return rc;
9562 
9563     resp = hwrm_req_hold(bp, req);
9564     rc = hwrm_req_send(bp, req);
9565     if (rc)
9566         goto hwrm_phy_qcaps_exit;
9567 
9568     bp->phy_flags = resp->flags | (le16_to_cpu(resp->flags2) << 8);
9569     if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
9570         struct ethtool_eee *eee = &bp->eee;
9571         u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
9572 
9573         eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9574         bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
9575                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
9576         bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
9577                  PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
9578     }
9579 
9580     if (bp->hwrm_spec_code >= 0x10a01) {
9581         if (bnxt_phy_qcaps_no_speed(resp)) {
9582             link_info->phy_state = BNXT_PHY_STATE_DISABLED;
9583             netdev_warn(bp->dev, "Ethernet link disabled\n");
9584         } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
9585             link_info->phy_state = BNXT_PHY_STATE_ENABLED;
9586             netdev_info(bp->dev, "Ethernet link enabled\n");
9587             /* Phy re-enabled, reprobe the speeds */
9588             link_info->support_auto_speeds = 0;
9589             link_info->support_pam4_auto_speeds = 0;
9590         }
9591     }
9592     if (resp->supported_speeds_auto_mode)
9593         link_info->support_auto_speeds =
9594             le16_to_cpu(resp->supported_speeds_auto_mode);
9595     if (resp->supported_pam4_speeds_auto_mode)
9596         link_info->support_pam4_auto_speeds =
9597             le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
9598 
9599     bp->port_count = resp->port_cnt;
9600 
9601 hwrm_phy_qcaps_exit:
9602     hwrm_req_drop(bp, req);
9603     return rc;
9604 }
9605 
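/* Return true if @advertising contains any bit that is no longer set in
 * @supported.  For example, advertising = 0x6 and supported = 0x4 gives
 * diff = 0x2 and (supported | diff) = 0x6 != supported, so a previously
 * advertised speed has been dropped; advertising = 0x4 and supported = 0x6
 * returns false.
 */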
9606 static bool bnxt_support_dropped(u16 advertising, u16 supported)
9607 {
9608     u16 diff = advertising ^ supported;
9609 
9610     return ((supported | diff) != supported);
9611 }
9612 
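/* Refresh the cached link state from firmware (HWRM_PORT_PHY_QCFG): speed,
 * duplex, pause, FEC, EEE and module status.  When @chng_link_state is set,
 * update link_info->link_state and log transitions via bnxt_report_link().
 * If any advertised speeds are no longer supported, trim the advertisement
 * and re-issue the autoneg configuration.
 */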
9613 int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
9614 {
9615     struct bnxt_link_info *link_info = &bp->link_info;
9616     struct hwrm_port_phy_qcfg_output *resp;
9617     struct hwrm_port_phy_qcfg_input *req;
9618     u8 link_state = link_info->link_state;
9619     bool support_changed = false;
9620     int rc;
9621 
9622     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG);
9623     if (rc)
9624         return rc;
9625 
9626     resp = hwrm_req_hold(bp, req);
9627     rc = hwrm_req_send(bp, req);
9628     if (rc) {
9629         hwrm_req_drop(bp, req);
9630         if (BNXT_VF(bp) && rc == -ENODEV) {
9631             netdev_warn(bp->dev, "Cannot obtain link state while PF unavailable.\n");
9632             rc = 0;
9633         }
9634         return rc;
9635     }
9636 
9637     memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
9638     link_info->phy_link_status = resp->link;
9639     link_info->duplex = resp->duplex_cfg;
9640     if (bp->hwrm_spec_code >= 0x10800)
9641         link_info->duplex = resp->duplex_state;
9642     link_info->pause = resp->pause;
9643     link_info->auto_mode = resp->auto_mode;
9644     link_info->auto_pause_setting = resp->auto_pause;
9645     link_info->lp_pause = resp->link_partner_adv_pause;
9646     link_info->force_pause_setting = resp->force_pause;
9647     link_info->duplex_setting = resp->duplex_cfg;
9648     if (link_info->phy_link_status == BNXT_LINK_LINK)
9649         link_info->link_speed = le16_to_cpu(resp->link_speed);
9650     else
9651         link_info->link_speed = 0;
9652     link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
9653     link_info->force_pam4_link_speed =
9654         le16_to_cpu(resp->force_pam4_link_speed);
9655     link_info->support_speeds = le16_to_cpu(resp->support_speeds);
9656     link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
9657     link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
9658     link_info->auto_pam4_link_speeds =
9659         le16_to_cpu(resp->auto_pam4_link_speed_mask);
9660     link_info->lp_auto_link_speeds =
9661         le16_to_cpu(resp->link_partner_adv_speeds);
9662     link_info->lp_auto_pam4_link_speeds =
9663         resp->link_partner_pam4_adv_speeds;
9664     link_info->preemphasis = le32_to_cpu(resp->preemphasis);
9665     link_info->phy_ver[0] = resp->phy_maj;
9666     link_info->phy_ver[1] = resp->phy_min;
9667     link_info->phy_ver[2] = resp->phy_bld;
9668     link_info->media_type = resp->media_type;
9669     link_info->phy_type = resp->phy_type;
9670     link_info->transceiver = resp->xcvr_pkg_type;
9671     link_info->phy_addr = resp->eee_config_phy_addr &
9672                   PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
9673     link_info->module_status = resp->module_status;
9674 
9675     if (bp->phy_flags & BNXT_PHY_FL_EEE_CAP) {
9676         struct ethtool_eee *eee = &bp->eee;
9677         u16 fw_speeds;
9678 
9679         eee->eee_active = 0;
9680         if (resp->eee_config_phy_addr &
9681             PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
9682             eee->eee_active = 1;
9683             fw_speeds = le16_to_cpu(
9684                 resp->link_partner_adv_eee_link_speed_mask);
9685             eee->lp_advertised =
9686                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9687         }
9688 
9689         /* Pull initial EEE config */
9690         if (!chng_link_state) {
9691             if (resp->eee_config_phy_addr &
9692                 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
9693                 eee->eee_enabled = 1;
9694 
9695             fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
9696             eee->advertised =
9697                 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
9698 
9699             if (resp->eee_config_phy_addr &
9700                 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
9701                 __le32 tmr;
9702 
9703                 eee->tx_lpi_enabled = 1;
9704                 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
9705                 eee->tx_lpi_timer = le32_to_cpu(tmr) &
9706                     PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
9707             }
9708         }
9709     }
9710 
9711     link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
9712     if (bp->hwrm_spec_code >= 0x10504) {
9713         link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
9714         link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
9715     }
9716     /* TODO: need to add more logic to report VF link */
9717     if (chng_link_state) {
9718         if (link_info->phy_link_status == BNXT_LINK_LINK)
9719             link_info->link_state = BNXT_LINK_STATE_UP;
9720         else
9721             link_info->link_state = BNXT_LINK_STATE_DOWN;
9722         if (link_state != link_info->link_state)
9723             bnxt_report_link(bp);
9724     } else {
9725         /* always set link down if not required to update link state */
9726         link_info->link_state = BNXT_LINK_STATE_DOWN;
9727     }
9728     hwrm_req_drop(bp, req);
9729 
9730     if (!BNXT_PHY_CFG_ABLE(bp))
9731         return 0;
9732 
9733     /* Check if any advertised speeds are no longer supported. The caller
9734      * holds the link_lock mutex, so we can modify link_info settings.
9735      */
9736     if (bnxt_support_dropped(link_info->advertising,
9737                  link_info->support_auto_speeds)) {
9738         link_info->advertising = link_info->support_auto_speeds;
9739         support_changed = true;
9740     }
9741     if (bnxt_support_dropped(link_info->advertising_pam4,
9742                  link_info->support_pam4_auto_speeds)) {
9743         link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
9744         support_changed = true;
9745     }
9746     if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
9747         bnxt_hwrm_set_link_setting(bp, true, false);
9748     return 0;
9749 }
9750 
9751 static void bnxt_get_port_module_status(struct bnxt *bp)
9752 {
9753     struct bnxt_link_info *link_info = &bp->link_info;
9754     struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
9755     u8 module_status;
9756 
9757     if (bnxt_update_link(bp, true))
9758         return;
9759 
9760     module_status = link_info->module_status;
9761     switch (module_status) {
9762     case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
9763     case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
9764     case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
9765         netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
9766                 bp->pf.port_id);
9767         if (bp->hwrm_spec_code >= 0x10201) {
9768             netdev_warn(bp->dev, "Module part number %s\n",
9769                     resp->phy_vendor_partnumber);
9770         }
9771         if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
9772             netdev_warn(bp->dev, "TX is disabled\n");
9773         if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
9774             netdev_warn(bp->dev, "SFP+ module is shutdown\n");
9775     }
9776 }
9777 
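/* Fill in the pause/flow-control fields of a PORT_PHY_CFG request: use the
 * auto_pause fields when flow-control autoneg is enabled, otherwise program
 * force_pause (mirrored into auto_pause on newer firmware specs).
 */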
9778 static void
9779 bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9780 {
9781     if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
9782         if (bp->hwrm_spec_code >= 0x10201)
9783             req->auto_pause =
9784                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
9785         if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9786             req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
9787         if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9788             req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
9789         req->enables |=
9790             cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9791     } else {
9792         if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
9793             req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
9794         if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
9795             req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
9796         req->enables |=
9797             cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
9798         if (bp->hwrm_spec_code >= 0x10201) {
9799             req->auto_pause = req->force_pause;
9800             req->enables |= cpu_to_le32(
9801                 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
9802         }
9803     }
9804 }
9805 
9806 static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
9807 {
9808     if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
9809         req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
9810         if (bp->link_info.advertising) {
9811             req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
9812             req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
9813         }
9814         if (bp->link_info.advertising_pam4) {
9815             req->enables |=
9816                 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
9817             req->auto_link_pam4_speed_mask =
9818                 cpu_to_le16(bp->link_info.advertising_pam4);
9819         }
9820         req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
9821         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
9822     } else {
9823         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
9824         if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
9825             req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9826             req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
9827         } else {
9828             req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
9829         }
9830     }
9831 
9832     /* tell the ChiMP firmware that the setting takes effect immediately */
9833     req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
9834 }
9835 
9836 int bnxt_hwrm_set_pause(struct bnxt *bp)
9837 {
9838     struct hwrm_port_phy_cfg_input *req;
9839     int rc;
9840 
9841     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9842     if (rc)
9843         return rc;
9844 
9845     bnxt_hwrm_set_pause_common(bp, req);
9846 
9847     if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
9848         bp->link_info.force_link_chng)
9849         bnxt_hwrm_set_link_common(bp, req);
9850 
9851     rc = hwrm_req_send(bp, req);
9852     if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
9853         /* since changing the pause setting doesn't trigger any link
9854          * change event, the driver needs to update the current pause
9855          * result upon successful return of the phy_cfg command
9856          */
9857         bp->link_info.pause =
9858         bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
9859         bp->link_info.auto_pause_setting = 0;
9860         if (!bp->link_info.force_link_chng)
9861             bnxt_report_link(bp);
9862     }
9863     bp->link_info.force_link_chng = false;
9864     return rc;
9865 }
9866 
9867 static void bnxt_hwrm_set_eee(struct bnxt *bp,
9868                   struct hwrm_port_phy_cfg_input *req)
9869 {
9870     struct ethtool_eee *eee = &bp->eee;
9871 
9872     if (eee->eee_enabled) {
9873         u16 eee_speeds;
9874         u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
9875 
9876         if (eee->tx_lpi_enabled)
9877             flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
9878         else
9879             flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
9880 
9881         req->flags |= cpu_to_le32(flags);
9882         eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
9883         req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
9884         req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
9885     } else {
9886         req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
9887     }
9888 }
9889 
9890 int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
9891 {
9892     struct hwrm_port_phy_cfg_input *req;
9893     int rc;
9894 
9895     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9896     if (rc)
9897         return rc;
9898 
9899     if (set_pause)
9900         bnxt_hwrm_set_pause_common(bp, req);
9901 
9902     bnxt_hwrm_set_link_common(bp, req);
9903 
9904     if (set_eee)
9905         bnxt_hwrm_set_eee(bp, req);
9906     return hwrm_req_send(bp, req);
9907 }
9908 
9909 static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
9910 {
9911     struct hwrm_port_phy_cfg_input *req;
9912     int rc;
9913 
9914     if (!BNXT_SINGLE_PF(bp))
9915         return 0;
9916 
9917     if (pci_num_vf(bp->pdev) &&
9918         !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN))
9919         return 0;
9920 
9921     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG);
9922     if (rc)
9923         return rc;
9924 
9925     req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
9926     rc = hwrm_req_send(bp, req);
9927     if (!rc) {
9928         mutex_lock(&bp->link_lock);
9929         /* The device is not obliged to bring the link down in certain
9930          * scenarios, even when forced.  Setting the state to unknown is
9931          * consistent with driver startup and will force the link state
9932          * to be reported during subsequent open based on PORT_PHY_QCFG.
9933          */
9934         bp->link_info.link_state = BNXT_LINK_STATE_UNKNOWN;
9935         mutex_unlock(&bp->link_lock);
9936     }
9937     return rc;
9938 }
9939 
9940 static int bnxt_fw_reset_via_optee(struct bnxt *bp)
9941 {
9942 #ifdef CONFIG_TEE_BNXT_FW
9943     int rc = tee_bnxt_fw_load();
9944 
9945     if (rc)
9946         netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
9947 
9948     return rc;
9949 #else
9950     netdev_err(bp->dev, "OP-TEE not supported\n");
9951     return -ENODEV;
9952 #endif
9953 }
9954 
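/* Poll the firmware health register and the HWRM channel until firmware is
 * no longer booting or recovering.  If firmware crashed with no master
 * function, request a reset through OP-TEE.  Returns -ENODEV if the health
 * status is unavailable or firmware never becomes healthy.
 */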
9955 static int bnxt_try_recover_fw(struct bnxt *bp)
9956 {
9957     if (bp->fw_health && bp->fw_health->status_reliable) {
9958         int retry = 0, rc;
9959         u32 sts;
9960 
9961         do {
9962             sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
9963             rc = bnxt_hwrm_poll(bp);
9964             if (!BNXT_FW_IS_BOOTING(sts) &&
9965                 !BNXT_FW_IS_RECOVERING(sts))
9966                 break;
9967             retry++;
9968         } while (rc == -EBUSY && retry < BNXT_FW_RETRY);
9969 
9970         if (!BNXT_FW_IS_HEALTHY(sts)) {
9971             netdev_err(bp->dev,
9972                    "Firmware not responding, status: 0x%x\n",
9973                    sts);
9974             rc = -ENODEV;
9975         }
9976         if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
9977             netdev_warn(bp->dev, "Firmware recovery via OP-TEE requested\n");
9978             return bnxt_fw_reset_via_optee(bp);
9979         }
9980         return rc;
9981     }
9982 
9983     return -ENODEV;
9984 }
9985 
9986 int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
9987 {
9988     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
9989     int rc;
9990 
9991     if (!BNXT_NEW_RM(bp))
9992         return 0; /* no resource reservations required */
9993 
9994     rc = bnxt_hwrm_func_resc_qcaps(bp, true);
9995     if (rc)
9996         netdev_err(bp->dev, "resc_qcaps failed\n");
9997 
9998     hw_resc->resv_cp_rings = 0;
9999     hw_resc->resv_stat_ctxs = 0;
10000     hw_resc->resv_irqs = 0;
10001     hw_resc->resv_tx_rings = 0;
10002     hw_resc->resv_rx_rings = 0;
10003     hw_resc->resv_hw_ring_grps = 0;
10004     hw_resc->resv_vnics = 0;
10005     if (!fw_reset) {
10006         bp->tx_nr_rings = 0;
10007         bp->rx_nr_rings = 0;
10008     }
10009 
10010     return rc;
10011 }
10012 
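/* Tell firmware that the driver interface is going up or down
 * (HWRM_FUNC_DRV_IF_CHANGE).  On the "up" path, use the response flags to
 * detect a completed hot firmware reset or a resource change since the last
 * notification, and reinitialize context memory, interrupt mode and resource
 * reservations accordingly.
 */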
10013 static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
10014 {
10015     struct hwrm_func_drv_if_change_output *resp;
10016     struct hwrm_func_drv_if_change_input *req;
10017     bool fw_reset = !bp->irq_tbl;
10018     bool resc_reinit = false;
10019     int rc, retry = 0;
10020     u32 flags = 0;
10021 
10022     if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
10023         return 0;
10024 
10025     rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE);
10026     if (rc)
10027         return rc;
10028 
10029     if (up)
10030         req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
10031     resp = hwrm_req_hold(bp, req);
10032 
10033     hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT);
10034     while (retry < BNXT_FW_IF_RETRY) {
10035         rc = hwrm_req_send(bp, req);
10036         if (rc != -EAGAIN)
10037             break;
10038 
10039         msleep(50);
10040         retry++;
10041     }
10042 
10043     if (rc == -EAGAIN) {
10044         hwrm_req_drop(bp, req);
10045         return rc;
10046     } else if (!rc) {
10047         flags = le32_to_cpu(resp->flags);
10048     } else if (up) {
10049         rc = bnxt_try_recover_fw(bp);
10050         fw_reset = true;
10051     }
10052     hwrm_req_drop(bp, req);
10053     if (rc)
10054         return rc;
10055 
10056     if (!up) {
10057         bnxt_inv_fw_health_reg(bp);
10058         return 0;
10059     }
10060 
10061     if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)
10062         resc_reinit = true;
10063     if (flags & FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE ||
10064         test_bit(BNXT_STATE_FW_RESET_DET, &bp->state))
10065         fw_reset = true;
10066     else
10067         bnxt_remap_fw_health_regs(bp);
10068 
10069     if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state) && !fw_reset) {
10070         netdev_err(bp->dev, "RESET_DONE not set during FW reset.\n");
10071         set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10072         return -ENODEV;
10073     }
10074     if (resc_reinit || fw_reset) {
10075         if (fw_reset) {
10076             set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10077             if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10078                 bnxt_ulp_stop(bp);
10079             bnxt_free_ctx_mem(bp);
10080             kfree(bp->ctx);
10081             bp->ctx = NULL;
10082             bnxt_dcb_free(bp);
10083             rc = bnxt_fw_init_one(bp);
10084             if (rc) {
10085                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10086                 set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10087                 return rc;
10088             }
10089             bnxt_clear_int_mode(bp);
10090             rc = bnxt_init_int_mode(bp);
10091             if (rc) {
10092                 clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10093                 netdev_err(bp->dev, "init int mode failed\n");
10094                 return rc;
10095             }
10096         }
10097         rc = bnxt_cancel_reservations(bp, fw_reset);
10098     }
10099     return rc;
10100 }
10101 
10102 static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
10103 {
10104     struct hwrm_port_led_qcaps_output *resp;
10105     struct hwrm_port_led_qcaps_input *req;
10106     struct bnxt_pf_info *pf = &bp->pf;
10107     int rc;
10108 
10109     bp->num_leds = 0;
10110     if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
10111         return 0;
10112 
10113     rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS);
10114     if (rc)
10115         return rc;
10116 
10117     req->port_id = cpu_to_le16(pf->port_id);
10118     resp = hwrm_req_hold(bp, req);
10119     rc = hwrm_req_send(bp, req);
10120     if (rc) {
10121         hwrm_req_drop(bp, req);
10122         return rc;
10123     }
10124     if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
10125         int i;
10126 
10127         bp->num_leds = resp->num_leds;
10128         memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
10129                          bp->num_leds);
10130         for (i = 0; i < bp->num_leds; i++) {
10131             struct bnxt_led_info *led = &bp->leds[i];
10132             __le16 caps = led->led_state_caps;
10133 
10134             if (!led->led_group_id ||
10135                 !BNXT_LED_ALT_BLINK_CAP(caps)) {
10136                 bp->num_leds = 0;
10137                 break;
10138             }
10139         }
10140     }
10141     hwrm_req_drop(bp, req);
10142     return 0;
10143 }
10144 
10145 int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
10146 {
10147     struct hwrm_wol_filter_alloc_output *resp;
10148     struct hwrm_wol_filter_alloc_input *req;
10149     int rc;
10150 
10151     rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC);
10152     if (rc)
10153         return rc;
10154 
10155     req->port_id = cpu_to_le16(bp->pf.port_id);
10156     req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
10157     req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
10158     memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN);
10159 
10160     resp = hwrm_req_hold(bp, req);
10161     rc = hwrm_req_send(bp, req);
10162     if (!rc)
10163         bp->wol_filter_id = resp->wol_filter_id;
10164     hwrm_req_drop(bp, req);
10165     return rc;
10166 }
10167 
10168 int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
10169 {
10170     struct hwrm_wol_filter_free_input *req;
10171     int rc;
10172 
10173     rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE);
10174     if (rc)
10175         return rc;
10176 
10177     req->port_id = cpu_to_le16(bp->pf.port_id);
10178     req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
10179     req->wol_filter_id = bp->wol_filter_id;
10180 
10181     return hwrm_req_send(bp, req);
10182 }
10183 
10184 static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
10185 {
10186     struct hwrm_wol_filter_qcfg_output *resp;
10187     struct hwrm_wol_filter_qcfg_input *req;
10188     u16 next_handle = 0;
10189     int rc;
10190 
10191     rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG);
10192     if (rc)
10193         return rc;
10194 
10195     req->port_id = cpu_to_le16(bp->pf.port_id);
10196     req->handle = cpu_to_le16(handle);
10197     resp = hwrm_req_hold(bp, req);
10198     rc = hwrm_req_send(bp, req);
10199     if (!rc) {
10200         next_handle = le16_to_cpu(resp->next_handle);
10201         if (next_handle != 0) {
10202             if (resp->wol_type ==
10203                 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
10204                 bp->wol = 1;
10205                 bp->wol_filter_id = resp->wol_filter_id;
10206             }
10207         }
10208     }
10209     hwrm_req_drop(bp, req);
10210     return next_handle;
10211 }
10212 
10213 static void bnxt_get_wol_settings(struct bnxt *bp)
10214 {
10215     u16 handle = 0;
10216 
10217     bp->wol = 0;
10218     if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
10219         return;
10220 
10221     do {
10222         handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
10223     } while (handle && handle != 0xffff);
10224 }
10225 
10226 #ifdef CONFIG_BNXT_HWMON
10227 static ssize_t bnxt_show_temp(struct device *dev,
10228                   struct device_attribute *devattr, char *buf)
10229 {
10230     struct hwrm_temp_monitor_query_output *resp;
10231     struct hwrm_temp_monitor_query_input *req;
10232     struct bnxt *bp = dev_get_drvdata(dev);
10233     u32 len = 0;
10234     int rc;
10235 
10236     rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10237     if (rc)
10238         return rc;
10239     resp = hwrm_req_hold(bp, req);
10240     rc = hwrm_req_send(bp, req);
10241     if (!rc)
10242         len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegrees */
10243     hwrm_req_drop(bp, req);
10244     if (rc)
10245         return rc;
10246     return len;
10247 }
10248 static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
10249 
10250 static struct attribute *bnxt_attrs[] = {
10251     &sensor_dev_attr_temp1_input.dev_attr.attr,
10252     NULL
10253 };
10254 ATTRIBUTE_GROUPS(bnxt);
10255 
10256 static void bnxt_hwmon_close(struct bnxt *bp)
10257 {
10258     if (bp->hwmon_dev) {
10259         hwmon_device_unregister(bp->hwmon_dev);
10260         bp->hwmon_dev = NULL;
10261     }
10262 }
10263 
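/* Register a hwmon device exposing temp1_input (see bnxt_show_temp()) if
 * firmware answers HWRM_TEMP_MONITOR_QUERY; tear it down instead if the
 * query is denied or unsupported.
 */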
10264 static void bnxt_hwmon_open(struct bnxt *bp)
10265 {
10266     struct hwrm_temp_monitor_query_input *req;
10267     struct pci_dev *pdev = bp->pdev;
10268     int rc;
10269 
10270     rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY);
10271     if (!rc)
10272         rc = hwrm_req_send_silent(bp, req);
10273     if (rc == -EACCES || rc == -EOPNOTSUPP) {
10274         bnxt_hwmon_close(bp);
10275         return;
10276     }
10277 
10278     if (bp->hwmon_dev)
10279         return;
10280 
10281     bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
10282                               DRV_MODULE_NAME, bp,
10283                               bnxt_groups);
10284     if (IS_ERR(bp->hwmon_dev)) {
10285         bp->hwmon_dev = NULL;
10286         dev_warn(&pdev->dev, "Cannot register hwmon device\n");
10287     }
10288 }
10289 #else
10290 static void bnxt_hwmon_close(struct bnxt *bp)
10291 {
10292 }
10293 
10294 static void bnxt_hwmon_open(struct bnxt *bp)
10295 {
10296 }
10297 #endif
10298 
10299 static bool bnxt_eee_config_ok(struct bnxt *bp)
10300 {
10301     struct ethtool_eee *eee = &bp->eee;
10302     struct bnxt_link_info *link_info = &bp->link_info;
10303 
10304     if (!(bp->phy_flags & BNXT_PHY_FL_EEE_CAP))
10305         return true;
10306 
10307     if (eee->eee_enabled) {
10308         u32 advertising =
10309             _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
10310 
10311         if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10312             eee->eee_enabled = 0;
10313             return false;
10314         }
10315         if (eee->advertised & ~advertising) {
10316             eee->advertised = advertising & eee->supported;
10317             return false;
10318         }
10319     }
10320     return true;
10321 }
10322 
10323 static int bnxt_update_phy_setting(struct bnxt *bp)
10324 {
10325     int rc;
10326     bool update_link = false;
10327     bool update_pause = false;
10328     bool update_eee = false;
10329     struct bnxt_link_info *link_info = &bp->link_info;
10330 
10331     rc = bnxt_update_link(bp, true);
10332     if (rc) {
10333         netdev_err(bp->dev, "failed to update link (rc: %x)\n",
10334                rc);
10335         return rc;
10336     }
10337     if (!BNXT_SINGLE_PF(bp))
10338         return 0;
10339 
10340     if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10341         (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
10342         link_info->req_flow_ctrl)
10343         update_pause = true;
10344     if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
10345         link_info->force_pause_setting != link_info->req_flow_ctrl)
10346         update_pause = true;
10347     if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
10348         if (BNXT_AUTO_MODE(link_info->auto_mode))
10349             update_link = true;
10350         if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
10351             link_info->req_link_speed != link_info->force_link_speed)
10352             update_link = true;
10353         else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
10354              link_info->req_link_speed != link_info->force_pam4_link_speed)
10355             update_link = true;
10356         if (link_info->req_duplex != link_info->duplex_setting)
10357             update_link = true;
10358     } else {
10359         if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
10360             update_link = true;
10361         if (link_info->advertising != link_info->auto_link_speeds ||
10362             link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
10363             update_link = true;
10364     }
10365 
10366     /* The last close may have shut down the link, so we need to call
10367      * PHY_CFG to bring it back up.
10368      */
10369     if (!BNXT_LINK_IS_UP(bp))
10370         update_link = true;
10371 
10372     if (!bnxt_eee_config_ok(bp))
10373         update_eee = true;
10374 
10375     if (update_link)
10376         rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
10377     else if (update_pause)
10378         rc = bnxt_hwrm_set_pause(bp);
10379     if (rc) {
10380         netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
10381                rc);
10382         return rc;
10383     }
10384 
10385     return rc;
10386 }
10387 
10388 /* Common routine to pre-map certain register blocks to different GRC windows.
10389  * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
10390  * in the PF and 3 windows in the VF can be customized to map in different
10391  * register blocks.
10392  */
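/* The +12 byte offset used below appears to select the register holding the
 * base address of GRC window #4, assuming the window-base registers are laid
 * out as consecutive 32-bit registers starting with window #1; the exact
 * layout is chip specific.
 */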
10393 static void bnxt_preset_reg_win(struct bnxt *bp)
10394 {
10395     if (BNXT_PF(bp)) {
10396         /* CAG registers map to GRC window #4 */
10397         writel(BNXT_CAG_REG_BASE,
10398                bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
10399     }
10400 }
10401 
10402 static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
10403 
10404 static int bnxt_reinit_after_abort(struct bnxt *bp)
10405 {
10406     int rc;
10407 
10408     if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
10409         return -EBUSY;
10410 
10411     if (bp->dev->reg_state == NETREG_UNREGISTERED)
10412         return -ENODEV;
10413 
10414     rc = bnxt_fw_init_one(bp);
10415     if (!rc) {
10416         bnxt_clear_int_mode(bp);
10417         rc = bnxt_init_int_mode(bp);
10418         if (!rc) {
10419             clear_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10420             set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
10421         }
10422     }
10423     return rc;
10424 }
10425 
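/* Core open path: reserve rings, allocate rings/buffers, set up NAPI and
 * IRQs (when @irq_re_init), initialize the NIC, bring up NAPI and
 * interrupts, re-apply PHY settings under link_lock (when @link_re_init),
 * enable TX, start the periodic timer and check the SFP+ module status.
 * On failure, everything allocated here is unwound.
 */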
10426 static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10427 {
10428     int rc = 0;
10429 
10430     bnxt_preset_reg_win(bp);
10431     netif_carrier_off(bp->dev);
10432     if (irq_re_init) {
10433         /* Reserve rings now if none were reserved at driver probe. */
10434         rc = bnxt_init_dflt_ring_mode(bp);
10435         if (rc) {
10436             netdev_err(bp->dev, "Failed to reserve default rings at open\n");
10437             return rc;
10438         }
10439     }
10440     rc = bnxt_reserve_rings(bp, irq_re_init);
10441     if (rc)
10442         return rc;
10443     if ((bp->flags & BNXT_FLAG_RFS) &&
10444         !(bp->flags & BNXT_FLAG_USING_MSIX)) {
10445         /* disable RFS if falling back to INTA */
10446         bp->dev->hw_features &= ~NETIF_F_NTUPLE;
10447         bp->flags &= ~BNXT_FLAG_RFS;
10448     }
10449 
10450     rc = bnxt_alloc_mem(bp, irq_re_init);
10451     if (rc) {
10452         netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10453         goto open_err_free_mem;
10454     }
10455 
10456     if (irq_re_init) {
10457         bnxt_init_napi(bp);
10458         rc = bnxt_request_irq(bp);
10459         if (rc) {
10460             netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
10461             goto open_err_irq;
10462         }
10463     }
10464 
10465     rc = bnxt_init_nic(bp, irq_re_init);
10466     if (rc) {
10467         netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10468         goto open_err_irq;
10469     }
10470 
10471     bnxt_enable_napi(bp);
10472     bnxt_debug_dev_init(bp);
10473 
10474     if (link_re_init) {
10475         mutex_lock(&bp->link_lock);
10476         rc = bnxt_update_phy_setting(bp);
10477         mutex_unlock(&bp->link_lock);
10478         if (rc) {
10479             netdev_warn(bp->dev, "failed to update phy settings\n");
10480             if (BNXT_SINGLE_PF(bp)) {
10481                 bp->link_info.phy_retry = true;
10482                 bp->link_info.phy_retry_expires =
10483                     jiffies + 5 * HZ;
10484             }
10485         }
10486     }
10487 
10488     if (irq_re_init)
10489         udp_tunnel_nic_reset_ntf(bp->dev);
10490 
10491     if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
10492         if (!static_key_enabled(&bnxt_xdp_locking_key))
10493             static_branch_enable(&bnxt_xdp_locking_key);
10494     } else if (static_key_enabled(&bnxt_xdp_locking_key)) {
10495         static_branch_disable(&bnxt_xdp_locking_key);
10496     }
10497     set_bit(BNXT_STATE_OPEN, &bp->state);
10498     bnxt_enable_int(bp);
10499     /* Enable TX queues */
10500     bnxt_tx_enable(bp);
10501     mod_timer(&bp->timer, jiffies + bp->current_interval);
10502     /* Poll link status and check for SFP+ module status */
10503     mutex_lock(&bp->link_lock);
10504     bnxt_get_port_module_status(bp);
10505     mutex_unlock(&bp->link_lock);
10506 
10507     /* VF-reps may need to be re-opened after the PF is re-opened */
10508     if (BNXT_PF(bp))
10509         bnxt_vf_reps_open(bp);
10510     bnxt_ptp_init_rtc(bp, true);
10511     bnxt_ptp_cfg_tstamp_filters(bp);
10512     return 0;
10513 
10514 open_err_irq:
10515     bnxt_del_napi(bp);
10516 
10517 open_err_free_mem:
10518     bnxt_free_skbs(bp);
10519     bnxt_free_irq(bp);
10520     bnxt_free_mem(bp, true);
10521     return rc;
10522 }
10523 
10524 /* rtnl_lock held */
10525 int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10526 {
10527     int rc = 0;
10528 
10529     if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
10530         rc = -EIO;
10531     if (!rc)
10532         rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
10533     if (rc) {
10534         netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
10535         dev_close(bp->dev);
10536     }
10537     return rc;
10538 }
10539 
10540 /* rtnl_lock held, open the NIC halfway by allocating all resources, but
10541  * NAPI, IRQ, and TX are not enabled.  This is mainly used for offline
10542  * self-tests.
10543  */
10544 int bnxt_half_open_nic(struct bnxt *bp)
10545 {
10546     int rc = 0;
10547 
10548     if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10549         netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n");
10550         rc = -ENODEV;
10551         goto half_open_err;
10552     }
10553 
10554     rc = bnxt_alloc_mem(bp, true);
10555     if (rc) {
10556         netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
10557         goto half_open_err;
10558     }
10559     set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10560     rc = bnxt_init_nic(bp, true);
10561     if (rc) {
10562         clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10563         netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
10564         goto half_open_err;
10565     }
10566     return 0;
10567 
10568 half_open_err:
10569     bnxt_free_skbs(bp);
10570     bnxt_free_mem(bp, true);
10571     dev_close(bp->dev);
10572     return rc;
10573 }
10574 
10575 /* rtnl_lock held, this call can only be made after a previous successful
10576  * call to bnxt_half_open_nic().
10577  */
10578 void bnxt_half_close_nic(struct bnxt *bp)
10579 {
10580     bnxt_hwrm_resource_free(bp, false, true);
10581     bnxt_free_skbs(bp);
10582     bnxt_free_mem(bp, true);
10583     clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
10584 }
10585 
10586 void bnxt_reenable_sriov(struct bnxt *bp)
10587 {
10588     if (BNXT_PF(bp)) {
10589         struct bnxt_pf_info *pf = &bp->pf;
10590         int n = pf->active_vfs;
10591 
10592         if (n)
10593             bnxt_cfg_hw_sriov(bp, &n, true);
10594     }
10595 }
10596 
10597 static int bnxt_open(struct net_device *dev)
10598 {
10599     struct bnxt *bp = netdev_priv(dev);
10600     int rc;
10601 
10602     if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
10603         rc = bnxt_reinit_after_abort(bp);
10604         if (rc) {
10605             if (rc == -EBUSY)
10606                 netdev_err(bp->dev, "A previous firmware reset has not completed, aborting\n");
10607             else
10608                 netdev_err(bp->dev, "Failed to reinitialize after aborted firmware reset\n");
10609             return -ENODEV;
10610         }
10611     }
10612 
10613     rc = bnxt_hwrm_if_change(bp, true);
10614     if (rc)
10615         return rc;
10616 
10617     rc = __bnxt_open_nic(bp, true, true);
10618     if (rc) {
10619         bnxt_hwrm_if_change(bp, false);
10620     } else {
10621         if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
10622             if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10623                 bnxt_ulp_start(bp, 0);
10624                 bnxt_reenable_sriov(bp);
10625             }
10626         }
10627         bnxt_hwmon_open(bp);
10628     }
10629 
10630     return rc;
10631 }
10632 
10633 static bool bnxt_drv_busy(struct bnxt *bp)
10634 {
10635     return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
10636         test_bit(BNXT_STATE_READ_STATS, &bp->state));
10637 }
10638 
10639 static void bnxt_get_ring_stats(struct bnxt *bp,
10640                 struct rtnl_link_stats64 *stats);
10641 
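/* Core close path: close the VF representors, stop TX, clear BNXT_STATE_OPEN
 * and wait for in-progress stats readers and slow-path work, flush the rings
 * and disable interrupts, then disable NAPI, free SKBs and (optionally) IRQs
 * and memory.  Ring statistics are saved before the rings are freed so they
 * can be folded into the totals after a re-open.
 */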
10642 static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
10643                  bool link_re_init)
10644 {
10645     /* Close the VF-reps before closing PF */
10646     if (BNXT_PF(bp))
10647         bnxt_vf_reps_close(bp);
10648 
10649     /* Change device state to avoid TX queue wake-ups */
10650     bnxt_tx_disable(bp);
10651 
10652     clear_bit(BNXT_STATE_OPEN, &bp->state);
10653     smp_mb__after_atomic();
10654     while (bnxt_drv_busy(bp))
10655         msleep(20);
10656 
10657     /* Flush rings and disable interrupts */
10658     bnxt_shutdown_nic(bp, irq_re_init);
10659 
10660     /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
10661 
10662     bnxt_debug_dev_exit(bp);
10663     bnxt_disable_napi(bp);
10664     del_timer_sync(&bp->timer);
10665     bnxt_free_skbs(bp);
10666 
10667     /* Save ring stats before shutdown */
10668     if (bp->bnapi && irq_re_init)
10669         bnxt_get_ring_stats(bp, &bp->net_stats_prev);
10670     if (irq_re_init) {
10671         bnxt_free_irq(bp);
10672         bnxt_del_napi(bp);
10673     }
10674     bnxt_free_mem(bp, irq_re_init);
10675 }
10676 
10677 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
10678 {
10679     int rc = 0;
10680 
10681     if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
10682         /* If we get here, it means firmware reset is in progress
10683          * while we are trying to close.  We can safely proceed with
10684          * the close because we are holding rtnl_lock().  Some firmware
10685          * messages may fail as we proceed to close.  We set the
10686          * ABORT_ERR flag here so that the FW reset thread will later
10687          * abort when it gets the rtnl_lock() and sees the flag.
10688          */
10689         netdev_warn(bp->dev, "FW reset in progress during close, FW reset will be aborted\n");
10690         set_bit(BNXT_STATE_ABORT_ERR, &bp->state);
10691     }
10692 
10693 #ifdef CONFIG_BNXT_SRIOV
10694     if (bp->sriov_cfg) {
10695         rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
10696                               !bp->sriov_cfg,
10697                               BNXT_SRIOV_CFG_WAIT_TMO);
10698         if (rc)
10699             netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
10700     }
10701 #endif
10702     __bnxt_close_nic(bp, irq_re_init, link_re_init);
10703     return rc;
10704 }
10705 
10706 static int bnxt_close(struct net_device *dev)
10707 {
10708     struct bnxt *bp = netdev_priv(dev);
10709 
10710     bnxt_hwmon_close(bp);
10711     bnxt_close_nic(bp, true, true);
10712     bnxt_hwrm_shutdown_link(bp);
10713     bnxt_hwrm_if_change(bp, false);
10714     return 0;
10715 }
10716 
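/* MDIO register read proxied through firmware (HWRM_PORT_PHY_MDIO_READ),
 * supporting both clause 22 and clause 45 addressing.  Used by the
 * SIOCGMIIREG ioctl handler below.
 */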
10717 static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg,
10718                    u16 *val)
10719 {
10720     struct hwrm_port_phy_mdio_read_output *resp;
10721     struct hwrm_port_phy_mdio_read_input *req;
10722     int rc;
10723 
10724     if (bp->hwrm_spec_code < 0x10a00)
10725         return -EOPNOTSUPP;
10726 
10727     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ);
10728     if (rc)
10729         return rc;
10730 
10731     req->port_id = cpu_to_le16(bp->pf.port_id);
10732     req->phy_addr = phy_addr;
10733     req->reg_addr = cpu_to_le16(reg & 0x1f);
10734     if (mdio_phy_id_is_c45(phy_addr)) {
10735         req->cl45_mdio = 1;
10736         req->phy_addr = mdio_phy_id_prtad(phy_addr);
10737         req->dev_addr = mdio_phy_id_devad(phy_addr);
10738         req->reg_addr = cpu_to_le16(reg);
10739     }
10740 
10741     resp = hwrm_req_hold(bp, req);
10742     rc = hwrm_req_send(bp, req);
10743     if (!rc)
10744         *val = le16_to_cpu(resp->reg_data);
10745     hwrm_req_drop(bp, req);
10746     return rc;
10747 }
10748 
10749 static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg,
10750                     u16 val)
10751 {
10752     struct hwrm_port_phy_mdio_write_input *req;
10753     int rc;
10754 
10755     if (bp->hwrm_spec_code < 0x10a00)
10756         return -EOPNOTSUPP;
10757 
10758     rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE);
10759     if (rc)
10760         return rc;
10761 
10762     req->port_id = cpu_to_le16(bp->pf.port_id);
10763     req->phy_addr = phy_addr;
10764     req->reg_addr = cpu_to_le16(reg & 0x1f);
10765     if (mdio_phy_id_is_c45(phy_addr)) {
10766         req->cl45_mdio = 1;
10767         req->phy_addr = mdio_phy_id_prtad(phy_addr);
10768         req->dev_addr = mdio_phy_id_devad(phy_addr);
10769         req->reg_addr = cpu_to_le16(reg);
10770     }
10771     req->reg_data = cpu_to_le16(val);
10772 
10773     return hwrm_req_send(bp, req);
10774 }
10775 
10776 /* rtnl_lock held */
10777 static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10778 {
10779     struct mii_ioctl_data *mdio = if_mii(ifr);
10780     struct bnxt *bp = netdev_priv(dev);
10781     int rc;
10782 
10783     switch (cmd) {
10784     case SIOCGMIIPHY:
10785         mdio->phy_id = bp->link_info.phy_addr;
10786 
10787         fallthrough;
10788     case SIOCGMIIREG: {
10789         u16 mii_regval = 0;
10790 
10791         if (!netif_running(dev))
10792             return -EAGAIN;
10793 
10794         rc = bnxt_hwrm_port_phy_read(bp, mdio->phy_id, mdio->reg_num,
10795                          &mii_regval);
10796         mdio->val_out = mii_regval;
10797         return rc;
10798     }
10799 
10800     case SIOCSMIIREG:
10801         if (!netif_running(dev))
10802             return -EAGAIN;
10803 
10804         return bnxt_hwrm_port_phy_write(bp, mdio->phy_id, mdio->reg_num,
10805                         mdio->val_in);
10806 
10807     case SIOCSHWTSTAMP:
10808         return bnxt_hwtstamp_set(dev, ifr);
10809 
10810     case SIOCGHWTSTAMP:
10811         return bnxt_hwtstamp_get(dev, ifr);
10812 
10813     default:
10814         /* do nothing */
10815         break;
10816     }
10817     return -EOPNOTSUPP;
10818 }
10819 
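/* Sum the per-completion-ring software counters into rtnl_link_stats64:
 * unicast/multicast/broadcast packets and bytes, discards as missed errors,
 * TX errors as drops, plus netpoll and out-of-memory RX discards.
 */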
10820 static void bnxt_get_ring_stats(struct bnxt *bp,
10821                 struct rtnl_link_stats64 *stats)
10822 {
10823     int i;
10824 
10825     for (i = 0; i < bp->cp_nr_rings; i++) {
10826         struct bnxt_napi *bnapi = bp->bnapi[i];
10827         struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
10828         u64 *sw = cpr->stats.sw_stats;
10829 
10830         stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_ucast_pkts);
10831         stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10832         stats->rx_packets += BNXT_GET_RING_STATS64(sw, rx_bcast_pkts);
10833 
10834         stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_ucast_pkts);
10835         stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_mcast_pkts);
10836         stats->tx_packets += BNXT_GET_RING_STATS64(sw, tx_bcast_pkts);
10837 
10838         stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_ucast_bytes);
10839         stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
10840         stats->rx_bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
10841 
10842         stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_ucast_bytes);
10843         stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_mcast_bytes);
10844         stats->tx_bytes += BNXT_GET_RING_STATS64(sw, tx_bcast_bytes);
10845 
10846         stats->rx_missed_errors +=
10847             BNXT_GET_RING_STATS64(sw, rx_discard_pkts);
10848 
10849         stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts);
10850 
10851         stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
10852 
10853         stats->rx_dropped +=
10854             cpr->sw_stats.rx.rx_netpoll_discards +
10855             cpr->sw_stats.rx.rx_oom_discards;
10856     }
10857 }
10858 
10859 static void bnxt_add_prev_stats(struct bnxt *bp,
10860                 struct rtnl_link_stats64 *stats)
10861 {
10862     struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev;
10863 
10864     stats->rx_packets += prev_stats->rx_packets;
10865     stats->tx_packets += prev_stats->tx_packets;
10866     stats->rx_bytes += prev_stats->rx_bytes;
10867     stats->tx_bytes += prev_stats->tx_bytes;
10868     stats->rx_missed_errors += prev_stats->rx_missed_errors;
10869     stats->multicast += prev_stats->multicast;
10870     stats->rx_dropped += prev_stats->rx_dropped;
10871     stats->tx_dropped += prev_stats->tx_dropped;
10872 }
10873 
10874 static void
10875 bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
10876 {
10877     struct bnxt *bp = netdev_priv(dev);
10878 
10879     set_bit(BNXT_STATE_READ_STATS, &bp->state);
10880     /* Make sure bnxt_close_nic() sees that we are reading stats before
10881      * we check the BNXT_STATE_OPEN flag.
10882      */
10883     smp_mb__after_atomic();
10884     if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
10885         clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10886         *stats = bp->net_stats_prev;
10887         return;
10888     }
10889 
10890     bnxt_get_ring_stats(bp, stats);
10891     bnxt_add_prev_stats(bp, stats);
10892 
10893     if (bp->flags & BNXT_FLAG_PORT_STATS) {
10894         u64 *rx = bp->port_stats.sw_stats;
10895         u64 *tx = bp->port_stats.sw_stats +
10896               BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
10897 
10898         stats->rx_crc_errors =
10899             BNXT_GET_RX_PORT_STATS64(rx, rx_fcs_err_frames);
10900         stats->rx_frame_errors =
10901             BNXT_GET_RX_PORT_STATS64(rx, rx_align_err_frames);
10902         stats->rx_length_errors =
10903             BNXT_GET_RX_PORT_STATS64(rx, rx_undrsz_frames) +
10904             BNXT_GET_RX_PORT_STATS64(rx, rx_ovrsz_frames) +
10905             BNXT_GET_RX_PORT_STATS64(rx, rx_runt_frames);
10906         stats->rx_errors =
10907             BNXT_GET_RX_PORT_STATS64(rx, rx_false_carrier_frames) +
10908             BNXT_GET_RX_PORT_STATS64(rx, rx_jbr_frames);
10909         stats->collisions =
10910             BNXT_GET_TX_PORT_STATS64(tx, tx_total_collisions);
10911         stats->tx_fifo_errors =
10912             BNXT_GET_TX_PORT_STATS64(tx, tx_fifo_underruns);
10913         stats->tx_errors = BNXT_GET_TX_PORT_STATS64(tx, tx_err);
10914     }
10915     clear_bit(BNXT_STATE_READ_STATS, &bp->state);
10916 }
10917 
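/* Copy the netdev multicast list into the default VNIC's mc_list and report
 * whether the RX mask needs to be reprogrammed.  If the list exceeds
 * BNXT_MAX_MC_ADDRS, fall back to the ALL_MCAST mask instead.
 */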
10918 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
10919 {
10920     struct net_device *dev = bp->dev;
10921     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10922     struct netdev_hw_addr *ha;
10923     u8 *haddr;
10924     int mc_count = 0;
10925     bool update = false;
10926     int off = 0;
10927 
10928     netdev_for_each_mc_addr(ha, dev) {
10929         if (mc_count >= BNXT_MAX_MC_ADDRS) {
10930             *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10931             vnic->mc_list_count = 0;
10932             return false;
10933         }
10934         haddr = ha->addr;
10935         if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
10936             memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
10937             update = true;
10938         }
10939         off += ETH_ALEN;
10940         mc_count++;
10941     }
10942     if (mc_count)
10943         *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
10944 
10945     if (mc_count != vnic->mc_list_count) {
10946         vnic->mc_list_count = mc_count;
10947         update = true;
10948     }
10949     return update;
10950 }
10951 
10952 static bool bnxt_uc_list_updated(struct bnxt *bp)
10953 {
10954     struct net_device *dev = bp->dev;
10955     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
10956     struct netdev_hw_addr *ha;
10957     int off = 0;
10958 
10959     if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
10960         return true;
10961 
10962     netdev_for_each_uc_addr(ha, dev) {
10963         if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
10964             return true;
10965 
10966         off += ETH_ALEN;
10967     }
10968     return false;
10969 }
10970 
10971 static void bnxt_set_rx_mode(struct net_device *dev)
10972 {
10973     struct bnxt *bp = netdev_priv(dev);
10974     struct bnxt_vnic_info *vnic;
10975     bool mc_update = false;
10976     bool uc_update;
10977     u32 mask;
10978 
10979     if (!test_bit(BNXT_STATE_OPEN, &bp->state))
10980         return;
10981 
10982     vnic = &bp->vnic_info[0];
10983     mask = vnic->rx_mask;
10984     mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
10985           CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
10986           CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
10987           CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
10988 
10989     if (dev->flags & IFF_PROMISC)
10990         mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
10991 
10992     uc_update = bnxt_uc_list_updated(bp);
10993 
10994     if (dev->flags & IFF_BROADCAST)
10995         mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
10996     if (dev->flags & IFF_ALLMULTI) {
10997         mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
10998         vnic->mc_list_count = 0;
10999     } else if (dev->flags & IFF_MULTICAST) {
11000         mc_update = bnxt_mc_list_updated(bp, &mask);
11001     }
11002 
11003     if (mask != vnic->rx_mask || uc_update || mc_update) {
11004         vnic->rx_mask = mask;
11005 
11006         set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11007         bnxt_queue_sp_work(bp);
11008     }
11009 }
11010 
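/* Program the requested RX mode into the hardware: free stale unicast L2
 * filters, re-add the current unicast list (falling back to promiscuous mode
 * if it exceeds BNXT_MAX_UC_ADDRS), then set the VNIC RX mask, dropping back
 * to ALL_MCAST if the multicast filter update fails.
 */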
11011 static int bnxt_cfg_rx_mode(struct bnxt *bp)
11012 {
11013     struct net_device *dev = bp->dev;
11014     struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
11015     struct hwrm_cfa_l2_filter_free_input *req;
11016     struct netdev_hw_addr *ha;
11017     int i, off = 0, rc;
11018     bool uc_update;
11019 
11020     netif_addr_lock_bh(dev);
11021     uc_update = bnxt_uc_list_updated(bp);
11022     netif_addr_unlock_bh(dev);
11023 
11024     if (!uc_update)
11025         goto skip_uc;
11026 
11027     rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE);
11028     if (rc)
11029         return rc;
11030     hwrm_req_hold(bp, req);
11031     for (i = 1; i < vnic->uc_filter_count; i++) {
11032         req->l2_filter_id = vnic->fw_l2_filter_id[i];
11033 
11034         rc = hwrm_req_send(bp, req);
11035     }
11036     hwrm_req_drop(bp, req);
11037 
11038     vnic->uc_filter_count = 1;
11039 
11040     netif_addr_lock_bh(dev);
11041     if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
11042         vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11043     } else {
11044         netdev_for_each_uc_addr(ha, dev) {
11045             memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
11046             off += ETH_ALEN;
11047             vnic->uc_filter_count++;
11048         }
11049     }
11050     netif_addr_unlock_bh(dev);
11051 
11052     for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
11053         rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
11054         if (rc) {
11055             if (BNXT_VF(bp) && rc == -ENODEV) {
11056                 if (!test_and_set_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11057                     netdev_warn(bp->dev, "Cannot configure L2 filters while PF is unavailable, will retry\n");
11058                 else
11059                     netdev_dbg(bp->dev, "PF still unavailable while configuring L2 filters.\n");
11060                 rc = 0;
11061             } else {
11062                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
11063             }
11064             vnic->uc_filter_count = i;
11065             return rc;
11066         }
11067     }
11068     if (test_and_clear_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state))
11069         netdev_notice(bp->dev, "Retry of L2 filter configuration successful.\n");
11070 
11071 skip_uc:
11072     if ((vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS) &&
11073         !bnxt_promisc_ok(bp))
11074         vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
11075     rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11076     if (rc && (vnic->rx_mask & CFA_L2_SET_RX_MASK_REQ_MASK_MCAST)) {
11077         netdev_info(bp->dev, "Failed setting MC filters rc: %d, turning on ALL_MCAST mode\n",
11078                 rc);
11079         vnic->rx_mask &= ~CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
11080         vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
11081         vnic->mc_list_count = 0;
11082         rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
11083     }
11084     if (rc)
11085         netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %d\n",
11086                rc);
11087 
11088     return rc;
11089 }
11090 
11091 static bool bnxt_can_reserve_rings(struct bnxt *bp)
11092 {
11093 #ifdef CONFIG_BNXT_SRIOV
11094     if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
11095         struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
11096 
11097         /* No minimum rings were provisioned by the PF.  Don't
11098          * reserve rings by default when device is down.
11099          */
11100         if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
11101             return true;
11102 
11103         if (!netif_running(bp->dev))
11104             return false;
11105     }
11106 #endif
11107     return true;
11108 }
11109 
11110 /* If the chip and firmware support RFS */
11111 static bool bnxt_rfs_supported(struct bnxt *bp)
11112 {
11113     if (bp->flags & BNXT_FLAG_CHIP_P5) {
11114         if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2)
11115             return true;
11116         return false;
11117     }
11118     /* 212 firmware is broken for aRFS */
11119     if (BNXT_FW_MAJ(bp) == 212)
11120         return false;
11121     if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
11122         return true;
11123     if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11124         return true;
11125     return false;
11126 }
11127 
11128 /* If runtime conditions support RFS */
11129 static bool bnxt_rfs_capable(struct bnxt *bp)
11130 {
11131 #ifdef CONFIG_RFS_ACCEL
11132     int vnics, max_vnics, max_rss_ctxs;
11133 
11134     if (bp->flags & BNXT_FLAG_CHIP_P5)
11135         return bnxt_rfs_supported(bp);
11136     if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) || !bp->rx_nr_rings)
11137         return false;
11138 
11139     vnics = 1 + bp->rx_nr_rings;
11140     max_vnics = bnxt_get_max_func_vnics(bp);
11141     max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
11142 
11143     /* RSS contexts not a limiting factor */
11144     if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
11145         max_rss_ctxs = max_vnics;
11146     if (vnics > max_vnics || vnics > max_rss_ctxs) {
11147         if (bp->rx_nr_rings > 1)
11148             netdev_warn(bp->dev,
11149                     "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
11150                     min(max_rss_ctxs - 1, max_vnics - 1));
11151         return false;
11152     }
11153 
11154     if (!BNXT_NEW_RM(bp))
11155         return true;
11156 
11157     if (vnics == bp->hw_resc.resv_vnics)
11158         return true;
11159 
11160     bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics);
11161     if (vnics <= bp->hw_resc.resv_vnics)
11162         return true;
11163 
11164     netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
11165     bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1);
11166     return false;
11167 #else
11168     return false;
11169 #endif
11170 }
11171 
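/* .ndo_fix_features handler: drops NTUPLE when aRFS cannot be used,
 * disables LRO/GRO_HW when aggregation rings are unavailable or an XDP
 * program is attached, keeps GRO_HW dependent on GRO and mutually
 * exclusive with LRO, and forces CTAG/STAG RX VLAN acceleration to be
 * toggled together (and off entirely on a VF with a PF-assigned VLAN).
 */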
11172 static netdev_features_t bnxt_fix_features(struct net_device *dev,
11173                        netdev_features_t features)
11174 {
11175     struct bnxt *bp = netdev_priv(dev);
11176     netdev_features_t vlan_features;
11177 
11178     if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
11179         features &= ~NETIF_F_NTUPLE;
11180 
11181     if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
11182         features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
11183 
11184     if (!(features & NETIF_F_GRO))
11185         features &= ~NETIF_F_GRO_HW;
11186 
11187     if (features & NETIF_F_GRO_HW)
11188         features &= ~NETIF_F_LRO;
11189 
11190     /* Both CTAG and STAG VLAN acceleration on the RX side have to be
11191      * turned on or off together.
11192      */
11193     vlan_features = features & BNXT_HW_FEATURE_VLAN_ALL_RX;
11194     if (vlan_features != BNXT_HW_FEATURE_VLAN_ALL_RX) {
11195         if (dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11196             features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11197         else if (vlan_features)
11198             features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
11199     }
11200 #ifdef CONFIG_BNXT_SRIOV
11201     if (BNXT_VF(bp) && bp->vf.vlan)
11202         features &= ~BNXT_HW_FEATURE_VLAN_ALL_RX;
11203 #endif
11204     return features;
11205 }
11206 
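/* .ndo_set_features handler: translates the requested features into
 * BNXT_FLAG_* bits and decides how to apply them.  Some TPA-only changes
 * (e.g. switching between LRO and GRO_HW on pre-P5 chips) can be applied
 * on the fly with bnxt_set_tpa(); any other change requires closing and
 * re-opening the NIC.
 */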
11207 static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
11208 {
11209     struct bnxt *bp = netdev_priv(dev);
11210     u32 flags = bp->flags;
11211     u32 changes;
11212     int rc = 0;
11213     bool re_init = false;
11214     bool update_tpa = false;
11215 
11216     flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
11217     if (features & NETIF_F_GRO_HW)
11218         flags |= BNXT_FLAG_GRO;
11219     else if (features & NETIF_F_LRO)
11220         flags |= BNXT_FLAG_LRO;
11221 
11222     if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
11223         flags &= ~BNXT_FLAG_TPA;
11224 
11225     if (features & BNXT_HW_FEATURE_VLAN_ALL_RX)
11226         flags |= BNXT_FLAG_STRIP_VLAN;
11227 
11228     if (features & NETIF_F_NTUPLE)
11229         flags |= BNXT_FLAG_RFS;
11230 
11231     changes = flags ^ bp->flags;
11232     if (changes & BNXT_FLAG_TPA) {
11233         update_tpa = true;
11234         if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
11235             (flags & BNXT_FLAG_TPA) == 0 ||
11236             (bp->flags & BNXT_FLAG_CHIP_P5))
11237             re_init = true;
11238     }
11239 
11240     if (changes & ~BNXT_FLAG_TPA)
11241         re_init = true;
11242 
11243     if (flags != bp->flags) {
11244         u32 old_flags = bp->flags;
11245 
11246         if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11247             bp->flags = flags;
11248             if (update_tpa)
11249                 bnxt_set_ring_params(bp);
11250             return rc;
11251         }
11252 
11253         if (re_init) {
11254             bnxt_close_nic(bp, false, false);
11255             bp->flags = flags;
11256             if (update_tpa)
11257                 bnxt_set_ring_params(bp);
11258 
11259             return bnxt_open_nic(bp, false, false);
11260         }
11261         if (update_tpa) {
11262             bp->flags = flags;
11263             rc = bnxt_set_tpa(bp,
11264                       (flags & BNXT_FLAG_TPA) ?
11265                       true : false);
11266             if (rc)
11267                 bp->flags = old_flags;
11268         }
11269     }
11270     return rc;
11271 }
11272 
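/* Walk the IPv6 extension headers starting at @nw_off, enforcing the
 * limits described in the comment below.  If @nextp is non-NULL and the
 * skb is encapsulated, a pointer to the final nexthdr byte is returned
 * so the caller can validate the inner protocol; otherwise the packet is
 * accepted only if the upper-layer protocol is TCP or UDP.
 */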
11273 static bool bnxt_exthdr_check(struct bnxt *bp, struct sk_buff *skb, int nw_off,
11274                   u8 **nextp)
11275 {
11276     struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + nw_off);
11277     int hdr_count = 0;
11278     u8 *nexthdr;
11279     int start;
11280 
11281     /* Check that there are at most 2 IPv6 extension headers, no
11282      * fragment header, and each is <= 64 bytes.
11283      */
11284     start = nw_off + sizeof(*ip6h);
11285     nexthdr = &ip6h->nexthdr;
11286     while (ipv6_ext_hdr(*nexthdr)) {
11287         struct ipv6_opt_hdr *hp;
11288         int hdrlen;
11289 
11290         if (hdr_count >= 3 || *nexthdr == NEXTHDR_NONE ||
11291             *nexthdr == NEXTHDR_FRAGMENT)
11292             return false;
11293         hp = __skb_header_pointer(NULL, start, sizeof(*hp), skb->data,
11294                       skb_headlen(skb), NULL);
11295         if (!hp)
11296             return false;
11297         if (*nexthdr == NEXTHDR_AUTH)
11298             hdrlen = ipv6_authlen(hp);
11299         else
11300             hdrlen = ipv6_optlen(hp);
11301 
11302         if (hdrlen > 64)
11303             return false;
11304         nexthdr = &hp->nexthdr;
11305         start += hdrlen;
11306         hdr_count++;
11307     }
11308     if (nextp) {
11309         /* Caller will check inner protocol */
11310         if (skb->encapsulation) {
11311             *nextp = nexthdr;
11312             return true;
11313         }
11314         *nextp = NULL;
11315     }
11316     /* Only support TCP/UDP for non-tunneled ipv6 and inner ipv6 */
11317     return *nexthdr == IPPROTO_TCP || *nexthdr == IPPROTO_UDP;
11318 }
11319 
11320 /* For UDP, we can only handle 1 VXLAN port and 1 Geneve port. */
11321 static bool bnxt_udp_tunl_check(struct bnxt *bp, struct sk_buff *skb)
11322 {
11323     struct udphdr *uh = udp_hdr(skb);
11324     __be16 udp_port = uh->dest;
11325 
11326     if (udp_port != bp->vxlan_port && udp_port != bp->nge_port)
11327         return false;
11328     if (skb->inner_protocol_type == ENCAP_TYPE_ETHER) {
11329         struct ethhdr *eh = inner_eth_hdr(skb);
11330 
11331         switch (eh->h_proto) {
11332         case htons(ETH_P_IP):
11333             return true;
11334         case htons(ETH_P_IPV6):
11335             return bnxt_exthdr_check(bp, skb,
11336                          skb_inner_network_offset(skb),
11337                          NULL);
11338         }
11339     }
11340     return false;
11341 }
11342 
11343 static bool bnxt_tunl_check(struct bnxt *bp, struct sk_buff *skb, u8 l4_proto)
11344 {
11345     switch (l4_proto) {
11346     case IPPROTO_UDP:
11347         return bnxt_udp_tunl_check(bp, skb);
11348     case IPPROTO_IPIP:
11349         return true;
11350     case IPPROTO_GRE: {
11351         switch (skb->inner_protocol) {
11352         default:
11353             return false;
11354         case htons(ETH_P_IP):
11355             return true;
11356         case htons(ETH_P_IPV6):
11357             fallthrough;
11358         }
11359     }
11360     case IPPROTO_IPV6:
11361         /* Check ext headers of inner ipv6 */
11362         return bnxt_exthdr_check(bp, skb, skb_inner_network_offset(skb),
11363                      NULL);
11364     }
11365     return false;
11366 }
11367 
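/* .ndo_features_check handler.  For tunneled or IPv6 packets that the
 * hardware cannot parse (unsupported tunnel type, too many or oversized
 * IPv6 extension headers), clear the checksum and GSO offload bits so
 * the stack falls back to software processing for this skb.
 */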
11368 static netdev_features_t bnxt_features_check(struct sk_buff *skb,
11369                          struct net_device *dev,
11370                          netdev_features_t features)
11371 {
11372     struct bnxt *bp = netdev_priv(dev);
11373     u8 *l4_proto;
11374 
11375     features = vlan_features_check(skb, features);
11376     switch (vlan_get_protocol(skb)) {
11377     case htons(ETH_P_IP):
11378         if (!skb->encapsulation)
11379             return features;
11380         l4_proto = &ip_hdr(skb)->protocol;
11381         if (bnxt_tunl_check(bp, skb, *l4_proto))
11382             return features;
11383         break;
11384     case htons(ETH_P_IPV6):
11385         if (!bnxt_exthdr_check(bp, skb, skb_network_offset(skb),
11386                        &l4_proto))
11387             break;
11388         if (!l4_proto || bnxt_tunl_check(bp, skb, *l4_proto))
11389             return features;
11390         break;
11391     }
11392     return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
11393 }
11394 
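/* Read @num_words 32-bit words of chip registers at @reg_off (applied on
 * top of the CHIMP register view) using HWRM_DBG_READ_DIRECT.  Firmware
 * DMAs the data into a host buffer, which is then converted from
 * little-endian into @reg_buf.
 */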
11395 int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
11396              u32 *reg_buf)
11397 {
11398     struct hwrm_dbg_read_direct_output *resp;
11399     struct hwrm_dbg_read_direct_input *req;
11400     __le32 *dbg_reg_buf;
11401     dma_addr_t mapping;
11402     int rc, i;
11403 
11404     rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT);
11405     if (rc)
11406         return rc;
11407 
11408     dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4,
11409                      &mapping);
11410     if (!dbg_reg_buf) {
11411         rc = -ENOMEM;
11412         goto dbg_rd_reg_exit;
11413     }
11414 
11415     req->host_dest_addr = cpu_to_le64(mapping);
11416 
11417     resp = hwrm_req_hold(bp, req);
11418     req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR);
11419     req->read_len32 = cpu_to_le32(num_words);
11420 
11421     rc = hwrm_req_send(bp, req);
11422     if (rc || resp->error_code) {
11423         rc = -EIO;
11424         goto dbg_rd_reg_exit;
11425     }
11426     for (i = 0; i < num_words; i++)
11427         reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]);
11428 
11429 dbg_rd_reg_exit:
11430     hwrm_req_drop(bp, req);
11431     return rc;
11432 }
11433 
11434 static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
11435                        u32 ring_id, u32 *prod, u32 *cons)
11436 {
11437     struct hwrm_dbg_ring_info_get_output *resp;
11438     struct hwrm_dbg_ring_info_get_input *req;
11439     int rc;
11440 
11441     rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET);
11442     if (rc)
11443         return rc;
11444 
11445     req->ring_type = ring_type;
11446     req->fw_ring_id = cpu_to_le32(ring_id);
11447     resp = hwrm_req_hold(bp, req);
11448     rc = hwrm_req_send(bp, req);
11449     if (!rc) {
11450         *prod = le32_to_cpu(resp->producer_index);
11451         *cons = le32_to_cpu(resp->consumer_index);
11452     }
11453     hwrm_req_drop(bp, req);
11454     return rc;
11455 }
11456 
11457 static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
11458 {
11459     struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
11460     int i = bnapi->index;
11461 
11462     if (!txr)
11463         return;
11464 
11465     netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
11466             i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
11467             txr->tx_cons);
11468 }
11469 
11470 static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
11471 {
11472     struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
11473     int i = bnapi->index;
11474 
11475     if (!rxr)
11476         return;
11477 
11478     netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
11479             i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
11480             rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
11481             rxr->rx_sw_agg_prod);
11482 }
11483 
11484 static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
11485 {
11486     struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
11487     int i = bnapi->index;
11488 
11489     netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
11490             i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
11491 }
11492 
11493 static void bnxt_dbg_dump_states(struct bnxt *bp)
11494 {
11495     int i;
11496     struct bnxt_napi *bnapi;
11497 
11498     for (i = 0; i < bp->cp_nr_rings; i++) {
11499         bnapi = bp->bnapi[i];
11500         if (netif_msg_drv(bp)) {
11501             bnxt_dump_tx_sw_state(bnapi);
11502             bnxt_dump_rx_sw_state(bnapi);
11503             bnxt_dump_cp_sw_state(bnapi);
11504         }
11505     }
11506 }
11507 
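/* Ask firmware to reset a single RX ring group, issued on that ring's
 * completion ring.  The caller falls back to a full reset if firmware
 * rejects or does not support the request.
 */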
11508 static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
11509 {
11510     struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
11511     struct hwrm_ring_reset_input *req;
11512     struct bnxt_napi *bnapi = rxr->bnapi;
11513     struct bnxt_cp_ring_info *cpr;
11514     u16 cp_ring_id;
11515     int rc;
11516 
11517     rc = hwrm_req_init(bp, req, HWRM_RING_RESET);
11518     if (rc)
11519         return rc;
11520 
11521     cpr = &bnapi->cp_ring;
11522     cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
11523     req->cmpl_ring = cpu_to_le16(cp_ring_id);
11524     req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
11525     req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
11526     return hwrm_req_send_silent(bp, req);
11527 }
11528 
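/* Core of the reset handling run from bnxt_sp_task(): optionally dump the
 * per-ring software state, then close and re-open the NIC.  The
 * non-silent variant also stops/restarts the ULP (RDMA) driver and
 * re-initializes IRQs.
 */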
11529 static void bnxt_reset_task(struct bnxt *bp, bool silent)
11530 {
11531     if (!silent)
11532         bnxt_dbg_dump_states(bp);
11533     if (netif_running(bp->dev)) {
11534         int rc;
11535 
11536         if (silent) {
11537             bnxt_close_nic(bp, false, false);
11538             bnxt_open_nic(bp, false, false);
11539         } else {
11540             bnxt_ulp_stop(bp);
11541             bnxt_close_nic(bp, true, false);
11542             rc = bnxt_open_nic(bp, true, false);
11543             bnxt_ulp_start(bp, rc);
11544         }
11545     }
11546 }
11547 
11548 static void bnxt_tx_timeout(struct net_device *dev, unsigned int txqueue)
11549 {
11550     struct bnxt *bp = netdev_priv(dev);
11551 
11552     netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
11553     set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
11554     bnxt_queue_sp_work(bp);
11555 }
11556 
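/* Called from bnxt_timer() when error recovery is supported.  Every
 * tmr_multiplier ticks, compare the firmware heartbeat and reset-count
 * registers with the last recorded values; a stalled heartbeat or an
 * unexpected reset-count change schedules firmware exception handling
 * via BNXT_FW_EXCEPTION_SP_EVENT.
 */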
11557 static void bnxt_fw_health_check(struct bnxt *bp)
11558 {
11559     struct bnxt_fw_health *fw_health = bp->fw_health;
11560     u32 val;
11561 
11562     if (!fw_health->enabled || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11563         return;
11564 
11565     /* Make sure it is enabled before checking the tmr_counter. */
11566     smp_rmb();
11567     if (fw_health->tmr_counter) {
11568         fw_health->tmr_counter--;
11569         return;
11570     }
11571 
11572     val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11573     if (val == fw_health->last_fw_heartbeat) {
11574         fw_health->arrests++;
11575         goto fw_reset;
11576     }
11577 
11578     fw_health->last_fw_heartbeat = val;
11579 
11580     val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11581     if (val != fw_health->last_fw_reset_cnt) {
11582         fw_health->discoveries++;
11583         goto fw_reset;
11584     }
11585 
11586     fw_health->tmr_counter = fw_health->tmr_multiplier;
11587     return;
11588 
11589 fw_reset:
11590     set_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event);
11591     bnxt_queue_sp_work(bp);
11592 }
11593 
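/* Periodic driver timer, re-armed every bp->current_interval jiffies.
 * It never does slow work itself; it checks firmware health and then
 * sets the appropriate sp_event bits (statistics, TC flower stats, aRFS
 * filter expiry, deferred PHY/L2 filter retries, etc.) for
 * bnxt_sp_task() to process.
 */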
11594 static void bnxt_timer(struct timer_list *t)
11595 {
11596     struct bnxt *bp = from_timer(bp, t, timer);
11597     struct net_device *dev = bp->dev;
11598 
11599     if (!netif_running(dev) || !test_bit(BNXT_STATE_OPEN, &bp->state))
11600         return;
11601 
11602     if (atomic_read(&bp->intr_sem) != 0)
11603         goto bnxt_restart_timer;
11604 
11605     if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
11606         bnxt_fw_health_check(bp);
11607 
11608     if (BNXT_LINK_IS_UP(bp) && bp->stats_coal_ticks) {
11609         set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
11610         bnxt_queue_sp_work(bp);
11611     }
11612 
11613     if (bnxt_tc_flower_enabled(bp)) {
11614         set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
11615         bnxt_queue_sp_work(bp);
11616     }
11617 
11618 #ifdef CONFIG_RFS_ACCEL
11619     if ((bp->flags & BNXT_FLAG_RFS) && bp->ntp_fltr_count) {
11620         set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
11621         bnxt_queue_sp_work(bp);
11622     }
11623 #endif /*CONFIG_RFS_ACCEL*/
11624 
11625     if (bp->link_info.phy_retry) {
11626         if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
11627             bp->link_info.phy_retry = false;
11628             netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
11629         } else {
11630             set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
11631             bnxt_queue_sp_work(bp);
11632         }
11633     }
11634 
11635     if (test_bit(BNXT_STATE_L2_FILTER_RETRY, &bp->state)) {
11636         set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
11637         bnxt_queue_sp_work(bp);
11638     }
11639 
11640     if ((bp->flags & BNXT_FLAG_CHIP_P5) && !bp->chip_rev &&
11641         netif_carrier_ok(dev)) {
11642         set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
11643         bnxt_queue_sp_work(bp);
11644     }
11645 bnxt_restart_timer:
11646     mod_timer(&bp->timer, jiffies + bp->current_interval);
11647 }
11648 
11649 static void bnxt_rtnl_lock_sp(struct bnxt *bp)
11650 {
11651     /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
11652      * set.  If the device is being closed, bnxt_close() may be holding
11653      * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear.  So we
11654      * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
11655      */
11656     clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11657     rtnl_lock();
11658 }
11659 
11660 static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
11661 {
11662     set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11663     rtnl_unlock();
11664 }
11665 
11666 /* Only called from bnxt_sp_task() */
11667 static void bnxt_reset(struct bnxt *bp, bool silent)
11668 {
11669     bnxt_rtnl_lock_sp(bp);
11670     if (test_bit(BNXT_STATE_OPEN, &bp->state))
11671         bnxt_reset_task(bp, silent);
11672     bnxt_rtnl_unlock_sp(bp);
11673 }
11674 
11675 /* Only called from bnxt_sp_task() */
11676 static void bnxt_rx_ring_reset(struct bnxt *bp)
11677 {
11678     int i;
11679 
11680     bnxt_rtnl_lock_sp(bp);
11681     if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11682         bnxt_rtnl_unlock_sp(bp);
11683         return;
11684     }
11685     /* Disable and flush TPA before resetting the RX ring */
11686     if (bp->flags & BNXT_FLAG_TPA)
11687         bnxt_set_tpa(bp, false);
11688     for (i = 0; i < bp->rx_nr_rings; i++) {
11689         struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
11690         struct bnxt_cp_ring_info *cpr;
11691         int rc;
11692 
11693         if (!rxr->bnapi->in_reset)
11694             continue;
11695 
11696         rc = bnxt_hwrm_rx_ring_reset(bp, i);
11697         if (rc) {
11698             if (rc == -EINVAL || rc == -EOPNOTSUPP)
11699                 netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
11700             else
11701                 netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
11702                         rc);
11703             bnxt_reset_task(bp, true);
11704             break;
11705         }
11706         bnxt_free_one_rx_ring_skbs(bp, i);
11707         rxr->rx_prod = 0;
11708         rxr->rx_agg_prod = 0;
11709         rxr->rx_sw_agg_prod = 0;
11710         rxr->rx_next_cons = 0;
11711         rxr->bnapi->in_reset = false;
11712         bnxt_alloc_one_rx_ring(bp, i);
11713         cpr = &rxr->bnapi->cp_ring;
11714         cpr->sw_stats.rx.rx_resets++;
11715         if (bp->flags & BNXT_FLAG_AGG_RINGS)
11716             bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
11717         bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
11718     }
11719     if (bp->flags & BNXT_FLAG_TPA)
11720         bnxt_set_tpa(bp, true);
11721     bnxt_rtnl_unlock_sp(bp);
11722 }
11723 
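/* Tear the device down in preparation for a firmware reset: stop the ULP
 * driver, quiesce DMA first if firmware is in a fatal state, close the
 * NIC, free the VF representors, unregister the driver from firmware and
 * release the backing-store context memory.
 */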
11724 static void bnxt_fw_reset_close(struct bnxt *bp)
11725 {
11726     bnxt_ulp_stop(bp);
11727     /* When firmware is in fatal state, quiesce device and disable
11728      * bus master to prevent any potential bad DMAs before freeing
11729      * kernel memory.
11730      */
11731     if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
11732         u16 val = 0;
11733 
11734         pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
11735         if (val == 0xffff)
11736             bp->fw_reset_min_dsecs = 0;
11737         bnxt_tx_disable(bp);
11738         bnxt_disable_napi(bp);
11739         bnxt_disable_int_sync(bp);
11740         bnxt_free_irq(bp);
11741         bnxt_clear_int_mode(bp);
11742         pci_disable_device(bp->pdev);
11743     }
11744     __bnxt_close_nic(bp, true, false);
11745     bnxt_vf_reps_free(bp);
11746     bnxt_clear_int_mode(bp);
11747     bnxt_hwrm_func_drv_unrgtr(bp);
11748     if (pci_is_enabled(bp->pdev))
11749         pci_disable_device(bp->pdev);
11750     bnxt_free_ctx_mem(bp);
11751     kfree(bp->ctx);
11752     bp->ctx = NULL;
11753 }
11754 
11755 static bool is_bnxt_fw_ok(struct bnxt *bp)
11756 {
11757     struct bnxt_fw_health *fw_health = bp->fw_health;
11758     bool no_heartbeat = false, has_reset = false;
11759     u32 val;
11760 
11761     val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
11762     if (val == fw_health->last_fw_heartbeat)
11763         no_heartbeat = true;
11764 
11765     val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
11766     if (val != fw_health->last_fw_reset_cnt)
11767         has_reset = true;
11768 
11769     if (!no_heartbeat && has_reset)
11770         return true;
11771 
11772     return false;
11773 }
11774 
11775 /* rtnl_lock is acquired before calling this function */
11776 static void bnxt_force_fw_reset(struct bnxt *bp)
11777 {
11778     struct bnxt_fw_health *fw_health = bp->fw_health;
11779     struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11780     u32 wait_dsecs;
11781 
11782     if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
11783         test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
11784         return;
11785 
11786     if (ptp) {
11787         spin_lock_bh(&ptp->ptp_lock);
11788         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11789         spin_unlock_bh(&ptp->ptp_lock);
11790     } else {
11791         set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11792     }
11793     bnxt_fw_reset_close(bp);
11794     wait_dsecs = fw_health->master_func_wait_dsecs;
11795     if (fw_health->primary) {
11796         if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
11797             wait_dsecs = 0;
11798         bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
11799     } else {
11800         bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
11801         wait_dsecs = fw_health->normal_func_wait_dsecs;
11802         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11803     }
11804 
11805     bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
11806     bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
11807     bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
11808 }
11809 
11810 void bnxt_fw_exception(struct bnxt *bp)
11811 {
11812     netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
11813     set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
11814     bnxt_rtnl_lock_sp(bp);
11815     bnxt_force_fw_reset(bp);
11816     bnxt_rtnl_unlock_sp(bp);
11817 }
11818 
11819 /* Returns the number of registered VFs, or 1 if VF configuration is pending, or
11820  * < 0 on error.
11821  */
11822 static int bnxt_get_registered_vfs(struct bnxt *bp)
11823 {
11824 #ifdef CONFIG_BNXT_SRIOV
11825     int rc;
11826 
11827     if (!BNXT_PF(bp))
11828         return 0;
11829 
11830     rc = bnxt_hwrm_func_qcfg(bp);
11831     if (rc) {
11832         netdev_err(bp->dev, "func_qcfg cmd failed, rc = %d\n", rc);
11833         return rc;
11834     }
11835     if (bp->pf.registered_vfs)
11836         return bp->pf.registered_vfs;
11837     if (bp->sriov_cfg)
11838         return 1;
11839 #endif
11840     return 0;
11841 }
11842 
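/* Start a coordinated (non-fatal) firmware reset.  On a PF with active
 * VFs the state machine first waits in BNXT_FW_RESET_STATE_POLL_VF for
 * the VFs to unregister; otherwise the device is closed here and
 * bnxt_fw_reset_task() is queued at the appropriate state.
 */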
11843 void bnxt_fw_reset(struct bnxt *bp)
11844 {
11845     bnxt_rtnl_lock_sp(bp);
11846     if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
11847         !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
11848         struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
11849         int n = 0, tmo;
11850 
11851         if (ptp) {
11852             spin_lock_bh(&ptp->ptp_lock);
11853             set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11854             spin_unlock_bh(&ptp->ptp_lock);
11855         } else {
11856             set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11857         }
11858         if (bp->pf.active_vfs &&
11859             !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state))
11860             n = bnxt_get_registered_vfs(bp);
11861         if (n < 0) {
11862             netdev_err(bp->dev, "Firmware reset aborted, rc = %d\n",
11863                    n);
11864             clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
11865             dev_close(bp->dev);
11866             goto fw_reset_exit;
11867         } else if (n > 0) {
11868             u16 vf_tmo_dsecs = n * 10;
11869 
11870             if (bp->fw_reset_max_dsecs < vf_tmo_dsecs)
11871                 bp->fw_reset_max_dsecs = vf_tmo_dsecs;
11872             bp->fw_reset_state =
11873                 BNXT_FW_RESET_STATE_POLL_VF;
11874             bnxt_queue_fw_reset_work(bp, HZ / 10);
11875             goto fw_reset_exit;
11876         }
11877         bnxt_fw_reset_close(bp);
11878         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
11879             bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
11880             tmo = HZ / 10;
11881         } else {
11882             bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
11883             tmo = bp->fw_reset_min_dsecs * HZ / 10;
11884         }
11885         bnxt_queue_fw_reset_work(bp, tmo);
11886     }
11887 fw_reset_exit:
11888     bnxt_rtnl_unlock_sp(bp);
11889 }
11890 
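/* P5 chips only: look for completion rings that have work pending but
 * whose raw consumer index has not moved since the last check, which
 * suggests a missed notification-queue interrupt.  The ring state is
 * queried from firmware for diagnostics and the event is counted in
 * sw_stats.cmn.missed_irqs.
 */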
11891 static void bnxt_chk_missed_irq(struct bnxt *bp)
11892 {
11893     int i;
11894 
11895     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
11896         return;
11897 
11898     for (i = 0; i < bp->cp_nr_rings; i++) {
11899         struct bnxt_napi *bnapi = bp->bnapi[i];
11900         struct bnxt_cp_ring_info *cpr;
11901         u32 fw_ring_id;
11902         int j;
11903 
11904         if (!bnapi)
11905             continue;
11906 
11907         cpr = &bnapi->cp_ring;
11908         for (j = 0; j < 2; j++) {
11909             struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
11910             u32 val[2];
11911 
11912             if (!cpr2 || cpr2->has_more_work ||
11913                 !bnxt_has_work(bp, cpr2))
11914                 continue;
11915 
11916             if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
11917                 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
11918                 continue;
11919             }
11920             fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
11921             bnxt_dbg_hwrm_ring_info_get(bp,
11922                 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
11923                 fw_ring_id, &val[0], &val[1]);
11924             cpr->sw_stats.cmn.missed_irqs++;
11925         }
11926     }
11927 }
11928 
11929 static void bnxt_cfg_ntp_filters(struct bnxt *);
11930 
11931 static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
11932 {
11933     struct bnxt_link_info *link_info = &bp->link_info;
11934 
11935     if (BNXT_AUTO_MODE(link_info->auto_mode)) {
11936         link_info->autoneg = BNXT_AUTONEG_SPEED;
11937         if (bp->hwrm_spec_code >= 0x10201) {
11938             if (link_info->auto_pause_setting &
11939                 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
11940                 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11941         } else {
11942             link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
11943         }
11944         link_info->advertising = link_info->auto_link_speeds;
11945         link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
11946     } else {
11947         link_info->req_link_speed = link_info->force_link_speed;
11948         link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
11949         if (link_info->force_pam4_link_speed) {
11950             link_info->req_link_speed =
11951                 link_info->force_pam4_link_speed;
11952             link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
11953         }
11954         link_info->req_duplex = link_info->duplex_setting;
11955     }
11956     if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
11957         link_info->req_flow_ctrl =
11958             link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
11959     else
11960         link_info->req_flow_ctrl = link_info->force_pause_setting;
11961 }
11962 
11963 static void bnxt_fw_echo_reply(struct bnxt *bp)
11964 {
11965     struct bnxt_fw_health *fw_health = bp->fw_health;
11966     struct hwrm_func_echo_response_input *req;
11967     int rc;
11968 
11969     rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE);
11970     if (rc)
11971         return;
11972     req->event_data1 = cpu_to_le32(fw_health->echo_req_data1);
11973     req->event_data2 = cpu_to_le32(fw_health->echo_req_data2);
11974     hwrm_req_send(bp, req);
11975 }
11976 
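/* Slow-path workqueue handler.  Each BNXT_*_SP_EVENT bit set from timer,
 * interrupt or async-event context is tested-and-cleared here and the
 * corresponding (possibly sleeping) work is performed.  The reset
 * handlers near the end temporarily clear BNXT_STATE_IN_SP_TASK (see
 * bnxt_rtnl_lock_sp()), which is why they must run last.
 */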
11977 static void bnxt_sp_task(struct work_struct *work)
11978 {
11979     struct bnxt *bp = container_of(work, struct bnxt, sp_task);
11980 
11981     set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11982     smp_mb__after_atomic();
11983     if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
11984         clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
11985         return;
11986     }
11987 
11988     if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
11989         bnxt_cfg_rx_mode(bp);
11990 
11991     if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
11992         bnxt_cfg_ntp_filters(bp);
11993     if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
11994         bnxt_hwrm_exec_fwd_req(bp);
11995     if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
11996         bnxt_hwrm_port_qstats(bp, 0);
11997         bnxt_hwrm_port_qstats_ext(bp, 0);
11998         bnxt_accumulate_all_stats(bp);
11999     }
12000 
12001     if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
12002         int rc;
12003 
12004         mutex_lock(&bp->link_lock);
12005         if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
12006                        &bp->sp_event))
12007             bnxt_hwrm_phy_qcaps(bp);
12008 
12009         rc = bnxt_update_link(bp, true);
12010         if (rc)
12011             netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
12012                    rc);
12013 
12014         if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
12015                        &bp->sp_event))
12016             bnxt_init_ethtool_link_settings(bp);
12017         mutex_unlock(&bp->link_lock);
12018     }
12019     if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
12020         int rc;
12021 
12022         mutex_lock(&bp->link_lock);
12023         rc = bnxt_update_phy_setting(bp);
12024         mutex_unlock(&bp->link_lock);
12025         if (rc) {
12026             netdev_warn(bp->dev, "update phy settings retry failed\n");
12027         } else {
12028             bp->link_info.phy_retry = false;
12029             netdev_info(bp->dev, "update phy settings retry succeeded\n");
12030         }
12031     }
12032     if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
12033         mutex_lock(&bp->link_lock);
12034         bnxt_get_port_module_status(bp);
12035         mutex_unlock(&bp->link_lock);
12036     }
12037 
12038     if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
12039         bnxt_tc_flow_stats_work(bp);
12040 
12041     if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
12042         bnxt_chk_missed_irq(bp);
12043 
12044     if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
12045         bnxt_fw_echo_reply(bp);
12046 
12047     /* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
12048      * must be the last functions to be called before exiting.
12049      */
12050     if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
12051         bnxt_reset(bp, false);
12052 
12053     if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
12054         bnxt_reset(bp, true);
12055 
12056     if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
12057         bnxt_rx_ring_reset(bp);
12058 
12059     if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
12060         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
12061             test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
12062             bnxt_devlink_health_fw_report(bp);
12063         else
12064             bnxt_fw_reset(bp);
12065     }
12066 
12067     if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
12068         if (!is_bnxt_fw_ok(bp))
12069             bnxt_devlink_health_fw_report(bp);
12070     }
12071 
12072     smp_mb__before_atomic();
12073     clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
12074 }
12075 
12076 /* Under rtnl_lock */
12077 int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
12078              int tx_xdp)
12079 {
12080     int max_rx, max_tx, tx_sets = 1;
12081     int tx_rings_needed, stats;
12082     int rx_rings = rx;
12083     int cp, vnics, rc;
12084 
12085     if (tcs)
12086         tx_sets = tcs;
12087 
12088     rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
12089     if (rc)
12090         return rc;
12091 
12092     if (max_rx < rx)
12093         return -ENOMEM;
12094 
12095     tx_rings_needed = tx * tx_sets + tx_xdp;
12096     if (max_tx < tx_rings_needed)
12097         return -ENOMEM;
12098 
12099     vnics = 1;
12100     if ((bp->flags & (BNXT_FLAG_RFS | BNXT_FLAG_CHIP_P5)) == BNXT_FLAG_RFS)
12101         vnics += rx_rings;
12102 
12103     if (bp->flags & BNXT_FLAG_AGG_RINGS)
12104         rx_rings <<= 1;
12105     cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
12106     stats = cp;
12107     if (BNXT_NEW_RM(bp)) {
12108         cp += bnxt_get_ulp_msix_num(bp);
12109         stats += bnxt_get_ulp_stat_ctxs(bp);
12110     }
12111     return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
12112                      stats, vnics);
12113 }
12114 
12115 static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
12116 {
12117     if (bp->bar2) {
12118         pci_iounmap(pdev, bp->bar2);
12119         bp->bar2 = NULL;
12120     }
12121 
12122     if (bp->bar1) {
12123         pci_iounmap(pdev, bp->bar1);
12124         bp->bar1 = NULL;
12125     }
12126 
12127     if (bp->bar0) {
12128         pci_iounmap(pdev, bp->bar0);
12129         bp->bar0 = NULL;
12130     }
12131 }
12132 
12133 static void bnxt_cleanup_pci(struct bnxt *bp)
12134 {
12135     bnxt_unmap_bars(bp, bp->pdev);
12136     pci_release_regions(bp->pdev);
12137     if (pci_is_enabled(bp->pdev))
12138         pci_disable_device(bp->pdev);
12139 }
12140 
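/* Install the default RX/TX interrupt coalescing parameters.  Per the
 * units note inside, coal_ticks is in microseconds and bufs_per_record
 * buffers make up one completion record, so the default RX coal_bufs of
 * 30 with bufs_per_record == 2 corresponds to roughly 15 completion
 * records.
 */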
12141 static void bnxt_init_dflt_coal(struct bnxt *bp)
12142 {
12143     struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
12144     struct bnxt_coal *coal;
12145     u16 flags = 0;
12146 
12147     if (coal_cap->cmpl_params &
12148         RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
12149         flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
12150 
12151     /* Tick values in microseconds.
12152      * 1 coal_buf x bufs_per_record = 1 completion record.
12153      */
12154     coal = &bp->rx_coal;
12155     coal->coal_ticks = 10;
12156     coal->coal_bufs = 30;
12157     coal->coal_ticks_irq = 1;
12158     coal->coal_bufs_irq = 2;
12159     coal->idle_thresh = 50;
12160     coal->bufs_per_record = 2;
12161     coal->budget = 64;      /* NAPI budget */
12162     coal->flags = flags;
12163 
12164     coal = &bp->tx_coal;
12165     coal->coal_ticks = 28;
12166     coal->coal_bufs = 30;
12167     coal->coal_ticks_irq = 2;
12168     coal->coal_bufs_irq = 2;
12169     coal->bufs_per_record = 1;
12170     coal->flags = flags;
12171 
12172     bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
12173 }
12174 
12175 static int bnxt_fw_init_one_p1(struct bnxt *bp)
12176 {
12177     int rc;
12178 
12179     bp->fw_cap = 0;
12180     rc = bnxt_hwrm_ver_get(bp);
12181     bnxt_try_map_fw_health_reg(bp);
12182     if (rc) {
12183         rc = bnxt_try_recover_fw(bp);
12184         if (rc)
12185             return rc;
12186         rc = bnxt_hwrm_ver_get(bp);
12187         if (rc)
12188             return rc;
12189     }
12190 
12191     bnxt_nvm_cfg_ver_get(bp);
12192 
12193     rc = bnxt_hwrm_func_reset(bp);
12194     if (rc)
12195         return -ENODEV;
12196 
12197     bnxt_hwrm_fw_set_time(bp);
12198     return 0;
12199 }
12200 
12201 static int bnxt_fw_init_one_p2(struct bnxt *bp)
12202 {
12203     int rc;
12204 
12205     /* Get the MAX capabilities for this function */
12206     rc = bnxt_hwrm_func_qcaps(bp);
12207     if (rc) {
12208         netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
12209                rc);
12210         return -ENODEV;
12211     }
12212 
12213     rc = bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(bp);
12214     if (rc)
12215         netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
12216                 rc);
12217 
12218     if (bnxt_alloc_fw_health(bp)) {
12219         netdev_warn(bp->dev, "no memory for firmware error recovery\n");
12220     } else {
12221         rc = bnxt_hwrm_error_recovery_qcfg(bp);
12222         if (rc)
12223             netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
12224                     rc);
12225     }
12226 
12227     rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
12228     if (rc)
12229         return -ENODEV;
12230 
12231     bnxt_hwrm_func_qcfg(bp);
12232     bnxt_hwrm_vnic_qcaps(bp);
12233     bnxt_hwrm_port_led_qcaps(bp);
12234     bnxt_ethtool_init(bp);
12235     bnxt_dcb_init(bp);
12236     return 0;
12237 }
12238 
12239 static void bnxt_set_dflt_rss_hash_type(struct bnxt *bp)
12240 {
12241     bp->flags &= ~BNXT_FLAG_UDP_RSS_CAP;
12242     bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
12243                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
12244                VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
12245                VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
12246     if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
12247         bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
12248         bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
12249                     VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
12250     }
12251 }
12252 
12253 static void bnxt_set_dflt_rfs(struct bnxt *bp)
12254 {
12255     struct net_device *dev = bp->dev;
12256 
12257     dev->hw_features &= ~NETIF_F_NTUPLE;
12258     dev->features &= ~NETIF_F_NTUPLE;
12259     bp->flags &= ~BNXT_FLAG_RFS;
12260     if (bnxt_rfs_supported(bp)) {
12261         dev->hw_features |= NETIF_F_NTUPLE;
12262         if (bnxt_rfs_capable(bp)) {
12263             bp->flags |= BNXT_FLAG_RFS;
12264             dev->features |= NETIF_F_NTUPLE;
12265         }
12266     }
12267 }
12268 
12269 static void bnxt_fw_init_one_p3(struct bnxt *bp)
12270 {
12271     struct pci_dev *pdev = bp->pdev;
12272 
12273     bnxt_set_dflt_rss_hash_type(bp);
12274     bnxt_set_dflt_rfs(bp);
12275 
12276     bnxt_get_wol_settings(bp);
12277     if (bp->flags & BNXT_FLAG_WOL_CAP)
12278         device_set_wakeup_enable(&pdev->dev, bp->wol);
12279     else
12280         device_set_wakeup_capable(&pdev->dev, false);
12281 
12282     bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
12283     bnxt_hwrm_coal_params_qcaps(bp);
12284 }
12285 
12286 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
12287 
12288 int bnxt_fw_init_one(struct bnxt *bp)
12289 {
12290     int rc;
12291 
12292     rc = bnxt_fw_init_one_p1(bp);
12293     if (rc) {
12294         netdev_err(bp->dev, "Firmware init phase 1 failed\n");
12295         return rc;
12296     }
12297     rc = bnxt_fw_init_one_p2(bp);
12298     if (rc) {
12299         netdev_err(bp->dev, "Firmware init phase 2 failed\n");
12300         return rc;
12301     }
12302     rc = bnxt_probe_phy(bp, false);
12303     if (rc)
12304         return rc;
12305     rc = bnxt_approve_mac(bp, bp->dev->dev_addr, false);
12306     if (rc)
12307         return rc;
12308 
12309     bnxt_fw_init_one_p3(bp);
12310     return 0;
12311 }
12312 
12313 static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
12314 {
12315     struct bnxt_fw_health *fw_health = bp->fw_health;
12316     u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
12317     u32 val = fw_health->fw_reset_seq_vals[reg_idx];
12318     u32 reg_type, reg_off, delay_msecs;
12319 
12320     delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
12321     reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
12322     reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
12323     switch (reg_type) {
12324     case BNXT_FW_HEALTH_REG_TYPE_CFG:
12325         pci_write_config_dword(bp->pdev, reg_off, val);
12326         break;
12327     case BNXT_FW_HEALTH_REG_TYPE_GRC:
12328         writel(reg_off & BNXT_GRC_BASE_MASK,
12329                bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4);
12330         reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
12331         fallthrough;
12332     case BNXT_FW_HEALTH_REG_TYPE_BAR0:
12333         writel(val, bp->bar0 + reg_off);
12334         break;
12335     case BNXT_FW_HEALTH_REG_TYPE_BAR1:
12336         writel(val, bp->bar1 + reg_off);
12337         break;
12338     }
12339     if (delay_msecs) {
12340         pci_read_config_dword(bp->pdev, 0, &val);
12341         msleep(delay_msecs);
12342     }
12343 }
12344 
12345 bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
12346 {
12347     struct hwrm_func_qcfg_output *resp;
12348     struct hwrm_func_qcfg_input *req;
12349     bool result = true; /* firmware will enforce if unknown */
12350 
12351     if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
12352         return result;
12353 
12354     if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
12355         return result;
12356 
12357     req->fid = cpu_to_le16(0xffff);
12358     resp = hwrm_req_hold(bp, req);
12359     if (!hwrm_req_send(bp, req))
12360         result = !!(le16_to_cpu(resp->flags) &
12361                 FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
12362     hwrm_req_drop(bp, req);
12363     return result;
12364 }
12365 
12366 static void bnxt_reset_all(struct bnxt *bp)
12367 {
12368     struct bnxt_fw_health *fw_health = bp->fw_health;
12369     int i, rc;
12370 
12371     if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12372         bnxt_fw_reset_via_optee(bp);
12373         bp->fw_reset_timestamp = jiffies;
12374         return;
12375     }
12376 
12377     if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_HOST) {
12378         for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
12379             bnxt_fw_reset_writel(bp, i);
12380     } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) {
12381         struct hwrm_fw_reset_input *req;
12382 
12383         rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
12384         if (!rc) {
12385             req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG);
12386             req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
12387             req->selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
12388             req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
12389             rc = hwrm_req_send(bp, req);
12390         }
12391         if (rc != -ENODEV)
12392             netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc);
12393     }
12394     bp->fw_reset_timestamp = jiffies;
12395 }
12396 
12397 static bool bnxt_fw_reset_timeout(struct bnxt *bp)
12398 {
12399     return time_after(jiffies, bp->fw_reset_timestamp +
12400               (bp->fw_reset_max_dsecs * HZ / 10));
12401 }
12402 
12403 static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
12404 {
12405     clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12406     if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
12407         bnxt_ulp_start(bp, rc);
12408         bnxt_dl_health_fw_status_update(bp, false);
12409     }
12410     bp->fw_reset_state = 0;
12411     dev_close(bp->dev);
12412 }
12413 
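/* Delayed-work state machine that drives a firmware reset through the
 * BNXT_FW_RESET_STATE_* states: poll for VFs to unregister, wait for
 * firmware to shut down, trigger the reset, re-enable the PCI device,
 * poll for firmware to come back, then re-open the netdev.  Each state
 * either falls through or re-queues itself with a suitable delay;
 * unrecoverable errors abort via bnxt_fw_reset_abort().
 */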
12414 static void bnxt_fw_reset_task(struct work_struct *work)
12415 {
12416     struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work);
12417     int rc = 0;
12418 
12419     if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
12420         netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
12421         return;
12422     }
12423 
12424     switch (bp->fw_reset_state) {
12425     case BNXT_FW_RESET_STATE_POLL_VF: {
12426         int n = bnxt_get_registered_vfs(bp);
12427         int tmo;
12428 
12429         if (n < 0) {
12430             netdev_err(bp->dev, "Firmware reset aborted, subsequent func_qcfg cmd failed, rc = %d, %d msecs since reset timestamp\n",
12431                    n, jiffies_to_msecs(jiffies -
12432                    bp->fw_reset_timestamp));
12433             goto fw_reset_abort;
12434         } else if (n > 0) {
12435             if (bnxt_fw_reset_timeout(bp)) {
12436                 clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12437                 bp->fw_reset_state = 0;
12438                 netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
12439                        n);
12440                 return;
12441             }
12442             bnxt_queue_fw_reset_work(bp, HZ / 10);
12443             return;
12444         }
12445         bp->fw_reset_timestamp = jiffies;
12446         rtnl_lock();
12447         if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
12448             bnxt_fw_reset_abort(bp, rc);
12449             rtnl_unlock();
12450             return;
12451         }
12452         bnxt_fw_reset_close(bp);
12453         if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
12454             bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
12455             tmo = HZ / 10;
12456         } else {
12457             bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12458             tmo = bp->fw_reset_min_dsecs * HZ / 10;
12459         }
12460         rtnl_unlock();
12461         bnxt_queue_fw_reset_work(bp, tmo);
12462         return;
12463     }
12464     case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
12465         u32 val;
12466 
12467         val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12468         if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
12469             !bnxt_fw_reset_timeout(bp)) {
12470             bnxt_queue_fw_reset_work(bp, HZ / 5);
12471             return;
12472         }
12473 
12474         if (!bp->fw_health->primary) {
12475             u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
12476 
12477             bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12478             bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
12479             return;
12480         }
12481         bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
12482     }
12483         fallthrough;
12484     case BNXT_FW_RESET_STATE_RESET_FW:
12485         bnxt_reset_all(bp);
12486         bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
12487         bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
12488         return;
12489     case BNXT_FW_RESET_STATE_ENABLE_DEV:
12490         bnxt_inv_fw_health_reg(bp);
12491         if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
12492             !bp->fw_reset_min_dsecs) {
12493             u16 val;
12494 
12495             pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
12496             if (val == 0xffff) {
12497                 if (bnxt_fw_reset_timeout(bp)) {
12498                     netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
12499                     rc = -ETIMEDOUT;
12500                     goto fw_reset_abort;
12501                 }
12502                 bnxt_queue_fw_reset_work(bp, HZ / 1000);
12503                 return;
12504             }
12505         }
12506         clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
12507         clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
12508         if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
12509             !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
12510             bnxt_dl_remote_reload(bp);
12511         if (pci_enable_device(bp->pdev)) {
12512             netdev_err(bp->dev, "Cannot re-enable PCI device\n");
12513             rc = -ENODEV;
12514             goto fw_reset_abort;
12515         }
12516         pci_set_master(bp->pdev);
12517         bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
12518         fallthrough;
12519     case BNXT_FW_RESET_STATE_POLL_FW:
12520         bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT;
12521         rc = bnxt_hwrm_poll(bp);
12522         if (rc) {
12523             if (bnxt_fw_reset_timeout(bp)) {
12524                 netdev_err(bp->dev, "Firmware reset aborted\n");
12525                 goto fw_reset_abort_status;
12526             }
12527             bnxt_queue_fw_reset_work(bp, HZ / 5);
12528             return;
12529         }
12530         bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
12531         bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
12532         fallthrough;
12533     case BNXT_FW_RESET_STATE_OPENING:
12534         while (!rtnl_trylock()) {
12535             bnxt_queue_fw_reset_work(bp, HZ / 10);
12536             return;
12537         }
12538         rc = bnxt_open(bp->dev);
12539         if (rc) {
12540             netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
12541             bnxt_fw_reset_abort(bp, rc);
12542             rtnl_unlock();
12543             return;
12544         }
12545 
12546         if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
12547             bp->fw_health->enabled) {
12548             bp->fw_health->last_fw_reset_cnt =
12549                 bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
12550         }
12551         bp->fw_reset_state = 0;
12552         /* Make sure fw_reset_state is 0 before clearing the flag */
12553         smp_mb__before_atomic();
12554         clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
12555         bnxt_ulp_start(bp, 0);
12556         bnxt_reenable_sriov(bp);
12557         bnxt_vf_reps_alloc(bp);
12558         bnxt_vf_reps_open(bp);
12559         bnxt_ptp_reapply_pps(bp);
12560         clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
12561         if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
12562             bnxt_dl_health_fw_recovery_done(bp);
12563             bnxt_dl_health_fw_status_update(bp, true);
12564         }
12565         rtnl_unlock();
12566         break;
12567     }
12568     return;
12569 
12570 fw_reset_abort_status:
12571     if (bp->fw_health->status_reliable ||
12572         (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
12573         u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
12574 
12575         netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
12576     }
12577 fw_reset_abort:
12578     rtnl_lock();
12579     bnxt_fw_reset_abort(bp, rc);
12580     rtnl_unlock();
12581 }
12582 
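/* One-time PCI and netdev plumbing at probe time: enable the device,
 * claim the BARs, set the DMA mask, map BAR 0 and BAR 4 (bp->bar0 and
 * bp->bar2), and initialize the slow-path work items, locks and timer.
 */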
12583 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
12584 {
12585     int rc;
12586     struct bnxt *bp = netdev_priv(dev);
12587 
12588     SET_NETDEV_DEV(dev, &pdev->dev);
12589 
12590     /* enable device (incl. PCI PM wakeup), and bus-mastering */
12591     rc = pci_enable_device(pdev);
12592     if (rc) {
12593         dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
12594         goto init_err;
12595     }
12596 
12597     if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12598         dev_err(&pdev->dev,
12599             "Cannot find PCI device base address, aborting\n");
12600         rc = -ENODEV;
12601         goto init_err_disable;
12602     }
12603 
12604     rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12605     if (rc) {
12606         dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
12607         goto init_err_disable;
12608     }
12609 
12610     if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
12611         dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12612         dev_err(&pdev->dev, "System does not support DMA, aborting\n");
12613         rc = -EIO;
12614         goto init_err_release;
12615     }
12616 
12617     pci_set_master(pdev);
12618 
12619     bp->dev = dev;
12620     bp->pdev = pdev;
12621 
12622     /* Doorbell BAR bp->bar1 is mapped after bnxt_fw_init_one_p2()
12623      * determines the BAR size.
12624      */
12625     bp->bar0 = pci_ioremap_bar(pdev, 0);
12626     if (!bp->bar0) {
12627         dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
12628         rc = -ENOMEM;
12629         goto init_err_release;
12630     }
12631 
12632     bp->bar2 = pci_ioremap_bar(pdev, 4);
12633     if (!bp->bar2) {
12634         dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
12635         rc = -ENOMEM;
12636         goto init_err_release;
12637     }
12638 
12639     pci_enable_pcie_error_reporting(pdev);
12640 
12641     INIT_WORK(&bp->sp_task, bnxt_sp_task);
12642     INIT_DELAYED_WORK(&bp->fw_reset_task, bnxt_fw_reset_task);
12643 
12644     spin_lock_init(&bp->ntp_fltr_lock);
12645 #if BITS_PER_LONG == 32
12646     spin_lock_init(&bp->db_lock);
12647 #endif
12648 
12649     bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
12650     bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
12651 
12652     timer_setup(&bp->timer, bnxt_timer, 0);
12653     bp->current_interval = BNXT_TIMER_INTERVAL;
12654 
12655     bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID;
12656     bp->nge_fw_dst_port_id = INVALID_HW_RING_ID;
12657 
12658     clear_bit(BNXT_STATE_OPEN, &bp->state);
12659     return 0;
12660 
12661 init_err_release:
12662     bnxt_unmap_bars(bp, pdev);
12663     pci_release_regions(pdev);
12664 
12665 init_err_disable:
12666     pci_disable_device(pdev);
12667 
12668 init_err:
12669     return rc;
12670 }
12671 
12672 /* rtnl_lock held */
12673 static int bnxt_change_mac_addr(struct net_device *dev, void *p)
12674 {
12675     struct sockaddr *addr = p;
12676     struct bnxt *bp = netdev_priv(dev);
12677     int rc = 0;
12678 
12679     if (!is_valid_ether_addr(addr->sa_data))
12680         return -EADDRNOTAVAIL;
12681 
12682     if (ether_addr_equal(addr->sa_data, dev->dev_addr))
12683         return 0;
12684 
12685     rc = bnxt_approve_mac(bp, addr->sa_data, true);
12686     if (rc)
12687         return rc;
12688 
12689     eth_hw_addr_set(dev, addr->sa_data);
12690     if (netif_running(dev)) {
12691         bnxt_close_nic(bp, false, false);
12692         rc = bnxt_open_nic(bp, false, false);
12693     }
12694 
12695     return rc;
12696 }
12697 
12698 /* rtnl_lock held */
12699 static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
12700 {
12701     struct bnxt *bp = netdev_priv(dev);
12702 
12703     if (netif_running(dev))
12704         bnxt_close_nic(bp, true, false);
12705 
12706     dev->mtu = new_mtu;
12707     bnxt_set_ring_params(bp);
12708 
12709     if (netif_running(dev))
12710         return bnxt_open_nic(bp, true, false);
12711 
12712     return 0;
12713 }
12714 
12715 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
12716 {
12717     struct bnxt *bp = netdev_priv(dev);
12718     bool sh = false;
12719     int rc;
12720 
12721     if (tc > bp->max_tc) {
12722         netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
12723                tc, bp->max_tc);
12724         return -EINVAL;
12725     }
12726 
12727     if (netdev_get_num_tc(dev) == tc)
12728         return 0;
12729 
12730     if (bp->flags & BNXT_FLAG_SHARED_RINGS)
12731         sh = true;
12732 
12733     rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
12734                   sh, tc, bp->tx_nr_rings_xdp);
12735     if (rc)
12736         return rc;
12737 
12738     /* Need to close the device and redo HW resource allocations */
12739     if (netif_running(bp->dev))
12740         bnxt_close_nic(bp, true, false);
12741 
12742     if (tc) {
12743         bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
12744         netdev_set_num_tc(dev, tc);
12745     } else {
12746         bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
12747         netdev_reset_tc(dev);
12748     }
12749     bp->tx_nr_rings += bp->tx_nr_rings_xdp;
12750     bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
12751                    bp->tx_nr_rings + bp->rx_nr_rings;
12752 
12753     if (netif_running(bp->dev))
12754         return bnxt_open_nic(bp, true, false);
12755 
12756     return 0;
12757 }
12758 
12759 static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
12760                   void *cb_priv)
12761 {
12762     struct bnxt *bp = cb_priv;
12763 
12764     if (!bnxt_tc_flower_enabled(bp) ||
12765         !tc_cls_can_offload_and_chain0(bp->dev, type_data))
12766         return -EOPNOTSUPP;
12767 
12768     switch (type) {
12769     case TC_SETUP_CLSFLOWER:
12770         return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
12771     default:
12772         return -EOPNOTSUPP;
12773     }
12774 }
12775 
12776 LIST_HEAD(bnxt_block_cb_list);
12777 
12778 static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
12779              void *type_data)
12780 {
12781     struct bnxt *bp = netdev_priv(dev);
12782 
12783     switch (type) {
12784     case TC_SETUP_BLOCK:
12785         return flow_block_cb_setup_simple(type_data,
12786                           &bnxt_block_cb_list,
12787                           bnxt_setup_tc_block_cb,
12788                           bp, bp, true);
12789     case TC_SETUP_QDISC_MQPRIO: {
12790         struct tc_mqprio_qopt *mqprio = type_data;
12791 
12792         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
12793 
12794         return bnxt_setup_mq_tc(dev, mqprio->num_tc);
12795     }
12796     default:
12797         return -EOPNOTSUPP;
12798     }
12799 }
12800 
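/* Illustrative only: a hardware-offloaded mqprio configuration created with
 * something like
 *     tc qdisc add dev <ifname> root mqprio num_tc 4 map 0 1 2 3 hw 1
 * (exact map/queues arguments depend on the tc version and setup) arrives
 * here as TC_SETUP_QDISC_MQPRIO and is translated by bnxt_setup_mq_tc()
 * above into per-TC groups of TX rings.
 */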
12801 #ifdef CONFIG_RFS_ACCEL
12802 static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
12803                 struct bnxt_ntuple_filter *f2)
12804 {
12805     struct flow_keys *keys1 = &f1->fkeys;
12806     struct flow_keys *keys2 = &f2->fkeys;
12807 
12808     if (keys1->basic.n_proto != keys2->basic.n_proto ||
12809         keys1->basic.ip_proto != keys2->basic.ip_proto)
12810         return false;
12811 
12812     if (keys1->basic.n_proto == htons(ETH_P_IP)) {
12813         if (keys1->addrs.v4addrs.src != keys2->addrs.v4addrs.src ||
12814             keys1->addrs.v4addrs.dst != keys2->addrs.v4addrs.dst)
12815             return false;
12816     } else {
12817         if (memcmp(&keys1->addrs.v6addrs.src, &keys2->addrs.v6addrs.src,
12818                sizeof(keys1->addrs.v6addrs.src)) ||
12819             memcmp(&keys1->addrs.v6addrs.dst, &keys2->addrs.v6addrs.dst,
12820                sizeof(keys1->addrs.v6addrs.dst)))
12821             return false;
12822     }
12823 
12824     if (keys1->ports.ports == keys2->ports.ports &&
12825         keys1->control.flags == keys2->control.flags &&
12826         ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
12827         ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
12828         return true;
12829 
12830     return false;
12831 }
12832 
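/* .ndo_rx_flow_steer handler for accelerated RFS.  The networking core asks
 * that the flow carried by @skb be steered to @rxq_index.  On success the
 * software filter ID is returned; the hardware filter itself is programmed
 * later from bnxt_cfg_ntp_filters(), scheduled here via the
 * BNXT_RX_NTP_FLTR_SP_EVENT work item.
 */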
12833 static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
12834                   u16 rxq_index, u32 flow_id)
12835 {
12836     struct bnxt *bp = netdev_priv(dev);
12837     struct bnxt_ntuple_filter *fltr, *new_fltr;
12838     struct flow_keys *fkeys;
12839     struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
12840     int rc = 0, idx, bit_id, l2_idx = 0;
12841     struct hlist_head *head;
12842     u32 flags;
12843 
12844     if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
12845         struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
12846         int off = 0, j;
12847 
12848         netif_addr_lock_bh(dev);
12849         for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
12850             if (ether_addr_equal(eth->h_dest,
12851                          vnic->uc_list + off)) {
12852                 l2_idx = j + 1;
12853                 break;
12854             }
12855         }
12856         netif_addr_unlock_bh(dev);
12857         if (!l2_idx)
12858             return -EINVAL;
12859     }
12860     new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
12861     if (!new_fltr)
12862         return -ENOMEM;
12863 
12864     fkeys = &new_fltr->fkeys;
12865     if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
12866         rc = -EPROTONOSUPPORT;
12867         goto err_free;
12868     }
12869 
12870     if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
12871          fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
12872         ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
12873          (fkeys->basic.ip_proto != IPPROTO_UDP))) {
12874         rc = -EPROTONOSUPPORT;
12875         goto err_free;
12876     }
12877     if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
12878         bp->hwrm_spec_code < 0x10601) {
12879         rc = -EPROTONOSUPPORT;
12880         goto err_free;
12881     }
12882     flags = fkeys->control.flags;
12883     if (((flags & FLOW_DIS_ENCAPSULATION) &&
12884          bp->hwrm_spec_code < 0x10601) || (flags & FLOW_DIS_IS_FRAGMENT)) {
12885         rc = -EPROTONOSUPPORT;
12886         goto err_free;
12887     }
12888 
12889     memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
12890     memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
12891 
12892     idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
12893     head = &bp->ntp_fltr_hash_tbl[idx];
12894     rcu_read_lock();
12895     hlist_for_each_entry_rcu(fltr, head, hash) {
12896         if (bnxt_fltr_match(fltr, new_fltr)) {
12897             rc = fltr->sw_id;
12898             rcu_read_unlock();
12899             goto err_free;
12900         }
12901     }
12902     rcu_read_unlock();
12903 
12904     spin_lock_bh(&bp->ntp_fltr_lock);
12905     bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
12906                      BNXT_NTP_FLTR_MAX_FLTR, 0);
12907     if (bit_id < 0) {
12908         spin_unlock_bh(&bp->ntp_fltr_lock);
12909         rc = -ENOMEM;
12910         goto err_free;
12911     }
12912 
12913     new_fltr->sw_id = (u16)bit_id;
12914     new_fltr->flow_id = flow_id;
12915     new_fltr->l2_fltr_idx = l2_idx;
12916     new_fltr->rxq = rxq_index;
12917     hlist_add_head_rcu(&new_fltr->hash, head);
12918     bp->ntp_fltr_count++;
12919     spin_unlock_bh(&bp->ntp_fltr_lock);
12920 
12921     set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
12922     bnxt_queue_sp_work(bp);
12923 
12924     return new_fltr->sw_id;
12925 
12926 err_free:
12927     kfree(new_fltr);
12928     return rc;
12929 }
12930 
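/* Runs from the sp_task workqueue: program newly added ntuple filters into
 * the hardware and free filters whose flows the RFS core reports as expired.
 */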
12931 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12932 {
12933     int i;
12934 
12935     for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
12936         struct hlist_head *head;
12937         struct hlist_node *tmp;
12938         struct bnxt_ntuple_filter *fltr;
12939         int rc;
12940 
12941         head = &bp->ntp_fltr_hash_tbl[i];
12942         hlist_for_each_entry_safe(fltr, tmp, head, hash) {
12943             bool del = false;
12944 
12945             if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
12946                 if (rps_may_expire_flow(bp->dev, fltr->rxq,
12947                             fltr->flow_id,
12948                             fltr->sw_id)) {
12949                     bnxt_hwrm_cfa_ntuple_filter_free(bp,
12950                                      fltr);
12951                     del = true;
12952                 }
12953             } else {
12954                 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
12955                                        fltr);
12956                 if (rc)
12957                     del = true;
12958                 else
12959                     set_bit(BNXT_FLTR_VALID, &fltr->state);
12960             }
12961 
12962             if (del) {
12963                 spin_lock_bh(&bp->ntp_fltr_lock);
12964                 hlist_del_rcu(&fltr->hash);
12965                 bp->ntp_fltr_count--;
12966                 spin_unlock_bh(&bp->ntp_fltr_lock);
12967                 synchronize_rcu();
12968                 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
12969                 kfree(fltr);
12970             }
12971         }
12972     }
12973     if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
12974         netdev_info(bp->dev, "Receive PF driver unload event!\n");
12975 }
12976 
12977 #else
12978 
12979 static void bnxt_cfg_ntp_filters(struct bnxt *bp)
12980 {
12981 }
12982 
12983 #endif /* CONFIG_RFS_ACCEL */
12984 
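/* udp_tunnel_nic sync callback.  The core invokes this when the offloaded
 * UDP tunnel port for the given table changes; entry 0 is read back with
 * udp_tunnel_nic_get_port() and the port is either allocated to or freed
 * from the firmware.  Per bnxt_udp_tunnels below, table 0 holds the single
 * VXLAN port and table 1 the single GENEVE port.
 */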
12985 static int bnxt_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
12986 {
12987     struct bnxt *bp = netdev_priv(netdev);
12988     struct udp_tunnel_info ti;
12989     unsigned int cmd;
12990 
12991     udp_tunnel_nic_get_port(netdev, table, 0, &ti);
12992     if (ti.type == UDP_TUNNEL_TYPE_VXLAN)
12993         cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN;
12994     else
12995         cmd = TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE;
12996 
12997     if (ti.port)
12998         return bnxt_hwrm_tunnel_dst_port_alloc(bp, ti.port, cmd);
12999 
13000     return bnxt_hwrm_tunnel_dst_port_free(bp, cmd);
13001 }
13002 
13003 static const struct udp_tunnel_nic_info bnxt_udp_tunnels = {
13004     .sync_table = bnxt_udp_tunnel_sync,
13005     .flags      = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
13006               UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
13007     .tables     = {
13008         { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
13009         { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
13010     },
13011 };
13012 
13013 static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
13014                    struct net_device *dev, u32 filter_mask,
13015                    int nlflags)
13016 {
13017     struct bnxt *bp = netdev_priv(dev);
13018 
13019     return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
13020                        nlflags, filter_mask, NULL);
13021 }
13022 
13023 static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
13024                    u16 flags, struct netlink_ext_ack *extack)
13025 {
13026     struct bnxt *bp = netdev_priv(dev);
13027     struct nlattr *attr, *br_spec;
13028     int rem, rc = 0;
13029 
13030     if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
13031         return -EOPNOTSUPP;
13032 
13033     br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
13034     if (!br_spec)
13035         return -EINVAL;
13036 
13037     nla_for_each_nested(attr, br_spec, rem) {
13038         u16 mode;
13039 
13040         if (nla_type(attr) != IFLA_BRIDGE_MODE)
13041             continue;
13042 
13043         if (nla_len(attr) < sizeof(mode))
13044             return -EINVAL;
13045 
13046         mode = nla_get_u16(attr);
13047         if (mode == bp->br_mode)
13048             break;
13049 
13050         rc = bnxt_hwrm_set_br_mode(bp, mode);
13051         if (!rc)
13052             bp->br_mode = mode;
13053         break;
13054     }
13055     return rc;
13056 }
13057 
13058 int bnxt_get_port_parent_id(struct net_device *dev,
13059                 struct netdev_phys_item_id *ppid)
13060 {
13061     struct bnxt *bp = netdev_priv(dev);
13062 
13063     if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
13064         return -EOPNOTSUPP;
13065 
13066     /* The PF and its VF-reps only support the switchdev framework */
13067     if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_DSN_VALID))
13068         return -EOPNOTSUPP;
13069 
13070     ppid->id_len = sizeof(bp->dsn);
13071     memcpy(ppid->id, bp->dsn, ppid->id_len);
13072 
13073     return 0;
13074 }
13075 
13076 static struct devlink_port *bnxt_get_devlink_port(struct net_device *dev)
13077 {
13078     struct bnxt *bp = netdev_priv(dev);
13079 
13080     return &bp->dl_port;
13081 }
13082 
13083 static const struct net_device_ops bnxt_netdev_ops = {
13084     .ndo_open       = bnxt_open,
13085     .ndo_start_xmit     = bnxt_start_xmit,
13086     .ndo_stop       = bnxt_close,
13087     .ndo_get_stats64    = bnxt_get_stats64,
13088     .ndo_set_rx_mode    = bnxt_set_rx_mode,
13089     .ndo_eth_ioctl      = bnxt_ioctl,
13090     .ndo_validate_addr  = eth_validate_addr,
13091     .ndo_set_mac_address    = bnxt_change_mac_addr,
13092     .ndo_change_mtu     = bnxt_change_mtu,
13093     .ndo_fix_features   = bnxt_fix_features,
13094     .ndo_set_features   = bnxt_set_features,
13095     .ndo_features_check = bnxt_features_check,
13096     .ndo_tx_timeout     = bnxt_tx_timeout,
13097 #ifdef CONFIG_BNXT_SRIOV
13098     .ndo_get_vf_config  = bnxt_get_vf_config,
13099     .ndo_set_vf_mac     = bnxt_set_vf_mac,
13100     .ndo_set_vf_vlan    = bnxt_set_vf_vlan,
13101     .ndo_set_vf_rate    = bnxt_set_vf_bw,
13102     .ndo_set_vf_link_state  = bnxt_set_vf_link_state,
13103     .ndo_set_vf_spoofchk    = bnxt_set_vf_spoofchk,
13104     .ndo_set_vf_trust   = bnxt_set_vf_trust,
13105 #endif
13106     .ndo_setup_tc           = bnxt_setup_tc,
13107 #ifdef CONFIG_RFS_ACCEL
13108     .ndo_rx_flow_steer  = bnxt_rx_flow_steer,
13109 #endif
13110     .ndo_bpf        = bnxt_xdp,
13111     .ndo_xdp_xmit       = bnxt_xdp_xmit,
13112     .ndo_bridge_getlink = bnxt_bridge_getlink,
13113     .ndo_bridge_setlink = bnxt_bridge_setlink,
13114     .ndo_get_devlink_port   = bnxt_get_devlink_port,
13115 };
13116 
13117 static void bnxt_remove_one(struct pci_dev *pdev)
13118 {
13119     struct net_device *dev = pci_get_drvdata(pdev);
13120     struct bnxt *bp = netdev_priv(dev);
13121 
13122     if (BNXT_PF(bp))
13123         bnxt_sriov_disable(bp);
13124 
13125     if (BNXT_PF(bp))
13126         devlink_port_type_clear(&bp->dl_port);
13127 
13128     bnxt_ptp_clear(bp);
13129     pci_disable_pcie_error_reporting(pdev);
13130     unregister_netdev(dev);
13131     clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
13132     /* Flush any pending tasks */
13133     cancel_work_sync(&bp->sp_task);
13134     cancel_delayed_work_sync(&bp->fw_reset_task);
13135     bp->sp_event = 0;
13136 
13137     bnxt_dl_fw_reporters_destroy(bp);
13138     bnxt_dl_unregister(bp);
13139     bnxt_shutdown_tc(bp);
13140 
13141     bnxt_clear_int_mode(bp);
13142     bnxt_hwrm_func_drv_unrgtr(bp);
13143     bnxt_free_hwrm_resources(bp);
13144     bnxt_ethtool_free(bp);
13145     bnxt_dcb_free(bp);
13146     kfree(bp->edev);
13147     bp->edev = NULL;
13148     kfree(bp->ptp_cfg);
13149     bp->ptp_cfg = NULL;
13150     kfree(bp->fw_health);
13151     bp->fw_health = NULL;
13152     bnxt_cleanup_pci(bp);
13153     bnxt_free_ctx_mem(bp);
13154     kfree(bp->ctx);
13155     bp->ctx = NULL;
13156     kfree(bp->rss_indir_tbl);
13157     bp->rss_indir_tbl = NULL;
13158     bnxt_free_port_stats(bp);
13159     free_netdev(dev);
13160 }
13161 
13162 static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt)
13163 {
13164     int rc = 0;
13165     struct bnxt_link_info *link_info = &bp->link_info;
13166 
13167     bp->phy_flags = 0;
13168     rc = bnxt_hwrm_phy_qcaps(bp);
13169     if (rc) {
13170         netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
13171                rc);
13172         return rc;
13173     }
13174     if (bp->phy_flags & BNXT_PHY_FL_NO_FCS)
13175         bp->dev->priv_flags |= IFF_SUPP_NOFCS;
13176     else
13177         bp->dev->priv_flags &= ~IFF_SUPP_NOFCS;
13178     if (!fw_dflt)
13179         return 0;
13180 
13181     mutex_lock(&bp->link_lock);
13182     rc = bnxt_update_link(bp, false);
13183     if (rc) {
13184         mutex_unlock(&bp->link_lock);
13185         netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
13186                rc);
13187         return rc;
13188     }
13189 
13190     /* Older firmware does not have supported_auto_speeds, so assume
13191      * that all supported speeds can be autonegotiated.
13192      */
13193     if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
13194         link_info->support_auto_speeds = link_info->support_speeds;
13195 
13196     bnxt_init_ethtool_link_settings(bp);
13197     mutex_unlock(&bp->link_lock);
13198     return 0;
13199 }
13200 
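/* The MSI-X Table Size field in the MSI-X capability is encoded as N - 1,
 * hence the "+ 1" below.  A device without an MSI-X capability is limited
 * to a single vector.
 */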
13201 static int bnxt_get_max_irq(struct pci_dev *pdev)
13202 {
13203     u16 ctrl;
13204 
13205     if (!pdev->msix_cap)
13206         return 1;
13207 
13208     pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
13209     return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
13210 }
13211 
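/* Compute upper bounds on RX, TX and completion rings from the
 * firmware-reported resources, net of the MSI-X vectors and stat contexts
 * reserved for the RoCE ULP.
 */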
13212 static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13213                 int *max_cp)
13214 {
13215     struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
13216     int max_ring_grps = 0, max_irq;
13217 
13218     *max_tx = hw_resc->max_tx_rings;
13219     *max_rx = hw_resc->max_rx_rings;
13220     *max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
13221     max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
13222             bnxt_get_ulp_msix_num(bp),
13223             hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
13224     if (!(bp->flags & BNXT_FLAG_CHIP_P5))
13225         *max_cp = min_t(int, *max_cp, max_irq);
13226     max_ring_grps = hw_resc->max_hw_ring_grps;
13227     if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
13228         *max_cp -= 1;
13229         *max_rx -= 2;
13230     }
13231     if (bp->flags & BNXT_FLAG_AGG_RINGS)
13232         *max_rx >>= 1;
13233     if (bp->flags & BNXT_FLAG_CHIP_P5) {
13234         bnxt_trim_rings(bp, max_rx, max_tx, *max_cp, false);
13235         /* On P5 chips, the max_cp output parameter is the number of available NQs */
13236         *max_cp = max_irq;
13237     }
13238     *max_rx = min_t(int, *max_rx, max_ring_grps);
13239 }
13240 
13241 int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
13242 {
13243     int rx, tx, cp;
13244 
13245     _bnxt_get_max_rings(bp, &rx, &tx, &cp);
13246     *max_rx = rx;
13247     *max_tx = tx;
13248     if (!rx || !tx || !cp)
13249         return -ENOMEM;
13250 
13251     return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
13252 }
13253 
13254 static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
13255                    bool shared)
13256 {
13257     int rc;
13258 
13259     rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13260     if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
13261         /* Not enough rings, try disabling agg rings. */
13262         bp->flags &= ~BNXT_FLAG_AGG_RINGS;
13263         rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
13264         if (rc) {
13265             /* set BNXT_FLAG_AGG_RINGS back for consistency */
13266             bp->flags |= BNXT_FLAG_AGG_RINGS;
13267             return rc;
13268         }
13269         bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
13270         bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13271         bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
13272         bnxt_set_ring_params(bp);
13273     }
13274 
13275     if (bp->flags & BNXT_FLAG_ROCE_CAP) {
13276         int max_cp, max_stat, max_irq;
13277 
13278         /* Reserve minimum resources for RoCE */
13279         max_cp = bnxt_get_max_func_cp_rings(bp);
13280         max_stat = bnxt_get_max_func_stat_ctxs(bp);
13281         max_irq = bnxt_get_max_func_irqs(bp);
13282         if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
13283             max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
13284             max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
13285             return 0;
13286 
13287         max_cp -= BNXT_MIN_ROCE_CP_RINGS;
13288         max_irq -= BNXT_MIN_ROCE_CP_RINGS;
13289         max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
13290         max_cp = min_t(int, max_cp, max_irq);
13291         max_cp = min_t(int, max_cp, max_stat);
13292         rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
13293         if (rc)
13294             rc = 0;
13295     }
13296     return rc;
13297 }
13298 
13299 /* In the initial default shared ring setting, each shared ring must have an
13300  * RX/TX ring pair.
13301  */
13302 static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
13303 {
13304     bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
13305     bp->rx_nr_rings = bp->cp_nr_rings;
13306     bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
13307     bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13308 }
13309 
13310 static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
13311 {
13312     int dflt_rings, max_rx_rings, max_tx_rings, rc;
13313 
13314     if (!bnxt_can_reserve_rings(bp))
13315         return 0;
13316 
13317     if (sh)
13318         bp->flags |= BNXT_FLAG_SHARED_RINGS;
13319     dflt_rings = is_kdump_kernel() ? 1 : netif_get_num_default_rss_queues();
13320     /* Reduce default rings on multi-port cards so that total default
13321      * rings do not exceed CPU count.
13322      */
13323     if (bp->port_count > 1) {
13324         int max_rings =
13325             max_t(int, num_online_cpus() / bp->port_count, 1);
13326 
13327         dflt_rings = min_t(int, dflt_rings, max_rings);
13328     }
13329     rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
13330     if (rc)
13331         return rc;
13332     bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
13333     bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
13334     if (sh)
13335         bnxt_trim_dflt_sh_rings(bp);
13336     else
13337         bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
13338     bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
13339 
13340     rc = __bnxt_reserve_rings(bp);
13341     if (rc && rc != -ENODEV)
13342         netdev_warn(bp->dev, "Unable to reserve tx rings\n");
13343     bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13344     if (sh)
13345         bnxt_trim_dflt_sh_rings(bp);
13346 
13347     /* Rings may have been trimmed, re-reserve the trimmed rings. */
13348     if (bnxt_need_reserve_rings(bp)) {
13349         rc = __bnxt_reserve_rings(bp);
13350         if (rc && rc != -ENODEV)
13351             netdev_warn(bp->dev, "2nd rings reservation failed.\n");
13352         bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13353     }
13354     if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
13355         bp->rx_nr_rings++;
13356         bp->cp_nr_rings++;
13357     }
13358     if (rc) {
13359         bp->tx_nr_rings = 0;
13360         bp->rx_nr_rings = 0;
13361     }
13362     return rc;
13363 }
13364 
13365 static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
13366 {
13367     int rc;
13368 
13369     if (bp->tx_nr_rings)
13370         return 0;
13371 
13372     bnxt_ulp_irq_stop(bp);
13373     bnxt_clear_int_mode(bp);
13374     rc = bnxt_set_dflt_rings(bp, true);
13375     if (rc) {
13376         if (BNXT_VF(bp) && rc == -ENODEV)
13377             netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13378         else
13379             netdev_err(bp->dev, "Not enough rings available.\n");
13380         goto init_dflt_ring_err;
13381     }
13382     rc = bnxt_init_int_mode(bp);
13383     if (rc)
13384         goto init_dflt_ring_err;
13385 
13386     bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13387 
13388     bnxt_set_dflt_rfs(bp);
13389 
13390 init_dflt_ring_err:
13391     bnxt_ulp_irq_restart(bp, rc);
13392     return rc;
13393 }
13394 
13395 int bnxt_restore_pf_fw_resources(struct bnxt *bp)
13396 {
13397     int rc;
13398 
13399     ASSERT_RTNL();
13400     bnxt_hwrm_func_qcaps(bp);
13401 
13402     if (netif_running(bp->dev))
13403         __bnxt_close_nic(bp, true, false);
13404 
13405     bnxt_ulp_irq_stop(bp);
13406     bnxt_clear_int_mode(bp);
13407     rc = bnxt_init_int_mode(bp);
13408     bnxt_ulp_irq_restart(bp, rc);
13409 
13410     if (netif_running(bp->dev)) {
13411         if (rc)
13412             dev_close(bp->dev);
13413         else
13414             rc = bnxt_open_nic(bp, true, false);
13415     }
13416 
13417     return rc;
13418 }
13419 
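/* Set the netdev MAC address.  A PF uses the firmware-provided MAC; a VF
 * uses the PF-administered MAC if one is set, otherwise a random MAC that
 * must then be strictly approved via bnxt_approve_mac().
 */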
13420 static int bnxt_init_mac_addr(struct bnxt *bp)
13421 {
13422     int rc = 0;
13423 
13424     if (BNXT_PF(bp)) {
13425         eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
13426     } else {
13427 #ifdef CONFIG_BNXT_SRIOV
13428         struct bnxt_vf_info *vf = &bp->vf;
13429         bool strict_approval = true;
13430 
13431         if (is_valid_ether_addr(vf->mac_addr)) {
13432             /* overwrite netdev dev_addr with admin VF MAC */
13433             eth_hw_addr_set(bp->dev, vf->mac_addr);
13434             /* Older PF driver or firmware may not approve this
13435              * correctly.
13436              */
13437             strict_approval = false;
13438         } else {
13439             eth_hw_addr_random(bp->dev);
13440         }
13441         rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
13442 #endif
13443     }
13444     return rc;
13445 }
13446 
13447 static void bnxt_vpd_read_info(struct bnxt *bp)
13448 {
13449     struct pci_dev *pdev = bp->pdev;
13450     unsigned int vpd_size, kw_len;
13451     int pos, size;
13452     u8 *vpd_data;
13453 
13454     vpd_data = pci_vpd_alloc(pdev, &vpd_size);
13455     if (IS_ERR(vpd_data)) {
13456         pci_warn(pdev, "Unable to read VPD\n");
13457         return;
13458     }
13459 
13460     pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13461                        PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
13462     if (pos < 0)
13463         goto read_sn;
13464 
13465     size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13466     memcpy(bp->board_partno, &vpd_data[pos], size);
13467 
13468 read_sn:
13469     pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
13470                        PCI_VPD_RO_KEYWORD_SERIALNO,
13471                        &kw_len);
13472     if (pos < 0)
13473         goto exit;
13474 
13475     size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
13476     memcpy(bp->board_serialno, &vpd_data[pos], size);
13477 exit:
13478     kfree(vpd_data);
13479 }
13480 
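/* Read the PCIe Device Serial Number; when valid it is later exposed as the
 * eswitch switch_id (see bnxt_get_port_parent_id()).
 */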
13481 static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
13482 {
13483     struct pci_dev *pdev = bp->pdev;
13484     u64 qword;
13485 
13486     qword = pci_get_dsn(pdev);
13487     if (!qword) {
13488         netdev_info(bp->dev, "Unable to read adapter's DSN\n");
13489         return -EOPNOTSUPP;
13490     }
13491 
13492     put_unaligned_le64(qword, dsn);
13493 
13494     bp->flags |= BNXT_FLAG_DSN_VALID;
13495     return 0;
13496 }
13497 
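/* Map the doorbell BAR (BAR 2).  bp->db_size is only known after the
 * firmware has been queried in bnxt_fw_init_one_p2(), so this mapping is
 * deferred out of bnxt_init_board().
 */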
13498 static int bnxt_map_db_bar(struct bnxt *bp)
13499 {
13500     if (!bp->db_size)
13501         return -ENODEV;
13502     bp->bar1 = pci_iomap(bp->pdev, 2, bp->db_size);
13503     if (!bp->bar1)
13504         return -ENOMEM;
13505     return 0;
13506 }
13507 
13508 void bnxt_print_device_info(struct bnxt *bp)
13509 {
13510     netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
13511             board_info[bp->board_idx].name,
13512             (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
13513 
13514     pcie_print_link_status(bp->pdev);
13515 }
13516 
13517 static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
13518 {
13519     struct net_device *dev;
13520     struct bnxt *bp;
13521     int rc, max_irqs;
13522 
13523     if (pci_is_bridge(pdev))
13524         return -ENODEV;
13525 
13526     /* Clear any DMA transactions left pending by the crashed kernel
13527      * before loading the driver in the capture (kdump) kernel.
13528      */
13529     if (is_kdump_kernel()) {
13530         pci_clear_master(pdev);
13531         pcie_flr(pdev);
13532     }
13533 
13534     max_irqs = bnxt_get_max_irq(pdev);
13535     dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
13536     if (!dev)
13537         return -ENOMEM;
13538 
13539     bp = netdev_priv(dev);
13540     bp->board_idx = ent->driver_data;
13541     bp->msg_enable = BNXT_DEF_MSG_ENABLE;
13542     bnxt_set_max_func_irqs(bp, max_irqs);
13543 
13544     if (bnxt_vf_pciid(bp->board_idx))
13545         bp->flags |= BNXT_FLAG_VF;
13546 
13547     if (pdev->msix_cap)
13548         bp->flags |= BNXT_FLAG_MSIX_CAP;
13549 
13550     rc = bnxt_init_board(pdev, dev);
13551     if (rc < 0)
13552         goto init_err_free;
13553 
13554     dev->netdev_ops = &bnxt_netdev_ops;
13555     dev->watchdog_timeo = BNXT_TX_TIMEOUT;
13556     dev->ethtool_ops = &bnxt_ethtool_ops;
13557     pci_set_drvdata(pdev, dev);
13558 
13559     rc = bnxt_alloc_hwrm_resources(bp);
13560     if (rc)
13561         goto init_err_pci_clean;
13562 
13563     mutex_init(&bp->hwrm_cmd_lock);
13564     mutex_init(&bp->link_lock);
13565 
13566     rc = bnxt_fw_init_one_p1(bp);
13567     if (rc)
13568         goto init_err_pci_clean;
13569 
13570     if (BNXT_PF(bp))
13571         bnxt_vpd_read_info(bp);
13572 
13573     if (BNXT_CHIP_P5(bp)) {
13574         bp->flags |= BNXT_FLAG_CHIP_P5;
13575         if (BNXT_CHIP_SR2(bp))
13576             bp->flags |= BNXT_FLAG_CHIP_SR2;
13577     }
13578 
13579     rc = bnxt_alloc_rss_indir_tbl(bp);
13580     if (rc)
13581         goto init_err_pci_clean;
13582 
13583     rc = bnxt_fw_init_one_p2(bp);
13584     if (rc)
13585         goto init_err_pci_clean;
13586 
13587     rc = bnxt_map_db_bar(bp);
13588     if (rc) {
13589         dev_err(&pdev->dev, "Cannot map doorbell BAR rc = %d, aborting\n",
13590             rc);
13591         goto init_err_pci_clean;
13592     }
13593 
13594     dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13595                NETIF_F_TSO | NETIF_F_TSO6 |
13596                NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13597                NETIF_F_GSO_IPXIP4 |
13598                NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13599                NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
13600                NETIF_F_RXCSUM | NETIF_F_GRO;
13601 
13602     if (BNXT_SUPPORTS_TPA(bp))
13603         dev->hw_features |= NETIF_F_LRO;
13604 
13605     dev->hw_enc_features =
13606             NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13607             NETIF_F_TSO | NETIF_F_TSO6 |
13608             NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
13609             NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
13610             NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
13611     dev->udp_tunnel_nic_info = &bnxt_udp_tunnels;
13612 
13613     dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
13614                     NETIF_F_GSO_GRE_CSUM;
13615     dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
13616     if (bp->fw_cap & BNXT_FW_CAP_VLAN_RX_STRIP)
13617         dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_RX;
13618     if (bp->fw_cap & BNXT_FW_CAP_VLAN_TX_INSERT)
13619         dev->hw_features |= BNXT_HW_FEATURE_VLAN_ALL_TX;
13620     if (BNXT_SUPPORTS_TPA(bp))
13621         dev->hw_features |= NETIF_F_GRO_HW;
13622     dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
13623     if (dev->features & NETIF_F_GRO_HW)
13624         dev->features &= ~NETIF_F_LRO;
13625     dev->priv_flags |= IFF_UNICAST_FLT;
13626 
13627 #ifdef CONFIG_BNXT_SRIOV
13628     init_waitqueue_head(&bp->sriov_cfg_wait);
13629 #endif
13630     if (BNXT_SUPPORTS_TPA(bp)) {
13631         bp->gro_func = bnxt_gro_func_5730x;
13632         if (BNXT_CHIP_P4(bp))
13633             bp->gro_func = bnxt_gro_func_5731x;
13634         else if (BNXT_CHIP_P5(bp))
13635             bp->gro_func = bnxt_gro_func_5750x;
13636     }
13637     if (!BNXT_CHIP_P4_PLUS(bp))
13638         bp->flags |= BNXT_FLAG_DOUBLE_DB;
13639 
13640     rc = bnxt_init_mac_addr(bp);
13641     if (rc) {
13642         dev_err(&pdev->dev, "Unable to initialize mac address.\n");
13643         rc = -EADDRNOTAVAIL;
13644         goto init_err_pci_clean;
13645     }
13646 
13647     if (BNXT_PF(bp)) {
13648         /* Read the adapter's DSN to use as the eswitch switch_id */
13649         rc = bnxt_pcie_dsn_get(bp, bp->dsn);
13650     }
13651 
13652     /* MTU range: 60 - FW defined max */
13653     dev->min_mtu = ETH_ZLEN;
13654     dev->max_mtu = bp->max_mtu;
13655 
13656     rc = bnxt_probe_phy(bp, true);
13657     if (rc)
13658         goto init_err_pci_clean;
13659 
13660     bnxt_set_rx_skb_mode(bp, false);
13661     bnxt_set_tpa_flags(bp);
13662     bnxt_set_ring_params(bp);
13663     rc = bnxt_set_dflt_rings(bp, true);
13664     if (rc) {
13665         if (BNXT_VF(bp) && rc == -ENODEV) {
13666             netdev_err(bp->dev, "Cannot configure VF rings while PF is unavailable.\n");
13667         } else {
13668             netdev_err(bp->dev, "Not enough rings available.\n");
13669             rc = -ENOMEM;
13670         }
13671         goto init_err_pci_clean;
13672     }
13673 
13674     bnxt_fw_init_one_p3(bp);
13675 
13676     bnxt_init_dflt_coal(bp);
13677 
13678     if (dev->hw_features & BNXT_HW_FEATURE_VLAN_ALL_RX)
13679         bp->flags |= BNXT_FLAG_STRIP_VLAN;
13680 
13681     rc = bnxt_init_int_mode(bp);
13682     if (rc)
13683         goto init_err_pci_clean;
13684 
13685     /* No TC has been set yet and rings may have been trimmed due to
13686      * limited MSIX, so we re-initialize the TX rings per TC.
13687      */
13688     bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
13689 
13690     if (BNXT_PF(bp)) {
13691         if (!bnxt_pf_wq) {
13692             bnxt_pf_wq =
13693                 create_singlethread_workqueue("bnxt_pf_wq");
13694             if (!bnxt_pf_wq) {
13695                 dev_err(&pdev->dev, "Unable to create workqueue.\n");
13696                 rc = -ENOMEM;
13697                 goto init_err_pci_clean;
13698             }
13699         }
13700         rc = bnxt_init_tc(bp);
13701         if (rc)
13702             netdev_err(dev, "Failed to initialize TC flower offload, err = %d.\n",
13703                    rc);
13704     }
13705 
13706     bnxt_inv_fw_health_reg(bp);
13707     rc = bnxt_dl_register(bp);
13708     if (rc)
13709         goto init_err_dl;
13710 
13711     rc = register_netdev(dev);
13712     if (rc)
13713         goto init_err_cleanup;
13714 
13715     if (BNXT_PF(bp))
13716         devlink_port_type_eth_set(&bp->dl_port, bp->dev);
13717     bnxt_dl_fw_reporters_create(bp);
13718 
13719     bnxt_print_device_info(bp);
13720 
13721     pci_save_state(pdev);
13722     return 0;
13723 
13724 init_err_cleanup:
13725     bnxt_dl_unregister(bp);
13726 init_err_dl:
13727     bnxt_shutdown_tc(bp);
13728     bnxt_clear_int_mode(bp);
13729 
13730 init_err_pci_clean:
13731     bnxt_hwrm_func_drv_unrgtr(bp);
13732     bnxt_free_hwrm_resources(bp);
13733     bnxt_ethtool_free(bp);
13734     bnxt_ptp_clear(bp);
13735     kfree(bp->ptp_cfg);
13736     bp->ptp_cfg = NULL;
13737     kfree(bp->fw_health);
13738     bp->fw_health = NULL;
13739     bnxt_cleanup_pci(bp);
13740     bnxt_free_ctx_mem(bp);
13741     kfree(bp->ctx);
13742     bp->ctx = NULL;
13743     kfree(bp->rss_indir_tbl);
13744     bp->rss_indir_tbl = NULL;
13745 
13746 init_err_free:
13747     free_netdev(dev);
13748     return rc;
13749 }
13750 
13751 static void bnxt_shutdown(struct pci_dev *pdev)
13752 {
13753     struct net_device *dev = pci_get_drvdata(pdev);
13754     struct bnxt *bp;
13755 
13756     if (!dev)
13757         return;
13758 
13759     rtnl_lock();
13760     bp = netdev_priv(dev);
13761     if (!bp)
13762         goto shutdown_exit;
13763 
13764     if (netif_running(dev))
13765         dev_close(dev);
13766 
13767     bnxt_ulp_shutdown(bp);
13768     bnxt_clear_int_mode(bp);
13769     pci_disable_device(pdev);
13770 
13771     if (system_state == SYSTEM_POWER_OFF) {
13772         pci_wake_from_d3(pdev, bp->wol);
13773         pci_set_power_state(pdev, PCI_D3hot);
13774     }
13775 
13776 shutdown_exit:
13777     rtnl_unlock();
13778 }
13779 
13780 #ifdef CONFIG_PM_SLEEP
13781 static int bnxt_suspend(struct device *device)
13782 {
13783     struct net_device *dev = dev_get_drvdata(device);
13784     struct bnxt *bp = netdev_priv(dev);
13785     int rc = 0;
13786 
13787     rtnl_lock();
13788     bnxt_ulp_stop(bp);
13789     if (netif_running(dev)) {
13790         netif_device_detach(dev);
13791         rc = bnxt_close(dev);
13792     }
13793     bnxt_hwrm_func_drv_unrgtr(bp);
13794     pci_disable_device(bp->pdev);
13795     bnxt_free_ctx_mem(bp);
13796     kfree(bp->ctx);
13797     bp->ctx = NULL;
13798     rtnl_unlock();
13799     return rc;
13800 }
13801 
13802 static int bnxt_resume(struct device *device)
13803 {
13804     struct net_device *dev = dev_get_drvdata(device);
13805     struct bnxt *bp = netdev_priv(dev);
13806     int rc = 0;
13807 
13808     rtnl_lock();
13809     rc = pci_enable_device(bp->pdev);
13810     if (rc) {
13811         netdev_err(dev, "Cannot re-enable PCI device during resume, err = %d\n",
13812                rc);
13813         goto resume_exit;
13814     }
13815     pci_set_master(bp->pdev);
13816     if (bnxt_hwrm_ver_get(bp)) {
13817         rc = -ENODEV;
13818         goto resume_exit;
13819     }
13820     rc = bnxt_hwrm_func_reset(bp);
13821     if (rc) {
13822         rc = -EBUSY;
13823         goto resume_exit;
13824     }
13825 
13826     rc = bnxt_hwrm_func_qcaps(bp);
13827     if (rc)
13828         goto resume_exit;
13829 
13830     if (bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
13831         rc = -ENODEV;
13832         goto resume_exit;
13833     }
13834 
13835     bnxt_get_wol_settings(bp);
13836     if (netif_running(dev)) {
13837         rc = bnxt_open(dev);
13838         if (!rc)
13839             netif_device_attach(dev);
13840     }
13841 
13842 resume_exit:
13843     bnxt_ulp_start(bp, rc);
13844     if (!rc)
13845         bnxt_reenable_sriov(bp);
13846     rtnl_unlock();
13847     return rc;
13848 }
13849 
13850 static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
13851 #define BNXT_PM_OPS (&bnxt_pm_ops)
13852 
13853 #else
13854 
13855 #define BNXT_PM_OPS NULL
13856 
13857 #endif /* CONFIG_PM_SLEEP */
13858 
13859 /**
13860  * bnxt_io_error_detected - called when PCI error is detected
13861  * @pdev: Pointer to PCI device
13862  * @state: The current pci connection state
13863  *
13864  * This function is called after a PCI bus error affecting
13865  * this device has been detected.
13866  */
13867 static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
13868                            pci_channel_state_t state)
13869 {
13870     struct net_device *netdev = pci_get_drvdata(pdev);
13871     struct bnxt *bp = netdev_priv(netdev);
13872 
13873     netdev_info(netdev, "PCI I/O error detected\n");
13874 
13875     rtnl_lock();
13876     netif_device_detach(netdev);
13877 
13878     bnxt_ulp_stop(bp);
13879 
13880     if (state == pci_channel_io_perm_failure) {
13881         rtnl_unlock();
13882         return PCI_ERS_RESULT_DISCONNECT;
13883     }
13884 
13885     if (state == pci_channel_io_frozen)
13886         set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
13887 
13888     if (netif_running(netdev))
13889         bnxt_close(netdev);
13890 
13891     if (pci_is_enabled(pdev))
13892         pci_disable_device(pdev);
13893     bnxt_free_ctx_mem(bp);
13894     kfree(bp->ctx);
13895     bp->ctx = NULL;
13896     rtnl_unlock();
13897 
13898     /* Request a slot reset. */
13899     return PCI_ERS_RESULT_NEED_RESET;
13900 }
13901 
13902 /**
13903  * bnxt_io_slot_reset - called after the pci bus has been reset.
13904  * @pdev: Pointer to PCI device
13905  *
13906  * Restart the card from scratch, as if from a cold-boot.
13907  * At this point, the card has experienced a hard reset,
13908  * followed by fixups by BIOS, and has its config space
13909  * set up identically to what it was at cold boot.
13910  */
13911 static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
13912 {
13913     pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
13914     struct net_device *netdev = pci_get_drvdata(pdev);
13915     struct bnxt *bp = netdev_priv(netdev);
13916     int err = 0, off;
13917 
13918     netdev_info(bp->dev, "PCI Slot Reset\n");
13919 
13920     rtnl_lock();
13921 
13922     if (pci_enable_device(pdev)) {
13923         dev_err(&pdev->dev,
13924             "Cannot re-enable PCI device after reset.\n");
13925     } else {
13926         pci_set_master(pdev);
13927         /* Upon a fatal error, the device's internal logic that latches the
13928          * BAR values is reset and is restored only when the BARs are
13929          * rewritten.
13930          *
13931          * Since pci_restore_state() does not rewrite a BAR whose value
13932          * matches the previously saved value, the driver writes the BARs
13933          * to 0 to force the restore after a fatal error.
13934          */
13935         if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
13936                        &bp->state)) {
13937             for (off = PCI_BASE_ADDRESS_0;
13938                  off <= PCI_BASE_ADDRESS_5; off += 4)
13939                 pci_write_config_dword(bp->pdev, off, 0);
13940         }
13941         pci_restore_state(pdev);
13942         pci_save_state(pdev);
13943 
13944         err = bnxt_hwrm_func_reset(bp);
13945         if (!err)
13946             result = PCI_ERS_RESULT_RECOVERED;
13947     }
13948 
13949     rtnl_unlock();
13950 
13951     return result;
13952 }
13953 
13954 /**
13955  * bnxt_io_resume - called when traffic can start flowing again.
13956  * @pdev: Pointer to PCI device
13957  *
13958  * This callback is called when the error recovery driver tells
13959  * us that it's OK to resume normal operation.
13960  */
13961 static void bnxt_io_resume(struct pci_dev *pdev)
13962 {
13963     struct net_device *netdev = pci_get_drvdata(pdev);
13964     struct bnxt *bp = netdev_priv(netdev);
13965     int err;
13966 
13967     netdev_info(bp->dev, "PCI Slot Resume\n");
13968     rtnl_lock();
13969 
13970     err = bnxt_hwrm_func_qcaps(bp);
13971     if (!err && netif_running(netdev))
13972         err = bnxt_open(netdev);
13973 
13974     bnxt_ulp_start(bp, err);
13975     if (!err) {
13976         bnxt_reenable_sriov(bp);
13977         netif_device_attach(netdev);
13978     }
13979 
13980     rtnl_unlock();
13981 }
13982 
13983 static const struct pci_error_handlers bnxt_err_handler = {
13984     .error_detected = bnxt_io_error_detected,
13985     .slot_reset = bnxt_io_slot_reset,
13986     .resume     = bnxt_io_resume
13987 };
13988 
13989 static struct pci_driver bnxt_pci_driver = {
13990     .name       = DRV_MODULE_NAME,
13991     .id_table   = bnxt_pci_tbl,
13992     .probe      = bnxt_init_one,
13993     .remove     = bnxt_remove_one,
13994     .shutdown   = bnxt_shutdown,
13995     .driver.pm  = BNXT_PM_OPS,
13996     .err_handler    = &bnxt_err_handler,
13997 #if defined(CONFIG_BNXT_SRIOV)
13998     .sriov_configure = bnxt_sriov_configure,
13999 #endif
14000 };
14001 
14002 static int __init bnxt_init(void)
14003 {
14004     bnxt_debug_init();
14005     return pci_register_driver(&bnxt_pci_driver);
14006 }
14007 
14008 static void __exit bnxt_exit(void)
14009 {
14010     pci_unregister_driver(&bnxt_pci_driver);
14011     if (bnxt_pf_wq)
14012         destroy_workqueue(bnxt_pf_wq);
14013     bnxt_debug_exit();
14014 }
14015 
14016 module_init(bnxt_init);
14017 module_exit(bnxt_exit);