/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, Intel Corporation. */

#ifndef _ICE_H_
#define _ICE_H_

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/cpumask.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include <linux/ethtool.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/ip.h>
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/pkt_sched.h>
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_gact.h>
#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>
#include <net/geneve.h>
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include <net/vxlan.h>
#include <net/gtp.h>
#include <linux/ppp_defs.h>
#include "ice_devids.h"
#include "ice_type.h"
#include "ice_txrx.h"
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_sriov.h"
#include "ice_vf_mbx.h"
#include "ice_ptp.h"
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
#include "ice_repr.h"
#include "ice_eswitch.h"
#include "ice_lag.h"
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"

#define ICE_BAR0        0
#define ICE_REQ_DESC_MULTIPLE   32
#define ICE_MIN_NUM_DESC    64
#define ICE_MAX_NUM_DESC    8160
#define ICE_DFLT_MIN_RX_DESC    512
#define ICE_DFLT_NUM_TX_DESC    256
#define ICE_DFLT_NUM_RX_DESC    2048

#define ICE_DFLT_TRAFFIC_CLASS  BIT(0)
#define ICE_INT_NAME_STR_LEN    (IFNAMSIZ + 16)
#define ICE_AQ_LEN      192
#define ICE_MBXSQ_LEN       64
#define ICE_SBQ_LEN     64
#define ICE_MIN_LAN_TXRX_MSIX   1
#define ICE_MIN_LAN_OICR_MSIX   1
#define ICE_MIN_MSIX        (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
#define ICE_FDIR_MSIX       2
#define ICE_RDMA_NUM_AEQ_MSIX   4
#define ICE_MIN_RDMA_MSIX   2
#define ICE_ESWITCH_MSIX    1
#define ICE_NO_VSI      0xffff
#define ICE_VSI_MAP_CONTIG  0
#define ICE_VSI_MAP_SCATTER 1
#define ICE_MAX_SCATTER_TXQS    16
#define ICE_MAX_SCATTER_RXQS    16
#define ICE_Q_WAIT_RETRY_LIMIT  10
#define ICE_Q_WAIT_MAX_RETRY    (5 * ICE_Q_WAIT_RETRY_LIMIT)
#define ICE_MAX_LG_RSS_QS   256
#define ICE_RES_VALID_BIT   0x8000
#define ICE_RES_MISC_VEC_ID (ICE_RES_VALID_BIT - 1)
#define ICE_RES_RDMA_VEC_ID (ICE_RES_MISC_VEC_ID - 1)
/* All VF control VSIs share the same IRQ, so assign a unique ID for them */
#define ICE_RES_VF_CTRL_VEC_ID  (ICE_RES_RDMA_VEC_ID - 1)
#define ICE_INVAL_Q_INDEX   0xffff

#define ICE_MAX_RXQS_PER_TC     256 /* Used when setting VSI context per TC Rx queues */

#define ICE_CHNL_START_TC       1

#define ICE_MAX_RESET_WAIT      20

#define ICE_VSIQF_HKEY_ARRAY_SIZE   ((VSIQF_HKEY_MAX_INDEX + 1) *   4)

#define ICE_DFLT_NETIF_M (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define ICE_MAX_MTU (ICE_AQ_SET_MAC_FRAME_SIZE_MAX - ICE_ETH_PKT_HDR_PAD)

#define ICE_UP_TABLE_TRANSLATE(val, i) \
        (((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
          ICE_AQ_VSI_UP_TABLE_UP##i##_M)
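/* Illustrative expansion (not taken from a driver call site): for example,
 * ICE_UP_TABLE_TRANSLATE(2, 3) expands via token pasting to
 * (((2) << ICE_AQ_VSI_UP_TABLE_UP3_S) & ICE_AQ_VSI_UP_TABLE_UP3_M),
 * i.e. it places the value 2 into the UP translation table field for
 * user priority 3.
 */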

#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))

/* Minimum BW limit is 500 Kbps for any scheduler node */
#define ICE_MIN_BW_LIMIT        500
/* The user can specify BW in Kbit/Mbit/Gbit and the OS converts it into bytes
 * per second; use this divisor to convert the user-specified BW limit into Kbps.
 */
#define ICE_BW_KBPS_DIVISOR     125
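/* Worked example (illustrative): a 1 Gbit/s limit arrives from the stack as
 * 125,000,000 bytes per second; dividing by ICE_BW_KBPS_DIVISOR (125) yields
 * 1,000,000 Kbps, since 1 Kbit/s equals 125 bytes per second. A caller might
 * do something like (sketch, not a definitive call site):
 *
 *	u64 rate_kbps = div_u64(max_tx_rate_bytes, ICE_BW_KBPS_DIVISOR);
 *
 * where max_tx_rate_bytes is a hypothetical bytes-per-second value.
 */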

/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
    for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)

/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
    for ((i) = 0; (i) < (vsi)->num_txq; (i)++)

#define ice_for_each_xdp_txq(vsi, i) \
    for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)

#define ice_for_each_rxq(vsi, i) \
    for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)

/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
    for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)

#define ice_for_each_alloc_rxq(vsi, i) \
    for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)

#define ice_for_each_q_vector(vsi, i) \
    for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)

#define ice_for_each_chnl_tc(i) \
    for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
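/* Usage sketch (illustrative only): walk every VSI on a PF and every used Tx
 * queue on each VSI with the iterators above. process_ring() is a hypothetical
 * helper, not part of the driver.
 *
 *	int v, q;
 *
 *	ice_for_each_vsi(pf, v) {
 *		struct ice_vsi *vsi = pf->vsi[v];
 *
 *		if (!vsi)
 *			continue;
 *		ice_for_each_txq(vsi, q)
 *			process_ring(vsi->tx_rings[q]);
 *	}
 */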

#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_UCAST_RX)

#define ICE_UCAST_VLAN_PROMISC_BITS (ICE_PROMISC_UCAST_TX | \
                     ICE_PROMISC_UCAST_RX | \
                     ICE_PROMISC_VLAN_TX  | \
                     ICE_PROMISC_VLAN_RX)

#define ICE_MCAST_PROMISC_BITS (ICE_PROMISC_MCAST_TX | ICE_PROMISC_MCAST_RX)

#define ICE_MCAST_VLAN_PROMISC_BITS (ICE_PROMISC_MCAST_TX | \
                     ICE_PROMISC_MCAST_RX | \
                     ICE_PROMISC_VLAN_TX  | \
                     ICE_PROMISC_VLAN_RX)

#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
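/* Typical use (sketch): ice_pf_to_dev() yields the struct device embedded in
 * the PF's PCI device, so it can be passed straight to the dev_*() logging
 * helpers, e.g. dev_dbg(ice_pf_to_dev(pf), "message\n");
 */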

enum ice_feature {
    ICE_F_DSCP,
    ICE_F_PTP_EXTTS,
    ICE_F_SMA_CTRL,
    ICE_F_GNSS,
    ICE_F_MAX
};

DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);

struct ice_channel {
    struct list_head list;
    u8 type;
    u16 sw_id;
    u16 base_q;
    u16 num_rxq;
    u16 num_txq;
    u16 vsi_num;
    u8 ena_tc;
    struct ice_aqc_vsi_props info;
    u64 max_tx_rate;
    u64 min_tx_rate;
    atomic_t num_sb_fltr;
    struct ice_vsi *ch_vsi;
};

struct ice_txq_meta {
    u32 q_teid; /* Tx-scheduler element identifier */
    u16 q_id;   /* Entry in VSI's txq_map bitmap */
    u16 q_handle;   /* Relative index of Tx queue within TC */
    u16 vsi_idx;    /* VSI index that Tx queue belongs to */
    u8 tc;      /* TC number that Tx queue belongs to */
};

struct ice_tc_info {
    u16 qoffset;
    u16 qcount_tx;
    u16 qcount_rx;
    u8 netdev_tc;
};

struct ice_tc_cfg {
    u8 numtc; /* Total number of enabled TCs */
    u16 ena_tc; /* Tx map */
    struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};

struct ice_res_tracker {
    u16 num_entries;
    u16 end;
    u16 list[];
};

struct ice_qs_cfg {
    struct mutex *qs_mutex;  /* will be assigned to &pf->avail_q_mutex */
    unsigned long *pf_map;
    unsigned long pf_map_size;
    unsigned int q_count;
    unsigned int scatter_count;
    u16 *vsi_map;
    u16 vsi_map_offset;
    u8 mapping_mode;
};

struct ice_sw {
    struct ice_pf *pf;
    u16 sw_id;      /* switch ID for this switch */
    u16 bridge_mode;    /* VEB/VEPA/Port Virtualizer */
};

enum ice_pf_state {
    ICE_TESTING,
    ICE_DOWN,
    ICE_NEEDS_RESTART,
    ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
    ICE_RESET_OICR_RECV,        /* set by driver after rcv reset OICR */
    ICE_PFR_REQ,        /* set by driver */
    ICE_CORER_REQ,      /* set by driver */
    ICE_GLOBR_REQ,      /* set by driver */
    ICE_CORER_RECV,     /* set by OICR handler */
    ICE_GLOBR_RECV,     /* set by OICR handler */
    ICE_EMPR_RECV,      /* set by OICR handler */
    ICE_SUSPENDED,      /* set on module remove path */
    ICE_RESET_FAILED,       /* set by reset/rebuild */
    /* When checking for the PF to be in a nominal operating state, the
     * bits that are grouped at the beginning of the list need to be
     * checked. Bits occurring before ICE_STATE_NOMINAL_CHECK_BITS will
     * be checked. If you need to add a bit into consideration for nominal
     * operating state, it must be added before
     * ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
     * without appropriate consideration.
     */
    ICE_STATE_NOMINAL_CHECK_BITS,
    ICE_ADMINQ_EVENT_PENDING,
    ICE_MAILBOXQ_EVENT_PENDING,
    ICE_SIDEBANDQ_EVENT_PENDING,
    ICE_MDD_EVENT_PENDING,
    ICE_VFLR_EVENT_PENDING,
    ICE_FLTR_OVERFLOW_PROMISC,
    ICE_VF_DIS,
    ICE_CFG_BUSY,
    ICE_SERVICE_SCHED,
    ICE_SERVICE_DIS,
    ICE_FD_FLUSH_REQ,
    ICE_OICR_INTR_DIS,      /* Global OICR interrupt disabled */
    ICE_MDD_VF_PRINT_PENDING,   /* set when MDD event handled */
    ICE_VF_RESETS_DISABLED, /* disable resets during ice_remove */
    ICE_LINK_DEFAULT_OVERRIDE_PENDING,
    ICE_PHY_INIT_COMPLETE,
    ICE_FD_VF_FLUSH_CTX,        /* set at FD Rx IRQ or timeout */
    ICE_AUX_ERR_PENDING,
    ICE_STATE_NBITS     /* must be last */
};

enum ice_vsi_state {
    ICE_VSI_DOWN,
    ICE_VSI_NEEDS_RESTART,
    ICE_VSI_NETDEV_ALLOCD,
    ICE_VSI_NETDEV_REGISTERED,
    ICE_VSI_UMAC_FLTR_CHANGED,
    ICE_VSI_MMAC_FLTR_CHANGED,
    ICE_VSI_PROMISC_CHANGED,
    ICE_VSI_STATE_NBITS     /* must be last */
};

/* struct that defines a VSI, associated with a dev */
struct ice_vsi {
    struct net_device *netdev;
    struct ice_sw *vsw;      /* switch this VSI is on */
    struct ice_pf *back;         /* back pointer to PF */
    struct ice_port_info *port_info; /* back pointer to port_info */
    struct ice_rx_ring **rx_rings;   /* Rx ring array */
    struct ice_tx_ring **tx_rings;   /* Tx ring array */
    struct ice_q_vector **q_vectors; /* q_vector array */

    irqreturn_t (*irq_handler)(int irq, void *data);

    u64 tx_linearize;
    DECLARE_BITMAP(state, ICE_VSI_STATE_NBITS);
    unsigned int current_netdev_flags;
    u32 tx_restart;
    u32 tx_busy;
    u32 rx_buf_failed;
    u32 rx_page_failed;
    u16 num_q_vectors;
    u16 base_vector;        /* IRQ base for OS reserved vectors */
    enum ice_vsi_type type;
    u16 vsi_num;            /* HW (absolute) index of this VSI */
    u16 idx;            /* software index in pf->vsi[] */

    struct ice_vf *vf;      /* VF associated with this VSI */

    u16 ethtype;            /* Ethernet protocol for pause frame */
    u16 num_gfltr;
    u16 num_bfltr;

    /* RSS config */
    u16 rss_table_size; /* HW RSS table size */
    u16 rss_size;       /* Allocated RSS queues */
    u8 *rss_hkey_user;  /* User configured hash keys */
    u8 *rss_lut_user;   /* User configured lookup table entries */
    u8 rss_lut_type;    /* used to configure Get/Set RSS LUT AQ call */

    /* aRFS members only allocated for the PF VSI */
#define ICE_MAX_ARFS_LIST   1024
#define ICE_ARFS_LST_MASK   (ICE_MAX_ARFS_LIST - 1)
    struct hlist_head *arfs_fltr_list;
    struct ice_arfs_active_fltr_cntrs *arfs_fltr_cntrs;
    spinlock_t arfs_lock;   /* protects aRFS hash table and filter state */
    atomic_t *arfs_last_fltr_id;

    u16 max_frame;
    u16 rx_buf_len;

    struct ice_aqc_vsi_props info;   /* VSI properties */

    /* VSI stats */
    struct rtnl_link_stats64 net_stats;
    struct ice_eth_stats eth_stats;
    struct ice_eth_stats eth_stats_prev;

    struct list_head tmp_sync_list;     /* MAC filters to be synced */
    struct list_head tmp_unsync_list;   /* MAC filters to be unsynced */

    u8 irqs_ready:1;
    u8 current_isup:1;       /* Sync 'link up' logging */
    u8 stat_offsets_loaded:1;
    struct ice_vsi_vlan_ops inner_vlan_ops;
    struct ice_vsi_vlan_ops outer_vlan_ops;
    u16 num_vlan;

    /* queue information */
    u8 tx_mapping_mode;      /* ICE_MAP_MODE_[CONTIG|SCATTER] */
    u8 rx_mapping_mode;      /* ICE_MAP_MODE_[CONTIG|SCATTER] */
    u16 *txq_map;            /* index in pf->avail_txqs */
    u16 *rxq_map;            /* index in pf->avail_rxqs */
    u16 alloc_txq;           /* Allocated Tx queues */
    u16 num_txq;             /* Used Tx queues */
    u16 alloc_rxq;           /* Allocated Rx queues */
    u16 num_rxq;             /* Used Rx queues */
    u16 req_txq;             /* User requested Tx queues */
    u16 req_rxq;             /* User requested Rx queues */
    u16 num_rx_desc;
    u16 num_tx_desc;
    u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
    struct ice_tc_cfg tc_cfg;
    struct bpf_prog *xdp_prog;
    struct ice_tx_ring **xdp_rings;  /* XDP ring array */
    unsigned long *af_xdp_zc_qps;    /* tracks AF_XDP ZC enabled qps */
    u16 num_xdp_txq;         /* Used XDP queues */
    u8 xdp_mapping_mode;         /* ICE_MAP_MODE_[CONTIG|SCATTER] */

    struct net_device **target_netdevs;

    struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */

    /* Channel Specific Fields */
    struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
    u16 cnt_q_avail;
    u16 next_base_q;    /* next queue to be used for channel setup */
    struct list_head ch_list;
    u16 num_chnl_rxq;
    u16 num_chnl_txq;
    u16 ch_rss_size;
    u16 num_chnl_fltr;
    /* store away the RSS size info before configuring ADQ channels so that
     * it can be used after tc-qdisc delete to restore the RSS settings
     * as they were before
     */
    u16 orig_rss_size;
    /* this keeps track of all enabled TCs with and without DCB
     * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
     * information
     */
    u8 all_numtc;
    u16 all_enatc;

    /* store away TC info, to be used for rebuild logic */
    u8 old_numtc;
    u16 old_ena_tc;

    struct ice_channel *ch;

    /* back reference to the aggregator node this VSI corresponds to */
    struct ice_agg_node *agg_node;
} ____cacheline_internodealigned_in_smp;

/* struct that defines an interrupt vector */
struct ice_q_vector {
    struct ice_vsi *vsi;

    u16 v_idx;          /* index in the vsi->q_vector array. */
    u16 reg_idx;
    u8 num_ring_rx;         /* total number of Rx rings in vector */
    u8 num_ring_tx;         /* total number of Tx rings in vector */
    u8 wb_on_itr:1;         /* if true, WB on ITR is enabled */
    /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
     * value to the device
     */
    u8 intrl;

    struct napi_struct napi;

    struct ice_ring_container rx;
    struct ice_ring_container tx;

    cpumask_t affinity_mask;
    struct irq_affinity_notify affinity_notify;

    struct ice_channel *ch;

    char name[ICE_INT_NAME_STR_LEN];

    u16 total_events;   /* net_dim(): number of interrupts processed */
} ____cacheline_internodealigned_in_smp;

enum ice_pf_flags {
    ICE_FLAG_FLTR_SYNC,
    ICE_FLAG_RDMA_ENA,
    ICE_FLAG_RSS_ENA,
    ICE_FLAG_SRIOV_ENA,
    ICE_FLAG_SRIOV_CAPABLE,
    ICE_FLAG_DCB_CAPABLE,
    ICE_FLAG_DCB_ENA,
    ICE_FLAG_FD_ENA,
    ICE_FLAG_PTP_SUPPORTED,     /* PTP is supported by NVM */
    ICE_FLAG_PTP,           /* PTP is enabled by software */
    ICE_FLAG_ADV_FEATURES,
    ICE_FLAG_TC_MQPRIO,     /* support for Multi queue TC */
    ICE_FLAG_CLS_FLOWER,
    ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
    ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
    ICE_FLAG_NO_MEDIA,
    ICE_FLAG_FW_LLDP_AGENT,
    ICE_FLAG_MOD_POWER_UNSUPPORTED,
    ICE_FLAG_PHY_FW_LOAD_FAILED,
    ICE_FLAG_ETHTOOL_CTXT,      /* set when ethtool holds RTNL lock */
    ICE_FLAG_LEGACY_RX,
    ICE_FLAG_VF_TRUE_PROMISC_ENA,
    ICE_FLAG_MDD_AUTO_RESET_VF,
    ICE_FLAG_VF_VLAN_PRUNING,
    ICE_FLAG_LINK_LENIENT_MODE_ENA,
    ICE_FLAG_PLUG_AUX_DEV,
    ICE_FLAG_MTU_CHANGED,
    ICE_FLAG_GNSS,          /* GNSS successfully initialized */
    ICE_PF_FLAGS_NBITS      /* must be last */
};

struct ice_switchdev_info {
    struct ice_vsi *control_vsi;
    struct ice_vsi *uplink_vsi;
    bool is_running;
};

struct ice_agg_node {
    u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE    64
    u32 num_vsis;
    u8 valid;
};

struct ice_pf {
    struct pci_dev *pdev;

    struct devlink_region *nvm_region;
    struct devlink_region *sram_region;
    struct devlink_region *devcaps_region;

    /* devlink port data */
    struct devlink_port devlink_port;

    /* OS reserved IRQ details */
    struct msix_entry *msix_entries;
    struct ice_res_tracker *irq_tracker;
    /* First MSIX vector used by SR-IOV VFs. Calculated by subtracting the
     * number of MSIX vectors needed for all SR-IOV VFs from the number of
     * MSIX vectors allowed on this PF.
     */
    u16 sriov_base_vector;

    u16 ctrl_vsi_idx;       /* control VSI index in pf->vsi array */

    struct ice_vsi **vsi;       /* VSIs created by the driver */
    struct ice_sw *first_sw;    /* first switch created by firmware */
    u16 eswitch_mode;       /* current mode of eswitch */
    struct ice_vfs vfs;
    DECLARE_BITMAP(features, ICE_F_MAX);
    DECLARE_BITMAP(state, ICE_STATE_NBITS);
    DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
    unsigned long *avail_txqs;  /* bitmap to track PF Tx queue usage */
    unsigned long *avail_rxqs;  /* bitmap to track PF Rx queue usage */
    unsigned long serv_tmr_period;
    unsigned long serv_tmr_prev;
    struct timer_list serv_tmr;
    struct work_struct serv_task;
    struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
    struct mutex sw_mutex;      /* lock for protecting VSI alloc flow */
    struct mutex tc_mutex;      /* lock to protect TC changes */
    struct mutex adev_mutex;    /* lock to protect aux device access */
    u32 msg_enable;
    struct ice_ptp ptp;
    struct tty_driver *ice_gnss_tty_driver;
    struct tty_port *gnss_tty_port[ICE_GNSS_TTY_MINOR_DEVICES];
    struct gnss_serial *gnss_serial[ICE_GNSS_TTY_MINOR_DEVICES];
    u16 num_rdma_msix;      /* Total MSIX vectors for RDMA driver */
    u16 rdma_base_vector;

    /* spinlock to protect the AdminQ wait list */
    spinlock_t aq_wait_lock;
    struct hlist_head aq_wait_list;
    wait_queue_head_t aq_wait_queue;
    bool fw_emp_reset_disabled;

    wait_queue_head_t reset_wait_queue;

    u32 hw_csum_rx_error;
    u32 oicr_err_reg;
    u16 oicr_idx;       /* Other interrupt cause MSIX vector index */
    u16 num_avail_sw_msix;  /* remaining MSIX SW vectors left unclaimed */
    u16 max_pf_txqs;    /* Total Tx queues PF wide */
    u16 max_pf_rxqs;    /* Total Rx queues PF wide */
    u16 num_lan_msix;   /* Total MSIX vectors for base driver */
    u16 num_lan_tx;     /* num LAN Tx queues setup */
    u16 num_lan_rx;     /* num LAN Rx queues setup */
    u16 next_vsi;       /* Next free slot in pf->vsi[] - 0-based! */
    u16 num_alloc_vsi;
    u16 corer_count;    /* Core reset count */
    u16 globr_count;    /* Global reset count */
    u16 empr_count;     /* EMP reset count */
    u16 pfr_count;      /* PF reset count */

    u8 wol_ena : 1;     /* software state of WoL */
    u32 wakeup_reason;  /* last wakeup reason */
    struct ice_hw_port_stats stats;
    struct ice_hw_port_stats stats_prev;
    struct ice_hw hw;
    u8 stat_prev_loaded:1; /* has previous stats been loaded */
    u8 rdma_mode;
    u16 dcbx_cap;
    u32 tx_timeout_count;
    unsigned long tx_timeout_last_recovery;
    u32 tx_timeout_recovery_level;
    char int_name[ICE_INT_NAME_STR_LEN];
    struct auxiliary_device *adev;
    int aux_idx;
    u32 sw_int_count;
    /* count of tc_flower filters specific to channel (aka where filter
     * action is "hw_tc <tc_num>")
     */
    u16 num_dmac_chnl_fltrs;
    struct hlist_head tc_flower_fltr_list;

    __le64 nvm_phy_type_lo; /* NVM PHY type low */
    __le64 nvm_phy_type_hi; /* NVM PHY type high */
    struct ice_link_default_override_tlv link_dflt_override;
    struct ice_lag *lag; /* Link Aggregation information */

    struct ice_switchdev_info switchdev;

#define ICE_INVALID_AGG_NODE_ID     0
#define ICE_PF_AGG_NODE_ID_START    1
#define ICE_MAX_PF_AGG_NODES        32
    struct ice_agg_node pf_agg_node[ICE_MAX_PF_AGG_NODES];
#define ICE_VF_AGG_NODE_ID_START    65
#define ICE_MAX_VF_AGG_NODES        32
    struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};

struct ice_netdev_priv {
    struct ice_vsi *vsi;
    struct ice_repr *repr;
    /* indirect block callbacks on registered higher level devices
     * (e.g. tunnel devices)
     *
     * tc_indr_block_cb_priv_list is used to look up indirect callback
     * private data
     */
    struct list_head tc_indr_block_priv_list;
};

/**
 * ice_vector_ch_enabled
 * @qv: pointer to q_vector, can be NULL
 *
 * This function returns true if vector is channel enabled otherwise false
 */
static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
{
    return !!qv->ch; /* Enable it to run with TC */
}

/**
 * ice_irq_dynamic_ena - Enable default interrupt generation settings
 * @hw: pointer to HW struct
 * @vsi: pointer to VSI struct, can be NULL
 * @q_vector: pointer to q_vector, can be NULL
 */
static inline void
ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
            struct ice_q_vector *q_vector)
{
    u32 vector = (vsi && q_vector) ? q_vector->reg_idx :
                ((struct ice_pf *)hw->back)->oicr_idx;
    int itr = ICE_ITR_NONE;
    u32 val;

    /* clear the PBA here, as this function is meant to clean out all
     * previous interrupts and enable the interrupt
     */
    val = GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
          (itr << GLINT_DYN_CTL_ITR_INDX_S);
    if (vsi)
        if (test_bit(ICE_VSI_DOWN, vsi->state))
            return;
    wr32(hw, GLINT_DYN_CTL(vector), val);
}

/**
 * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
 * @netdev: pointer to the netdev struct
 */
static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
{
    struct ice_netdev_priv *np = netdev_priv(netdev);

    return np->vsi->back;
}

static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
{
    return !!READ_ONCE(vsi->xdp_prog);
}

static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
    ring->flags |= ICE_TX_FLAGS_RING_XDP;
}

/**
 * ice_xsk_pool - get XSK buffer pool bound to a ring
 * @ring: Rx ring to use
 *
 * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
 * present, NULL otherwise.
 */
static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
    struct ice_vsi *vsi = ring->vsi;
    u16 qid = ring->q_index;

    if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
        return NULL;

    return xsk_get_pool_from_qid(vsi->netdev, qid);
}
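/* Caller pattern (sketch, not lifted from the driver): the Rx clean-up path
 * can branch on the returned pool to choose between an AF_XDP zero-copy path
 * and the regular page-based path. clean_rx_zc() and clean_rx() are
 * hypothetical helpers used only for illustration.
 *
 *	struct xsk_buff_pool *pool = ice_xsk_pool(rx_ring);
 *
 *	if (pool)
 *		cleaned = clean_rx_zc(rx_ring, budget);
 *	else
 *		cleaned = clean_rx(rx_ring, budget);
 */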

/**
 * ice_tx_xsk_pool - assign XSK buff pool to XDP ring
 * @vsi: pointer to VSI
 * @qid: index of a queue to look at XSK buff pool presence
 *
 * Sets XSK buff pool pointer on XDP ring.
 *
 * XDP ring is picked from Rx ring, whereas Rx ring is picked based on provided
 * queue id. Reason for doing so is that queue vectors might have assigned more
 * than one XDP ring, e.g. when user reduced the queue count on netdev; Rx ring
 * carries a pointer to one of these XDP rings for its own purposes, such as
 * handling XDP_TX action, therefore we can piggyback here on the
 * rx_ring->xdp_ring assignment that was done during XDP rings initialization.
 */
static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
{
    struct ice_tx_ring *ring;

    ring = vsi->rx_rings[qid]->xdp_ring;
    if (!ring)
        return;

    if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
        ring->xsk_pool = NULL;
        return;
    }

    ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
}

/**
 * ice_get_main_vsi - Get the PF VSI
 * @pf: PF instance
 *
 * returns pf->vsi[0], which by definition is the PF VSI
 */
static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
{
    if (pf->vsi)
        return pf->vsi[0];

    return NULL;
}

/**
 * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
 * @np: private netdev structure
 */
static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
{
    /* In case of port representor return source port VSI. */
    if (np->repr)
        return np->repr->src_vsi;
    else
        return np->vsi;
}

/**
 * ice_get_ctrl_vsi - Get the control VSI
 * @pf: PF instance
 */
static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
{
    /* if pf->ctrl_vsi_idx is ICE_NO_VSI, control VSI was not set up */
    if (!pf->vsi || pf->ctrl_vsi_idx == ICE_NO_VSI)
        return NULL;

    return pf->vsi[pf->ctrl_vsi_idx];
}

/**
 * ice_find_vsi - Find the VSI from VSI ID
 * @pf: The PF pointer to search in
 * @vsi_num: The VSI ID to search for
 */
static inline struct ice_vsi *ice_find_vsi(struct ice_pf *pf, u16 vsi_num)
{
    int i;

    ice_for_each_vsi(pf, i)
        if (pf->vsi[i] && pf->vsi[i]->vsi_num == vsi_num)
            return pf->vsi[i];
    return NULL;
}
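/* Usage sketch (illustrative): the lookup can legitimately fail, so callers
 * should handle the NULL return, e.g.
 *
 *	struct ice_vsi *vsi = ice_find_vsi(pf, vsi_num);
 *
 *	if (!vsi)
 *		return;		(or report an error; behavior is caller specific)
 */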

/**
 * ice_is_switchdev_running - check if switchdev is configured
 * @pf: pointer to PF structure
 *
 * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
 * and switchdev is configured, false otherwise.
 */
static inline bool ice_is_switchdev_running(struct ice_pf *pf)
{
    return pf->switchdev.is_running;
}

/**
 * ice_set_sriov_cap - enable SRIOV in PF flags
 * @pf: PF struct
 */
static inline void ice_set_sriov_cap(struct ice_pf *pf)
{
    if (pf->hw.func_caps.common_cap.sr_iov_1_1)
        set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}

/**
 * ice_clear_sriov_cap - disable SRIOV in PF flags
 * @pf: PF struct
 */
static inline void ice_clear_sriov_cap(struct ice_pf *pf)
{
    clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
}

#define ICE_FD_STAT_CTR_BLOCK_COUNT 256
#define ICE_FD_STAT_PF_IDX(base_idx) \
            ((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
#define ICE_FD_STAT_CH          1
#define ICE_FD_CH_STAT_IDX(base_idx) \
            (ICE_FD_STAT_PF_IDX(base_idx) + ICE_FD_STAT_CH)
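/* Worked example (derived from the macros above): with
 * ICE_FD_STAT_CTR_BLOCK_COUNT of 256, a function whose base_idx is 2 starts
 * its counter block at ICE_FD_STAT_PF_IDX(2) = 512; ICE_FD_SB_STAT_IDX(2) is
 * therefore 512 for sideband rules, and ICE_FD_CH_STAT_IDX(2) is
 * 512 + ICE_FD_STAT_CH = 513 for ADQ channel rules.
 */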

/**
 * ice_is_adq_active - any active ADQs
 * @pf: pointer to PF
 *
 * This function returns true if there are any ADQs configured (which is
 * determined by looking at VSI type (which should be VSI_PF), numtc, and
 * TC_MQPRIO flag) otherwise return false
 */
static inline bool ice_is_adq_active(struct ice_pf *pf)
{
    struct ice_vsi *vsi;

    vsi = ice_get_main_vsi(pf);
    if (!vsi)
        return false;

    /* is ADQ configured */
    if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
        test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
        return true;

    return false;
}

bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
void ice_update_vsi_stats(struct ice_vsi *vsi);
void ice_update_pf_stats(struct ice_pf *pf);
void
ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
                 struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
         u32 flags);
int ice_set_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_get_rss_lut(struct ice_vsi *vsi, u8 *lut, u16 lut_size);
int ice_set_rss_key(struct ice_vsi *vsi, u8 *seed);
int ice_get_rss_key(struct ice_vsi *vsi, u8 *seed);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
int ice_plug_aux_dev(struct ice_pf *pf);
void ice_unplug_aux_dev(struct ice_pf *pf);
int ice_init_rdma(struct ice_pf *pf);
const char *ice_aq_str(enum ice_aq_err aq_err);
bool ice_is_wol_supported(struct ice_hw *hw);
void ice_fdir_del_all_fltrs(struct ice_vsi *vsi);
int
ice_fdir_write_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input, bool add,
            bool is_tun);
void ice_vsi_manage_fdir(struct ice_vsi *vsi, bool ena);
int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_del_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd);
int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd);
int
ice_get_fdir_fltr_ids(struct ice_hw *hw, struct ethtool_rxnfc *cmd,
              u32 *rule_locs);
void ice_fdir_rem_adq_chnl(struct ice_hw *hw, u16 vsi_idx);
void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
              struct ice_rq_event_info *event);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);

/**
 * ice_set_rdma_cap - enable RDMA support
 * @pf: PF struct
 */
static inline void ice_set_rdma_cap(struct ice_pf *pf)
{
    if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
        set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
        set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
    }
}

/**
 * ice_clear_rdma_cap - disable RDMA support
 * @pf: PF struct
 */
static inline void ice_clear_rdma_cap(struct ice_pf *pf)
{
    /* We can directly unplug aux device here only if the flag bit
     * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev()
     * could race with ice_plug_aux_dev() called from
     * ice_service_task(). In this case we only clear that bit now and
     * aux device will be unplugged later once ice_plug_aux_device()
     * called from ice_service_task() finishes (see ice_service_task()).
     */
    if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
        ice_unplug_aux_dev(pf);

    clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
0939 }
0940 #endif /* _ICE_H_ */