/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

#ifndef NIC_H
#define NIC_H

#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include "thunder_bgx.h"

/* PCI device IDs */
#define PCI_DEVICE_ID_THUNDER_NIC_PF        0xA01E
#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF  0x0011
#define PCI_DEVICE_ID_THUNDER_NIC_VF        0xA034
#define PCI_DEVICE_ID_THUNDER_BGX       0xA026

/* Subsystem device IDs */
#define PCI_SUBSYS_DEVID_88XX_NIC_PF        0xA11E
#define PCI_SUBSYS_DEVID_81XX_NIC_PF        0xA21E
#define PCI_SUBSYS_DEVID_83XX_NIC_PF        0xA31E

#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF  0xA11E
#define PCI_SUBSYS_DEVID_88XX_NIC_VF        0xA134
#define PCI_SUBSYS_DEVID_81XX_NIC_VF        0xA234
#define PCI_SUBSYS_DEVID_83XX_NIC_VF        0xA334

/* PCI BAR nos */
#define PCI_CFG_REG_BAR_NUM     0
#define PCI_MSIX_REG_BAR_NUM        4

/* NIC SRIOV VF count */
#define MAX_NUM_VFS_SUPPORTED       128
#define DEFAULT_NUM_VF_ENABLED      8

#define NIC_TNS_BYPASS_MODE     0
#define NIC_TNS_MODE            1

/* NIC priv flags */
#define NIC_SRIOV_ENABLED       BIT(0)

/* Min/Max packet size */
#define NIC_HW_MIN_FRS          64
#define NIC_HW_MAX_FRS          9190 /* Excluding L2 header and FCS */

/* Max pkinds */
#define NIC_MAX_PKIND           16

/* Max when CPI_ALG is IP diffserv */
#define NIC_MAX_CPI_PER_LMAC        64

/* NIC VF Interrupts */
#define NICVF_INTR_CQ           0
#define NICVF_INTR_SQ           1
#define NICVF_INTR_RBDR         2
#define NICVF_INTR_PKT_DROP     3
#define NICVF_INTR_TCP_TIMER        4
#define NICVF_INTR_MBOX         5
#define NICVF_INTR_QS_ERR       6

#define NICVF_INTR_CQ_SHIFT     0
#define NICVF_INTR_SQ_SHIFT     8
#define NICVF_INTR_RBDR_SHIFT       16
#define NICVF_INTR_PKT_DROP_SHIFT   20
#define NICVF_INTR_TCP_TIMER_SHIFT  21
#define NICVF_INTR_MBOX_SHIFT       22
#define NICVF_INTR_QS_ERR_SHIFT     23

#define NICVF_INTR_CQ_MASK      (0xFF << NICVF_INTR_CQ_SHIFT)
#define NICVF_INTR_SQ_MASK      (0xFF << NICVF_INTR_SQ_SHIFT)
#define NICVF_INTR_RBDR_MASK        (0x03 << NICVF_INTR_RBDR_SHIFT)
#define NICVF_INTR_PKT_DROP_MASK    BIT(NICVF_INTR_PKT_DROP_SHIFT)
#define NICVF_INTR_TCP_TIMER_MASK   BIT(NICVF_INTR_TCP_TIMER_SHIFT)
#define NICVF_INTR_MBOX_MASK        BIT(NICVF_INTR_MBOX_SHIFT)
#define NICVF_INTR_QS_ERR_MASK      BIT(NICVF_INTR_QS_ERR_SHIFT)
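/* Illustrative sketch, not part of the original header: the per-queue
 * interrupt fields above appear to carry one enable/status bit per
 * CQ/SQ/RBDR starting at the corresponding *_SHIFT value, so the bit for a
 * given queue can be derived as below.  The helper name is hypothetical;
 * the driver's real helpers live in the nicvf .c files.
 */
static inline u64 nicvf_example_cq_intr_bit(u8 cq_idx)
{
    /* One bit per completion queue, starting at NICVF_INTR_CQ_SHIFT */
    return BIT_ULL(NICVF_INTR_CQ_SHIFT + cq_idx);
}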

/* MSI-X interrupts */
#define NIC_PF_MSIX_VECTORS     10
#define NIC_VF_MSIX_VECTORS     20

#define NIC_PF_INTR_ID_ECC0_SBE     0
#define NIC_PF_INTR_ID_ECC0_DBE     1
#define NIC_PF_INTR_ID_ECC1_SBE     2
#define NIC_PF_INTR_ID_ECC1_DBE     3
#define NIC_PF_INTR_ID_ECC2_SBE     4
#define NIC_PF_INTR_ID_ECC2_DBE     5
#define NIC_PF_INTR_ID_ECC3_SBE     6
#define NIC_PF_INTR_ID_ECC3_DBE     7
#define NIC_PF_INTR_ID_MBOX0        8
#define NIC_PF_INTR_ID_MBOX1        9

/* Minimum FIFO level before all packets for the CQ are dropped
 *
 * This value ensures that once a packet has been "accepted"
 * for reception it will not get dropped due to non-availability
 * of a CQ descriptor. A HW erratum mandates this value to be
 * at least 0x100.
 */
#define NICPF_CQM_MIN_DROP_LEVEL       0x100

/* Global timer for CQ timer thresh interrupts
 * Calculated for an SCLK of 700 MHz;
 * the value written should be 1/16th of what is expected.
 *
 * 1 tick per 0.025 usec
 */
#define NICPF_CLK_PER_INT_TICK      1

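/* Illustrative arithmetic, not part of the original header: assuming the
 * timer advances once every 16 SCLK cycles, a 700 MHz SCLK gives
 * 16 / 700 MHz ~= 22.9 ns ~= 0.025 usec per tick, which matches the
 * "1/16th of what is expected" note above.
 */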
/* Time to wait before we decide that an SQ is stuck.
 *
 * Since both pkt rx and tx notifications are done with the same CQ,
 * when packets are being received at a very high rate (e.g. L2 forwarding)
 * then freeing transmitted skbs will be delayed and the watchdog
 * will kick in, resetting the interface. Hence keep this value high.
 */
#define NICVF_TX_TIMEOUT        (50 * HZ)

struct nicvf_cq_poll {
    struct  nicvf *nicvf;
    u8  cq_idx;     /* Completion queue index */
    struct  napi_struct napi;
};

#define NIC_MAX_RSS_HASH_BITS       8
#define NIC_MAX_RSS_IDR_TBL_SIZE    (1 << NIC_MAX_RSS_HASH_BITS)
#define RSS_HASH_KEY_SIZE       5 /* 320 bit key */

struct nicvf_rss_info {
    bool enable;
#define RSS_L2_EXTENDED_HASH_ENA    BIT(0)
#define RSS_IP_HASH_ENA         BIT(1)
#define RSS_TCP_HASH_ENA        BIT(2)
#define RSS_TCP_SYN_DIS         BIT(3)
#define RSS_UDP_HASH_ENA        BIT(4)
#define RSS_L4_EXTENDED_HASH_ENA    BIT(5)
#define RSS_ROCE_ENA            BIT(6)
#define RSS_L3_BI_DIRECTION_ENA     BIT(7)
#define RSS_L4_BI_DIRECTION_ENA     BIT(8)
    u64 cfg;
    u8  hash_bits;
    u16 rss_size;
    u8  ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
    u64 key[RSS_HASH_KEY_SIZE];
} ____cacheline_aligned_in_smp;
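/* Illustrative sketch, not part of the original header: with 'hash_bits'
 * bits of the RSS hash used for indirection, rss_size is expected to be
 * (1 << hash_bits) and the destination queue comes from indexing ind_tbl
 * with the low bits of the hash.  Hypothetical helper, for illustration
 * only; the hardware performs this lookup itself.
 */
static inline u8 nicvf_example_rss_queue(const struct nicvf_rss_info *rss,
                     u32 hash)
{
    return rss->ind_tbl[hash & (rss->rss_size - 1)];
}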

struct nicvf_pfc {
    u8    autoneg;
    u8    fc_rx;
    u8    fc_tx;
};

enum rx_stats_reg_offset {
    RX_OCTS = 0x0,
    RX_UCAST = 0x1,
    RX_BCAST = 0x2,
    RX_MCAST = 0x3,
    RX_RED = 0x4,
    RX_RED_OCTS = 0x5,
    RX_ORUN = 0x6,
    RX_ORUN_OCTS = 0x7,
    RX_FCS = 0x8,
    RX_L2ERR = 0x9,
    RX_DRP_BCAST = 0xa,
    RX_DRP_MCAST = 0xb,
    RX_DRP_L3BCAST = 0xc,
    RX_DRP_L3MCAST = 0xd,
    RX_STATS_ENUM_LAST,
};

enum tx_stats_reg_offset {
    TX_OCTS = 0x0,
    TX_UCAST = 0x1,
    TX_BCAST = 0x2,
    TX_MCAST = 0x3,
    TX_DROP = 0x4,
    TX_STATS_ENUM_LAST,
};

struct nicvf_hw_stats {
    u64 rx_bytes;
    u64 rx_frames;
    u64 rx_ucast_frames;
    u64 rx_bcast_frames;
    u64 rx_mcast_frames;
    u64 rx_drops;
    u64 rx_drop_red;
    u64 rx_drop_red_bytes;
    u64 rx_drop_overrun;
    u64 rx_drop_overrun_bytes;
    u64 rx_drop_bcast;
    u64 rx_drop_mcast;
    u64 rx_drop_l3_bcast;
    u64 rx_drop_l3_mcast;
    u64 rx_fcs_errors;
    u64 rx_l2_errors;

    u64 tx_bytes;
    u64 tx_frames;
    u64 tx_ucast_frames;
    u64 tx_bcast_frames;
    u64 tx_mcast_frames;
    u64 tx_drops;
};

struct nicvf_drv_stats {
    /* CQE Rx errs */
    u64 rx_bgx_truncated_pkts;
    u64 rx_jabber_errs;
    u64 rx_fcs_errs;
    u64 rx_bgx_errs;
    u64 rx_prel2_errs;
    u64 rx_l2_hdr_malformed;
    u64 rx_oversize;
    u64 rx_undersize;
    u64 rx_l2_len_mismatch;
    u64 rx_l2_pclp;
    u64 rx_ip_ver_errs;
    u64 rx_ip_csum_errs;
    u64 rx_ip_hdr_malformed;
    u64 rx_ip_payload_malformed;
    u64 rx_ip_ttl_errs;
    u64 rx_l3_pclp;
    u64 rx_l4_malformed;
    u64 rx_l4_csum_errs;
    u64 rx_udp_len_errs;
    u64 rx_l4_port_errs;
    u64 rx_tcp_flag_errs;
    u64 rx_tcp_offset_errs;
    u64 rx_l4_pclp;
    u64 rx_truncated_pkts;

    /* CQE Tx errs */
    u64 tx_desc_fault;
    u64 tx_hdr_cons_err;
    u64 tx_subdesc_err;
    u64 tx_max_size_exceeded;
    u64 tx_imm_size_oflow;
    u64 tx_data_seq_err;
    u64 tx_mem_seq_err;
    u64 tx_lock_viol;
    u64 tx_data_fault;
    u64 tx_tstmp_conflict;
    u64 tx_tstmp_timeout;
    u64 tx_mem_fault;
    u64 tx_csum_overlap;
    u64 tx_csum_overflow;

    /* driver debug stats */
    u64 tx_tso;
    u64 tx_timeout;
    u64 txq_stop;
    u64 txq_wake;

    u64 rcv_buffer_alloc_failures;
    u64 page_alloc;

    struct u64_stats_sync   syncp;
};
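/* Illustrative sketch, not part of the original header: drv_stats is
 * allocated per-CPU (see 'struct nicvf' below) and guarded by 'syncp', so
 * a hot-path writer would typically bump its local copy inside a
 * u64_stats section, roughly as below.  Hypothetical helper; the real
 * update sites are in the nicvf .c files.
 */
static inline void nicvf_example_count_tso(struct nicvf_drv_stats __percpu *stats)
{
    struct nicvf_drv_stats *s = this_cpu_ptr(stats);

    u64_stats_update_begin(&s->syncp);
    s->tx_tso++;
    u64_stats_update_end(&s->syncp);
}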

struct cavium_ptp;

struct xcast_addr_list {
    int              count;
    u64              mc[];
};

struct nicvf_work {
    struct work_struct     work;
    u8                     mode;
    struct xcast_addr_list *mc;
};

struct nicvf {
    struct nicvf        *pnicvf;
    struct net_device   *netdev;
    struct pci_dev      *pdev;
    void __iomem        *reg_base;
    struct bpf_prog         *xdp_prog;
#define MAX_QUEUES_PER_QSET         8
    struct queue_set    *qs;
    void            *iommu_domain;
    u8          vf_id;
    u8          sqs_id;
    bool                    sqs_mode;
    bool            hw_tso;
    bool            t88;

    /* Receive buffer alloc */
    u32         rb_page_offset;
    u16         rb_pageref;
    bool            rb_alloc_fail;
    bool            rb_work_scheduled;
    struct page     *rb_page;
    struct delayed_work rbdr_work;
    struct tasklet_struct   rbdr_task;

    /* Secondary Qset */
    u8          sqs_count;
#define MAX_SQS_PER_VF_SINGLE_NODE      5
#define MAX_SQS_PER_VF              11
    struct nicvf        *snicvf[MAX_SQS_PER_VF];

    /* Queue count */
    u8          rx_queues;
    u8          tx_queues;
    u8          xdp_tx_queues;
    u8          max_queues;

    u8          node;
    u8          cpi_alg;
    bool            link_up;
    u8          mac_type;
    u8          duplex;
    u32         speed;
    bool            tns_mode;
    bool            loopback_supported;
    struct nicvf_rss_info   rss_info;
    struct nicvf_pfc    pfc;
    struct tasklet_struct   qs_err_task;
    struct work_struct  reset_task;
    struct nicvf_work       rx_mode_work;
    /* spinlock to protect workqueue arguments from concurrent access */
    spinlock_t              rx_mode_wq_lock;
    /* workqueue for handling kernel ndo_set_rx_mode() calls */
    struct workqueue_struct *nicvf_rx_mode_wq;
    /* mutex to protect VF's mailbox contents from concurrent access */
    struct mutex            rx_mode_mtx;
    struct delayed_work link_change_work;
    /* PTP timestamp */
    struct cavium_ptp   *ptp_clock;
    /* Inbound timestamping is on */
    bool            hw_rx_tstamp;
    /* When a packet that requires timestamping is sent, hardware inserts
     * two entries into the completion queue.  The first is the regular
     * CQE_TYPE_SEND entry that signals that the packet was sent.
     * The second is CQE_TYPE_SEND_PTP, which contains the actual timestamp
     * for that packet.
     * `ptp_skb` is initialized in the handler for the CQE_TYPE_SEND
     * entry and is used and zeroed in the handler for the CQE_TYPE_SEND_PTP
     * entry.
     * So `ptp_skb` holds the pointer to the packet between the calls to
     * the CQE_TYPE_SEND and CQE_TYPE_SEND_PTP handlers.
     */
    struct sk_buff      *ptp_skb;
    /* `tx_ptp_skbs` is set when the hardware is sending a packet that
     * requires timestamping.  Cavium hardware cannot process more than one
     * such packet at once, so this is set each time the driver submits
     * a packet that requires timestamping to the send queue and cleared
     * each time it receives the completion queue entry saying that the
     * packet was sent.
     * So `tx_ptp_skbs` prevents the driver from submitting more than one
     * packet that requires timestamping to the hardware at a time.
     */
    atomic_t        tx_ptp_skbs;

    /* Interrupt coalescing settings */
    u32         cq_coalesce_usecs;
    u32         msg_enable;

    /* Stats */
    struct nicvf_hw_stats   hw_stats;
    struct nicvf_drv_stats  __percpu *drv_stats;
    struct bgx_stats    bgx_stats;

    /* Napi */
    struct nicvf_cq_poll    *napi[8];

    /* MSI-X */
    u8          num_vec;
    char            irq_name[NIC_VF_MSIX_VECTORS][IFNAMSIZ + 15];
    bool            irq_allocated[NIC_VF_MSIX_VECTORS];
    cpumask_var_t       affinity_mask[NIC_VF_MSIX_VECTORS];

    /* VF <-> PF mailbox communication */
    bool            pf_acked;
    bool            pf_nacked;
    bool            set_mac_pending;
} ____cacheline_aligned_in_smp;

/* PF <--> VF Mailbox communication
 * Eight 64-bit registers are shared between the PF and each VF
 * (a separate register set per VF).
 * Writing '1' into the last register (mbx7) marks the end of a message.
 */

/* PF <--> VF mailbox communication */
#define NIC_PF_VF_MAILBOX_SIZE      2
#define NIC_MBOX_MSG_TIMEOUT        2000 /* ms */

/* Mailbox message types */
#define NIC_MBOX_MSG_READY      0x01    /* Is PF ready to rcv msgs */
#define NIC_MBOX_MSG_ACK        0x02    /* ACK the message received */
#define NIC_MBOX_MSG_NACK       0x03    /* NACK the message received */
#define NIC_MBOX_MSG_QS_CFG     0x04    /* Configure Qset */
#define NIC_MBOX_MSG_RQ_CFG     0x05    /* Configure receive queue */
#define NIC_MBOX_MSG_SQ_CFG     0x06    /* Configure Send queue */
#define NIC_MBOX_MSG_RQ_DROP_CFG    0x07    /* Configure receive queue drop levels */
#define NIC_MBOX_MSG_SET_MAC        0x08    /* Add MAC ID to DMAC filter */
#define NIC_MBOX_MSG_SET_MAX_FRS    0x09    /* Set max frame size */
#define NIC_MBOX_MSG_CPI_CFG        0x0A    /* Config CPI, RSSI */
#define NIC_MBOX_MSG_RSS_SIZE       0x0B    /* Get RSS indir_tbl size */
#define NIC_MBOX_MSG_RSS_CFG        0x0C    /* Config RSS table */
#define NIC_MBOX_MSG_RSS_CFG_CONT   0x0D    /* RSS config continuation */
#define NIC_MBOX_MSG_RQ_BP_CFG      0x0E    /* RQ backpressure config */
#define NIC_MBOX_MSG_RQ_SW_SYNC     0x0F    /* Flush inflight pkts to RQ */
#define NIC_MBOX_MSG_BGX_STATS      0x10    /* Get stats from BGX */
#define NIC_MBOX_MSG_BGX_LINK_CHANGE    0x11    /* BGX:LMAC link status */
#define NIC_MBOX_MSG_ALLOC_SQS      0x12    /* Allocate secondary Qset */
#define NIC_MBOX_MSG_NICVF_PTR      0x13    /* Send nicvf ptr to PF */
#define NIC_MBOX_MSG_PNICVF_PTR     0x14    /* Get primary qset nicvf ptr */
#define NIC_MBOX_MSG_SNICVF_PTR     0x15    /* Send secondary Qset nicvf ptr to primary VF */
#define NIC_MBOX_MSG_LOOPBACK       0x16    /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17    /* Reset statistics counters */
#define NIC_MBOX_MSG_PFC        0x18    /* Pause frame control */
#define NIC_MBOX_MSG_PTP_CFG        0x19    /* HW packet timestamp */
#define NIC_MBOX_MSG_CFG_DONE       0xF0    /* VF configuration done */
#define NIC_MBOX_MSG_SHUTDOWN       0xF1    /* VF is being shutdown */
#define NIC_MBOX_MSG_RESET_XCAST    0xF2    /* Reset DCAM filtering mode */
#define NIC_MBOX_MSG_ADD_MCAST      0xF3    /* Add MAC to DCAM filters */
#define NIC_MBOX_MSG_SET_XCAST      0xF4    /* Set MCAST/BCAST RX mode */

struct nic_cfg_msg {
    u8    msg;
    u8    vf_id;
    u8    node_id;
    u8    tns_mode:1;
    u8    sqs_mode:1;
    u8    loopback_supported:1;
    u8    mac_addr[ETH_ALEN];
};

/* Qset configuration */
struct qs_cfg_msg {
    u8    msg;
    u8    num;
    u8    sqs_count;
    u64   cfg;
};

/* Receive queue configuration */
struct rq_cfg_msg {
    u8    msg;
    u8    qs_num;
    u8    rq_num;
    u64   cfg;
};

/* Send queue configuration */
struct sq_cfg_msg {
    u8    msg;
    u8    qs_num;
    u8    sq_num;
    bool  sqs_mode;
    u64   cfg;
};

/* Set VF's MAC address */
struct set_mac_msg {
    u8    msg;
    u8    vf_id;
    u8    mac_addr[ETH_ALEN];
};

/* Set Maximum frame size */
struct set_frs_msg {
    u8    msg;
    u8    vf_id;
    u16   max_frs;
};

/* Set CPI algorithm type */
struct cpi_cfg_msg {
    u8    msg;
    u8    vf_id;
    u8    rq_cnt;
    u8    cpi_alg;
};

/* Get RSS table size */
struct rss_sz_msg {
    u8    msg;
    u8    vf_id;
    u16   ind_tbl_size;
};

/* Set RSS configuration */
struct rss_cfg_msg {
    u8    msg;
    u8    vf_id;
    u8    hash_bits;
    u8    tbl_len;
    u8    tbl_offset;
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
    u8    ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
};

struct bgx_stats_msg {
    u8    msg;
    u8    vf_id;
    u8    rx;
    u8    idx;
    u64   stats;
};

/* Physical interface link status */
struct bgx_link_status {
    u8    msg;
    u8    mac_type;
    u8    link_up;
    u8    duplex;
    u32   speed;
};

/* Get Extra Qset IDs */
struct sqs_alloc {
    u8    msg;
    u8    vf_id;
    u8    qs_count;
};

struct nicvf_ptr {
    u8    msg;
    u8    vf_id;
    bool  sqs_mode;
    u8    sqs_id;
    u64   nicvf;
};

/* Set interface in loopback mode */
struct set_loopback {
    u8    msg;
    u8    vf_id;
    bool  enable;
};

/* Reset statistics counters */
struct reset_stat_cfg {
    u8    msg;
    /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */
    u16   rx_stat_mask;
    /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */
    u8    tx_stat_mask;
    /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1)
     * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1)
     * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1)
     * ..
     * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1)
     * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1)
     */
    u16   rq_stat_mask;
    /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1)
     * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1)
     * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1)
     * ..
     * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1)
     * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1)
     */
    u16   sq_stat_mask;
};
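/* Illustrative note, not part of the original header: the rx/tx stat masks
 * appear to be indexed by the enum {rx,tx}_stats_reg_offset values above,
 * e.g. clearing only a VF's RX octet and unicast counters would presumably
 * use rx_stat_mask = BIT(RX_OCTS) | BIT(RX_UCAST), while rq_stat_mask = 0x3
 * would select only RQ0's two counters.
 */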

struct pfc {
    u8    msg;
    u8    get; /* Get or set PFC settings */
    u8    autoneg;
    u8    fc_rx;
    u8    fc_tx;
};

struct set_ptp {
    u8    msg;
    bool  enable;
};

struct xcast {
    u8    msg;
    u8    mode;
    u64   mac:48;
};

/* 128 bit shared memory between PF and each VF */
union nic_mbx {
    struct { u8 msg; }  msg;
    struct nic_cfg_msg  nic_cfg;
    struct qs_cfg_msg   qs;
    struct rq_cfg_msg   rq;
    struct sq_cfg_msg   sq;
    struct set_mac_msg  mac;
    struct set_frs_msg  frs;
    struct cpi_cfg_msg  cpi_cfg;
    struct rss_sz_msg   rss_size;
    struct rss_cfg_msg  rss_cfg;
    struct bgx_stats_msg    bgx_stats;
    struct bgx_link_status  link_status;
    struct sqs_alloc        sqs_alloc;
    struct nicvf_ptr    nicvf;
    struct set_loopback lbk;
    struct reset_stat_cfg   reset_stat;
    struct pfc      pfc;
    struct set_ptp      ptp;
    struct xcast            xcast;
};

#define NIC_NODE_ID_MASK    0x03
#define NIC_NODE_ID_SHIFT   44

static inline int nic_get_node_id(struct pci_dev *pdev)
{
    u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
    return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
}

static inline bool pass1_silicon(struct pci_dev *pdev)
{
    return (pdev->revision < 8) &&
        (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

static inline bool pass2_silicon(struct pci_dev *pdev)
{
    return (pdev->revision >= 8) &&
        (pdev->subsystem_device == PCI_SUBSYS_DEVID_88XX_NIC_PF);
}

int nicvf_set_real_num_queues(struct net_device *netdev,
                  int tx_queues, int rx_queues);
int nicvf_open(struct net_device *netdev);
int nicvf_stop(struct net_device *netdev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_config_rss(struct nicvf *nic);
void nicvf_set_rss_key(struct nicvf *nic);
void nicvf_set_ethtool_ops(struct net_device *netdev);
void nicvf_update_stats(struct nicvf *nic);
void nicvf_update_lmac_stats(struct nicvf *nic);
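/* Illustrative sketch, not part of the original header: a VF request is
 * typically built by filling one member of union nic_mbx and handing it to
 * nicvf_send_msg_to_pf() declared above, e.g. asking the PF to program a
 * new MAC address.  Hypothetical helper, for illustration only.
 */
static inline int nicvf_example_request_set_mac(struct nicvf *nic, u8 vf_id,
                        const u8 *addr)
{
    union nic_mbx mbx = {};

    mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
    mbx.mac.vf_id = vf_id;
    memcpy(mbx.mac.mac_addr, addr, ETH_ALEN);
    return nicvf_send_msg_to_pf(nic, &mbx);
}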

#endif /* NIC_H */