0001 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
0002 /* QLogic qed NIC Driver
0003  * Copyright (c) 2015-2017  QLogic Corporation
0004  * Copyright (c) 2019-2020 Marvell International Ltd.
0005  */
0006 
0007 #ifndef _QED_H
0008 #define _QED_H
0009 
0010 #include <linux/types.h>
0011 #include <linux/io.h>
0012 #include <linux/delay.h>
0013 #include <linux/firmware.h>
0014 #include <linux/interrupt.h>
0015 #include <linux/list.h>
0016 #include <linux/mutex.h>
0017 #include <linux/pci.h>
0018 #include <linux/slab.h>
0019 #include <linux/string.h>
0020 #include <linux/workqueue.h>
0021 #include <linux/zlib.h>
0022 #include <linux/hashtable.h>
0023 #include <linux/qed/qed_if.h>
0024 #include "qed_debug.h"
0025 #include "qed_hsi.h"
0026 #include "qed_dbg_hsi.h"
0027 #include "qed_mfw_hsi.h"
0028 
0029 extern const struct qed_common_ops qed_common_ops_pass;
0030 
0031 #define STORM_FW_VERSION                       \
0032     ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
0033      (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
0034 
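/* Usage sketch: STORM_FW_VERSION packs the four firmware version components
 * into one 32-bit word, so a version such as 8.59.1.0 (illustrative values)
 * would decode as:
 *
 *	u32 ver = STORM_FW_VERSION;
 *	u8 major = ver >> 24;		(8)
 *	u8 minor = (ver >> 16) & 0xff;	(59)
 *	u8 rev = (ver >> 8) & 0xff;	(1)
 *	u8 eng = ver & 0xff;		(0)
 */
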
0035 #define MAX_HWFNS_PER_DEVICE    (4)
0036 #define NAME_SIZE 16
0037 #define VER_SIZE 16
0038 
0039 #define QED_WFQ_UNIT    100
0040 
0041 #define QED_WID_SIZE            (1024)
0042 #define QED_MIN_WIDS        (4)
0043 #define QED_PF_DEMS_SIZE        (4)
0044 
0045 #define QED_LLH_DONT_CARE 0
0046 
0047 /* CAU states */
0048 enum qed_coalescing_mode {
0049     QED_COAL_MODE_DISABLE,
0050     QED_COAL_MODE_ENABLE
0051 };
0052 
0053 enum qed_nvm_cmd {
0054     QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
0055     QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
0056     QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
0057     QED_GET_MCP_NVM_RESP = 0xFFFFFF00
0058 };
0059 
0060 struct qed_eth_cb_ops;
0061 struct qed_dev_info;
0062 union qed_mcp_protocol_stats;
0063 enum qed_mcp_protocol_type;
0064 enum qed_mfw_tlv_type;
0065 union qed_mfw_tlv_data;
0066 
0067 /* helpers */
0068 #define QED_MFW_GET_FIELD(name, field) \
0069     (((name) & (field ## _MASK)) >> (field ## _SHIFT))
0070 
0071 #define QED_MFW_SET_FIELD(name, field, value)                      \
0072     do {                                       \
0073         (name)  &= ~(field ## _MASK);          \
0074         (name)  |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
0075     } while (0)
0076 
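/* Usage sketch for the two helpers above: the field argument must be a name
 * for which <field>_MASK and <field>_SHIFT are both defined. Here cfg is an
 * assumed MFW configuration word and FUNC_MF_CFG_MIN_BW is used purely as an
 * illustration of such a field:
 *
 *	u32 min_bw = QED_MFW_GET_FIELD(cfg, FUNC_MF_CFG_MIN_BW);
 *
 *	QED_MFW_SET_FIELD(cfg, FUNC_MF_CFG_MIN_BW, 50);
 */
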
0077 static inline u32 qed_db_addr(u32 cid, u32 DEMS)
0078 {
0079     u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
0080               (cid * QED_PF_DEMS_SIZE);
0081 
0082     return db_addr;
0083 }
0084 
0085 static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
0086 {
0087     u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
0088               FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);
0089 
0090     return db_addr;
0091 }
0092 
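/* Usage sketch: a PF doorbell offset is derived from the connection ID and a
 * DEMS value, then written through the doorbell BAR. The cid, db_data and
 * DQ_DEMS_LEGACY names are assumptions here; DOORBELL() is defined further
 * below in this header:
 *
 *	u32 db_offset = qed_db_addr(cid, DQ_DEMS_LEGACY);
 *
 *	DOORBELL(cdev, db_offset, db_data);
 */
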
0093 #define ALIGNED_TYPE_SIZE(type_name, p_hwfn)                     \
0094     ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
0095      ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
0096 
0097 #define for_each_hwfn(cdev, i)  for (i = 0; i < (cdev)->num_hwfns; i++)
0098 
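/* Usage sketch: for_each_hwfn() walks the hardware functions of a device;
 * qed_some_per_hwfn_setup() is a hypothetical callee, used only to show the
 * iteration pattern:
 *
 *	int i;
 *
 *	for_each_hwfn(cdev, i)
 *		qed_some_per_hwfn_setup(&cdev->hwfns[i]);
 */
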
0099 #define D_TRINE(val, cond1, cond2, true1, true2, def) \
0100     ((val) == (cond1) ? true1 :           \
0101      ((val) == (cond2) ? true2 : def))
0102 
0103 /* Forward declarations */
0104 struct qed_ptt_pool;
0105 struct qed_spq;
0106 struct qed_sb_info;
0107 struct qed_sb_attn_info;
0108 struct qed_cxt_mngr;
0109 struct qed_sb_sp_info;
0110 struct qed_ll2_info;
0111 struct qed_mcp_info;
0112 struct qed_llh_info;
0113 
0114 struct qed_rt_data {
0115     u32 *init_val;
0116     bool    *b_valid;
0117 };
0118 
0119 enum qed_tunn_mode {
0120     QED_MODE_L2GENEVE_TUNN,
0121     QED_MODE_IPGENEVE_TUNN,
0122     QED_MODE_L2GRE_TUNN,
0123     QED_MODE_IPGRE_TUNN,
0124     QED_MODE_VXLAN_TUNN,
0125 };
0126 
0127 enum qed_tunn_clss {
0128     QED_TUNN_CLSS_MAC_VLAN,
0129     QED_TUNN_CLSS_MAC_VNI,
0130     QED_TUNN_CLSS_INNER_MAC_VLAN,
0131     QED_TUNN_CLSS_INNER_MAC_VNI,
0132     QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
0133     MAX_QED_TUNN_CLSS,
0134 };
0135 
0136 struct qed_tunn_update_type {
0137     bool b_update_mode;
0138     bool b_mode_enabled;
0139     enum qed_tunn_clss tun_cls;
0140 };
0141 
0142 struct qed_tunn_update_udp_port {
0143     bool b_update_port;
0144     u16 port;
0145 };
0146 
0147 struct qed_tunnel_info {
0148     struct qed_tunn_update_type vxlan;
0149     struct qed_tunn_update_type l2_geneve;
0150     struct qed_tunn_update_type ip_geneve;
0151     struct qed_tunn_update_type l2_gre;
0152     struct qed_tunn_update_type ip_gre;
0153 
0154     struct qed_tunn_update_udp_port vxlan_port;
0155     struct qed_tunn_update_udp_port geneve_port;
0156 
0157     bool b_update_rx_cls;
0158     bool b_update_tx_cls;
0159 };
0160 
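/* Usage sketch: a tunnel reconfiguration request fills only the pieces that
 * actually change, e.g. updating the VXLAN UDP port (4789 is the IANA
 * default, used here as an example value):
 *
 *	struct qed_tunnel_info tunn = {};
 *
 *	tunn.vxlan_port.b_update_port = true;
 *	tunn.vxlan_port.port = 4789;
 */
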
0161 struct qed_tunn_start_params {
0162     unsigned long   tunn_mode;
0163     u16     vxlan_udp_port;
0164     u16     geneve_udp_port;
0165     u8      update_vxlan_udp_port;
0166     u8      update_geneve_udp_port;
0167     u8      tunn_clss_vxlan;
0168     u8      tunn_clss_l2geneve;
0169     u8      tunn_clss_ipgeneve;
0170     u8      tunn_clss_l2gre;
0171     u8      tunn_clss_ipgre;
0172 };
0173 
0174 struct qed_tunn_update_params {
0175     unsigned long   tunn_mode_update_mask;
0176     unsigned long   tunn_mode;
0177     u16     vxlan_udp_port;
0178     u16     geneve_udp_port;
0179     u8      update_rx_pf_clss;
0180     u8      update_tx_pf_clss;
0181     u8      update_vxlan_udp_port;
0182     u8      update_geneve_udp_port;
0183     u8      tunn_clss_vxlan;
0184     u8      tunn_clss_l2geneve;
0185     u8      tunn_clss_ipgeneve;
0186     u8      tunn_clss_l2gre;
0187     u8      tunn_clss_ipgre;
0188 };
0189 
0190 /* The PCI personality is not quite synonymous with the protocol ID:
0191  * 1. All personalities need CORE connections
0192  * 2. The Ethernet personality may also support the RoCE/iWARP protocols
0193  */
0194 enum qed_pci_personality {
0195     QED_PCI_ETH,
0196     QED_PCI_FCOE,
0197     QED_PCI_ISCSI,
0198     QED_PCI_NVMETCP,
0199     QED_PCI_ETH_ROCE,
0200     QED_PCI_ETH_IWARP,
0201     QED_PCI_ETH_RDMA,
0202     QED_PCI_DEFAULT, /* default in shmem */
0203 };
0204 
0205 /* All VFs are symmetric, all counters are PF + all VFs */
0206 struct qed_qm_iids {
0207     u32 cids;
0208     u32 vf_cids;
0209     u32 tids;
0210 };
0211 
0212 /* HW / FW resources, output of the features supported below; most of this
0213  * information is received from the MFW.
0214  */
0215 enum qed_resources {
0216     QED_SB,
0217     QED_L2_QUEUE,
0218     QED_VPORT,
0219     QED_RSS_ENG,
0220     QED_PQ,
0221     QED_RL,
0222     QED_MAC,
0223     QED_VLAN,
0224     QED_RDMA_CNQ_RAM,
0225     QED_ILT,
0226     QED_LL2_RAM_QUEUE,
0227     QED_LL2_CTX_QUEUE,
0228     QED_CMDQS_CQS,
0229     QED_RDMA_STATS_QUEUE,
0230     QED_BDQ,
0231     QED_MAX_RESC,
0232 };
0233 
0234 enum QED_FEATURE {
0235     QED_PF_L2_QUE,
0236     QED_VF,
0237     QED_RDMA_CNQ,
0238     QED_NVMETCP_CQ,
0239     QED_ISCSI_CQ,
0240     QED_FCOE_CQ,
0241     QED_VF_L2_QUE,
0242     QED_MAX_FEATURES,
0243 };
0244 
0245 enum qed_dev_cap {
0246     QED_DEV_CAP_ETH,
0247     QED_DEV_CAP_FCOE,
0248     QED_DEV_CAP_ISCSI,
0249     QED_DEV_CAP_ROCE,
0250     QED_DEV_CAP_IWARP,
0251 };
0252 
0253 enum qed_wol_support {
0254     QED_WOL_SUPPORT_NONE,
0255     QED_WOL_SUPPORT_PME,
0256 };
0257 
0258 enum qed_db_rec_exec {
0259     DB_REC_DRY_RUN,
0260     DB_REC_REAL_DEAL,
0261     DB_REC_ONCE,
0262 };
0263 
0264 struct qed_hw_info {
0265     /* PCI personality */
0266     enum qed_pci_personality    personality;
0267 #define QED_IS_RDMA_PERSONALITY(dev)                    \
0268     ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||      \
0269      (dev)->hw_info.personality == QED_PCI_ETH_IWARP ||     \
0270      (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
0271 #define QED_IS_ROCE_PERSONALITY(dev)                    \
0272     ((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||      \
0273      (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
0274 #define QED_IS_IWARP_PERSONALITY(dev)                   \
0275     ((dev)->hw_info.personality == QED_PCI_ETH_IWARP ||     \
0276      (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
0277 #define QED_IS_L2_PERSONALITY(dev)                  \
0278     ((dev)->hw_info.personality == QED_PCI_ETH ||           \
0279      QED_IS_RDMA_PERSONALITY(dev))
0280 #define QED_IS_FCOE_PERSONALITY(dev)                    \
0281     ((dev)->hw_info.personality == QED_PCI_FCOE)
0282 #define QED_IS_ISCSI_PERSONALITY(dev)                   \
0283     ((dev)->hw_info.personality == QED_PCI_ISCSI)
0284 #define QED_IS_NVMETCP_PERSONALITY(dev)                 \
0285     ((dev)->hw_info.personality == QED_PCI_NVMETCP)
0286 
0287     /* Resource Allocation scheme results */
0288     u32             resc_start[QED_MAX_RESC];
0289     u32             resc_num[QED_MAX_RESC];
0290 #define RESC_START(_p_hwfn, resc)   ((_p_hwfn)->hw_info.resc_start[resc])
0291 #define RESC_NUM(_p_hwfn, resc)     ((_p_hwfn)->hw_info.resc_num[resc])
0292 #define RESC_END(_p_hwfn, resc)     (RESC_START(_p_hwfn, resc) +    \
0293                      RESC_NUM(_p_hwfn, resc))
0294 
0295     u32             feat_num[QED_MAX_FEATURES];
0296 #define FEAT_NUM(_p_hwfn, resc)     ((_p_hwfn)->hw_info.feat_num[resc])
0297 
0298     /* Number of traffic classes the HW supports */
0299     u8              num_hw_tc;
0300 
0301     /* Number of TCs which should be active according to DCBx or
0302      * upper-layer driver configuration.
0303      */
0304     u8              num_active_tc;
0305 
0306     u8              offload_tc;
0307     bool                offload_tc_set;
0308 
0309     bool                multi_tc_roce_en;
0310 #define IS_QED_MULTI_TC_ROCE(p_hwfn)    ((p_hwfn)->hw_info.multi_tc_roce_en)
0311 
0312     u32             concrete_fid;
0313     u16             opaque_fid;
0314     u16             ovlan;
0315     u32             part_num[4];
0316 
0317     unsigned char           hw_mac_addr[ETH_ALEN];
0318     u64             node_wwn;
0319     u64             port_wwn;
0320 
0321     u16             num_fcoe_conns;
0322 
0323     struct qed_igu_info     *p_igu_info;
0324 
0325     u32             hw_mode;
0326     unsigned long           device_capabilities;
0327     u16             mtu;
0328 
0329     enum qed_wol_support        b_wol_support;
0330 };
0331 
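/* Usage sketch for the accessors defined inside qed_hw_info: resource ranges
 * come from the RESC_*() macros and feature counts from FEAT_NUM(), often
 * gated on the PCI personality:
 *
 *	u32 l2q_base = RESC_START(p_hwfn, QED_L2_QUEUE);
 *	u32 l2q_num = RESC_NUM(p_hwfn, QED_L2_QUEUE);
 *	u32 cnq_num = 0;
 *
 *	if (QED_IS_RDMA_PERSONALITY(p_hwfn))
 *		cnq_num = FEAT_NUM(p_hwfn, QED_RDMA_CNQ);
 */
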
0332 /* maximum size of read/write commands (HW limit) */
0333 #define DMAE_MAX_RW_SIZE        0x2000
0334 
0335 struct qed_dmae_info {
0336     /* Mutex for synchronizing access to functions */
0337     struct mutex    mutex;
0338 
0339     u8      channel;
0340 
0341     dma_addr_t  completion_word_phys_addr;
0342 
0343     /* The memory location where the DMAE writes the completion
0344      * value when an operation is finished on this context.
0345      */
0346     u32     *p_completion_word;
0347 
0348     dma_addr_t  intermediate_buffer_phys_addr;
0349 
0350     /* An intermediate buffer for DMAE operations that use virtual
0351      * addresses - data is DMA'd to/from this buffer and then
0352      * memcpy'd to/from the virtual address
0353      */
0354     u32     *p_intermediate_buffer;
0355 
0356     dma_addr_t  dmae_cmd_phys_addr;
0357     struct dmae_cmd *p_dmae_cmd;
0358 };
0359 
0360 struct qed_wfq_data {
0361     /* when feature is configured for at least 1 vport */
0362     u32 min_speed;
0363     bool    configured;
0364 };
0365 
0366 struct qed_qm_info {
0367     struct init_qm_pq_params    *qm_pq_params;
0368     struct init_qm_vport_params *qm_vport_params;
0369     struct init_qm_port_params  *qm_port_params;
0370     u16             start_pq;
0371     u8              start_vport;
0372     u16              pure_lb_pq;
0373     u16             first_ofld_pq;
0374     u16             first_llt_pq;
0375     u16             pure_ack_pq;
0376     u16             ooo_pq;
0377     u16             first_vf_pq;
0378     u16             first_mcos_pq;
0379     u16             first_rl_pq;
0380     u16             num_pqs;
0381     u16             num_vf_pqs;
0382     u8              num_vports;
0383     u8              max_phys_tcs_per_port;
0384     u8              ooo_tc;
0385     bool                pf_rl_en;
0386     bool                pf_wfq_en;
0387     bool                vport_rl_en;
0388     bool                vport_wfq_en;
0389     u8              pf_wfq;
0390     u32             pf_rl;
0391     struct qed_wfq_data     *wfq_data;
0392     u8 num_pf_rls;
0393 };
0394 
0395 #define QED_OVERFLOW_BIT    1
0396 
0397 struct qed_db_recovery_info {
0398     struct list_head list;
0399 
0400     /* Lock to protect the doorbell recovery mechanism list */
0401     spinlock_t lock;
0402     bool dorq_attn;
0403     u32 db_recovery_counter;
0404     unsigned long overflow;
0405 };
0406 
0407 struct storm_stats {
0408     u32     address;
0409     u32     len;
0410 };
0411 
0412 struct qed_storm_stats {
0413     struct storm_stats mstats;
0414     struct storm_stats pstats;
0415     struct storm_stats tstats;
0416     struct storm_stats ustats;
0417 };
0418 
0419 struct qed_fw_data {
0420     struct fw_ver_info  *fw_ver_info;
0421     const u8        *modes_tree_buf;
0422     union init_op       *init_ops;
0423     const u32       *arr_data;
0424     const u32       *fw_overlays;
0425     u32         fw_overlays_len;
0426     u32         init_ops_size;
0427 };
0428 
0429 enum qed_mf_mode_bit {
0430     /* Supports PF-classification based on tag */
0431     QED_MF_OVLAN_CLSS,
0432 
0433     /* Supports PF-classification based on MAC */
0434     QED_MF_LLH_MAC_CLSS,
0435 
0436     /* Supports PF-classification based on protocol type */
0437     QED_MF_LLH_PROTO_CLSS,
0438 
0439     /* Requires a default PF to be set */
0440     QED_MF_NEED_DEF_PF,
0441 
0442     /* Allow LL2 to multicast/broadcast */
0443     QED_MF_LL2_NON_UNICAST,
0444 
0445     /* Allow Cross-PF [& child VFs] Tx-switching */
0446     QED_MF_INTER_PF_SWITCH,
0447 
0448     /* Unified Fabric Port support enabled */
0449     QED_MF_UFP_SPECIFIC,
0450 
0451     /* Disable Accelerated Receive Flow Steering (aRFS) */
0452     QED_MF_DISABLE_ARFS,
0453 
0454     /* Use vlan for steering */
0455     QED_MF_8021Q_TAGGING,
0456 
0457     /* Use stag for steering */
0458     QED_MF_8021AD_TAGGING,
0459 
0460     /* Allow DSCP to TC mapping */
0461     QED_MF_DSCP_TO_TC_MAP,
0462 
0463     /* Do not insert a vlan tag with id 0 */
0464     QED_MF_DONT_ADD_VLAN0_TAG,
0465 };
0466 
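/* Usage sketch: these enum values index into cdev->mf_bits (declared below in
 * struct qed_dev) and are queried with the regular bitops:
 *
 *	bool ovlan_clss = test_bit(QED_MF_OVLAN_CLSS, &cdev->mf_bits);
 *	bool llh_mac_clss = test_bit(QED_MF_LLH_MAC_CLSS, &cdev->mf_bits);
 */
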
0467 enum qed_ufp_mode {
0468     QED_UFP_MODE_ETS,
0469     QED_UFP_MODE_VNIC_BW,
0470     QED_UFP_MODE_UNKNOWN
0471 };
0472 
0473 enum qed_ufp_pri_type {
0474     QED_UFP_PRI_OS,
0475     QED_UFP_PRI_VNIC,
0476     QED_UFP_PRI_UNKNOWN
0477 };
0478 
0479 struct qed_ufp_info {
0480     enum qed_ufp_pri_type pri_type;
0481     enum qed_ufp_mode mode;
0482     u8 tc;
0483 };
0484 
0485 enum BAR_ID {
0486     BAR_ID_0,       /* Used for GRC */
0487     BAR_ID_1        /* Used for doorbells */
0488 };
0489 
0490 struct qed_nvm_image_info {
0491     u32 num_images;
0492     struct bist_nvm_image_att *image_att;
0493     bool valid;
0494 };
0495 
0496 enum qed_hsi_def_type {
0497     QED_HSI_DEF_MAX_NUM_VFS,
0498     QED_HSI_DEF_MAX_NUM_L2_QUEUES,
0499     QED_HSI_DEF_MAX_NUM_PORTS,
0500     QED_HSI_DEF_MAX_SB_PER_PATH,
0501     QED_HSI_DEF_MAX_NUM_PFS,
0502     QED_HSI_DEF_MAX_NUM_VPORTS,
0503     QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
0504     QED_HSI_DEF_MAX_QM_TX_QUEUES,
0505     QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
0506     QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
0507     QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
0508     QED_HSI_DEF_MAX_PBF_CMD_LINES,
0509     QED_HSI_DEF_MAX_BTB_BLOCKS,
0510     QED_NUM_HSI_DEFS
0511 };
0512 
0513 struct qed_simd_fp_handler {
0514     void    *token;
0515     void    (*func)(void *cookie);
0516 };
0517 
0518 enum qed_slowpath_wq_flag {
0519     QED_SLOWPATH_MFW_TLV_REQ,
0520     QED_SLOWPATH_PERIODIC_DB_REC,
0521 };
0522 
0523 struct qed_hwfn {
0524     struct qed_dev          *cdev;
0525     u8              my_id;          /* ID inside the PF */
0526 #define IS_LEAD_HWFN(edev)              (!((edev)->my_id))
0527     u8              rel_pf_id;      /* Relative to engine*/
0528     u8              abs_pf_id;
0529 #define QED_PATH_ID(_p_hwfn) \
0530     (QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
0531     u8              port_id;
0532     bool                b_active;
0533 
0534     u32             dp_module;
0535     u8              dp_level;
0536     char                name[NAME_SIZE];
0537 
0538     bool                hw_init_done;
0539 
0540     u8              num_funcs_on_engine;
0541     u8 enabled_func_idx;
0542 
0543     /* BAR access */
0544     void __iomem            *regview;
0545     void __iomem            *doorbells;
0546     u64             db_phys_addr;
0547     unsigned long           db_size;
0548 
0549     /* PTT pool */
0550     struct qed_ptt_pool     *p_ptt_pool;
0551 
0552     /* HW info */
0553     struct qed_hw_info      hw_info;
0554 
0555     /* rt_array (for init-tool) */
0556     struct qed_rt_data      rt_data;
0557 
0558     /* SPQ */
0559     struct qed_spq          *p_spq;
0560 
0561     /* EQ */
0562     struct qed_eq           *p_eq;
0563 
0564     /* Consolidation queue */
0565     struct qed_consq        *p_consq;
0566 
0567     /* Slow-Path definitions */
0568     struct tasklet_struct       sp_dpc;
0569     bool                b_sp_dpc_enabled;
0570 
0571     struct qed_ptt          *p_main_ptt;
0572     struct qed_ptt          *p_dpc_ptt;
0573 
0574     /* PTP will be used only by the leading function.
0575      * Usage of all PTP APIs should be synchronized as a result.
0576      */
0577     struct qed_ptt *p_ptp_ptt;
0578 
0579     struct qed_sb_sp_info       *p_sp_sb;
0580     struct qed_sb_attn_info     *p_sb_attn;
0581 
0582     /* Protocol related */
0583     bool                using_ll2;
0584     struct qed_ll2_info     *p_ll2_info;
0585     struct qed_ooo_info     *p_ooo_info;
0586     struct qed_rdma_info        *p_rdma_info;
0587     struct qed_iscsi_info       *p_iscsi_info;
0588     struct qed_nvmetcp_info     *p_nvmetcp_info;
0589     struct qed_fcoe_info        *p_fcoe_info;
0590     struct qed_pf_params        pf_params;
0591 
0592     bool b_rdma_enabled_in_prs;
0593     u32 rdma_prs_search_reg;
0594 
0595     struct qed_cxt_mngr     *p_cxt_mngr;
0596 
0597     /* Flag indicating whether interrupts are enabled or not */
0598     bool                b_int_enabled;
0599     bool                b_int_requested;
0600 
0601     /* True if the driver has requested the link */
0602     bool                b_drv_link_init;
0603 
0604     struct qed_vf_iov       *vf_iov_info;
0605     struct qed_pf_iov       *pf_iov_info;
0606     struct qed_mcp_info     *mcp_info;
0607 
0608     struct qed_dcbx_info        *p_dcbx_info;
0609 
0610     struct qed_ufp_info     ufp_info;
0611 
0612     struct qed_dmae_info        dmae_info;
0613 
0614     /* QM init */
0615     struct qed_qm_info      qm_info;
0616     struct qed_storm_stats      storm_stats;
0617 
0618     /* Buffer for unzipping firmware data */
0619     void                *unzip_buf;
0620 
0621     struct dbg_tools_data       dbg_info;
0622     void                *dbg_user_info;
0623     struct virt_mem_desc        dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
0624 
0625     /* PWM region specific data */
0626     u16             wid_count;
0627     u32             dpi_size;
0628     u32             dpi_count;
0629 
0630     /* This is used to calculate the doorbell address */
0631     u32 dpi_start_offset;
0632 
0633     /* If one of the following is set then EDPM shouldn't be used */
0634     u8 dcbx_no_edpm;
0635     u8 db_bar_no_edpm;
0636 
0637     /* L2-related */
0638     struct qed_l2_info *p_l2_info;
0639 
0640     /* Mechanism for recovering from doorbell drop */
0641     struct qed_db_recovery_info db_recovery_info;
0642 
0643     /* NVM image count and attributes */
0644     struct qed_nvm_image_info nvm_info;
0645 
0646     struct phys_mem_desc *fw_overlay_mem;
0647     struct qed_ptt *p_arfs_ptt;
0648 
0649     struct qed_simd_fp_handler  simd_proto_handler[64];
0650 
0651 #ifdef CONFIG_QED_SRIOV
0652     struct workqueue_struct *iov_wq;
0653     struct delayed_work iov_task;
0654     unsigned long iov_task_flags;
0655 #endif
0656     struct z_stream_s *stream;
0657     bool slowpath_wq_active;
0658     struct workqueue_struct *slowpath_wq;
0659     struct delayed_work slowpath_task;
0660     unsigned long slowpath_task_flags;
0661     u32 periodic_db_rec_count;
0662 };
0663 
0664 struct pci_params {
0665     int     pm_cap;
0666 
0667     unsigned long   mem_start;
0668     unsigned long   mem_end;
0669     unsigned int    irq;
0670     u8      pf_num;
0671 };
0672 
0673 struct qed_int_param {
0674     u32 int_mode;
0675     u8  num_vectors;
0676     u8  min_msix_cnt; /* for minimal functionality */
0677 };
0678 
0679 struct qed_int_params {
0680     struct qed_int_param    in;
0681     struct qed_int_param    out;
0682     struct msix_entry   *msix_table;
0683     bool            fp_initialized;
0684     u8          fp_msix_base;
0685     u8          fp_msix_cnt;
0686     u8          rdma_msix_base;
0687     u8          rdma_msix_cnt;
0688 };
0689 
0690 struct qed_dbg_feature {
0691     struct dentry *dentry;
0692     u8 *dump_buf;
0693     u32 buf_size;
0694     u32 dumped_dwords;
0695 };
0696 
0697 struct qed_dev {
0698     u32             dp_module;
0699     u8              dp_level;
0700     char                name[NAME_SIZE];
0701 
0702     enum qed_dev_type       type;
0703     /* Translate type/revision combo into the proper conditions */
0704 #define QED_IS_BB(dev)          ((dev)->type == QED_DEV_TYPE_BB)
0705 #define QED_IS_BB_B0(dev)       (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
0706 #define QED_IS_AH(dev)          ((dev)->type == QED_DEV_TYPE_AH)
0707 #define QED_IS_K2(dev)          QED_IS_AH(dev)
0708 
0709     u16             vendor_id;
0710 
0711     u16             device_id;
0712 #define QED_DEV_ID_MASK         0xff00
0713 #define QED_DEV_ID_MASK_BB      0x1600
0714 #define QED_DEV_ID_MASK_AH      0x8000
0715 
0716     u16             chip_num;
0717 #define CHIP_NUM_MASK           0xffff
0718 #define CHIP_NUM_SHIFT          16
0719 
0720     u16             chip_rev;
0721 #define CHIP_REV_MASK           0xf
0722 #define CHIP_REV_SHIFT          12
0723 #define CHIP_REV_IS_B0(_cdev)       ((_cdev)->chip_rev == 1)
0724 
0725     u16             chip_metal;
0726 #define CHIP_METAL_MASK         0xff
0727 #define CHIP_METAL_SHIFT        4
0728 
0729     u16             chip_bond_id;
0730 #define CHIP_BOND_ID_MASK       0xf
0731 #define CHIP_BOND_ID_SHIFT      0
0732 
0733     u8              num_engines;
0734     u8              num_ports;
0735     u8              num_ports_in_engine;
0736     u8              num_funcs_in_port;
0737 
0738     u8              path_id;
0739 
0740     unsigned long           mf_bits;
0741 
0742     int             pcie_width;
0743     int             pcie_speed;
0744 
0745     /* Add MF related configuration */
0746     u8              mcp_rev;
0747     u8              boot_mode;
0748 
0749     /* WoL related configurations */
0750     u8 wol_config;
0751     u8 wol_mac[ETH_ALEN];
0752 
0753     u32             int_mode;
0754     enum qed_coalescing_mode    int_coalescing_mode;
0755     u16             rx_coalesce_usecs;
0756     u16             tx_coalesce_usecs;
0757 
0758     /* Start Bar offset of first hwfn */
0759     void __iomem            *regview;
0760     void __iomem            *doorbells;
0761     u64             db_phys_addr;
0762     unsigned long           db_size;
0763 
0764     /* PCI */
0765     u8              cache_shift;
0766 
0767     /* Init */
0768     const u32 *iro_arr;
0769 #define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
0770 
0771     /* HW functions */
0772     u8              num_hwfns;
0773     struct qed_hwfn         hwfns[MAX_HWFNS_PER_DEVICE];
0774 
0775     /* Engine affinity */
0776     u8              l2_affin_hint;
0777     u8              fir_affin;
0778     u8              iwarp_affin;
0779 
0780     /* SRIOV */
0781     struct qed_hw_sriov_info *p_iov_info;
0782 #define IS_QED_SRIOV(cdev)              (!!(cdev)->p_iov_info)
0783     struct qed_tunnel_info      tunnel;
0784     bool                b_is_vf;
0785     u32             drv_type;
0786     struct qed_eth_stats        *reset_stats;
0787     struct qed_fw_data      *fw_data;
0788 
0789     u32             mcp_nvm_resp;
0790 
0791     /* Recovery */
0792     bool recov_in_prog;
0793 
0794     /* Indicates whether attentions should be prevented from being reasserted */
0795     bool attn_clr_en;
0796 
0797     /* LLH info */
0798     u8 ppfid_bitmap;
0799     struct qed_llh_info *p_llh_info;
0800 
0801     /* Linux specific here */
0802     struct qed_dev_info     common_dev_info;
0803     struct  qede_dev        *edev;
0804     struct  pci_dev         *pdev;
0805     u32 flags;
0806 #define QED_FLAG_STORAGE_STARTED    (BIT(0))
0807     int             msg_enable;
0808 
0809     struct pci_params       pci_params;
0810 
0811     struct qed_int_params       int_params;
0812 
0813     u8              protocol;
0814 #define IS_QED_ETH_IF(cdev)     ((cdev)->protocol == QED_PROTOCOL_ETH)
0815 #define IS_QED_FCOE_IF(cdev)    ((cdev)->protocol == QED_PROTOCOL_FCOE)
0816 
0817     /* Callbacks to protocol driver */
0818     union {
0819         struct qed_common_cb_ops    *common;
0820         struct qed_eth_cb_ops       *eth;
0821         struct qed_fcoe_cb_ops      *fcoe;
0822         struct qed_iscsi_cb_ops     *iscsi;
0823         struct qed_nvmetcp_cb_ops   *nvmetcp;
0824     } protocol_ops;
0825     void                *ops_cookie;
0826 
0827 #ifdef CONFIG_QED_LL2
0828     struct qed_cb_ll2_info      *ll2;
0829     u8              ll2_mac_address[ETH_ALEN];
0830 #endif
0831     struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
0832     u8 engine_for_debug;
0833     bool disable_ilt_dump;
0834     bool                dbg_bin_dump;
0835 
0836     DECLARE_HASHTABLE(connections, 10);
0837     const struct firmware       *firmware;
0838 
0839     bool print_dbg_data;
0840 
0841     u32 rdma_max_sge;
0842     u32 rdma_max_inline;
0843     u32 rdma_max_srq_sge;
0844     u16 tunn_feature_mask;
0845 
0846     bool                iwarp_cmt;
0847 };
0848 
0849 u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
0850 
0851 #define NUM_OF_VFS(dev) \
0852     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
0853 #define NUM_OF_L2_QUEUES(dev) \
0854     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
0855 #define NUM_OF_PORTS(dev) \
0856     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
0857 #define NUM_OF_SBS(dev) \
0858     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
0859 #define NUM_OF_ENG_PFS(dev) \
0860     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
0861 #define NUM_OF_VPORTS(dev) \
0862     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
0863 #define NUM_OF_RSS_ENGINES(dev) \
0864     qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
0865 #define NUM_OF_QM_TX_QUEUES(dev) \
0866     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
0867 #define NUM_OF_PXP_ILT_RECORDS(dev) \
0868     qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
0869 #define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
0870     qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
0871 #define NUM_OF_QM_GLOBAL_RLS(dev) \
0872     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
0873 #define NUM_OF_PBF_CMD_LINES(dev) \
0874     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
0875 #define NUM_OF_BTB_BLOCKS(dev) \
0876     qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
0877 
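/* Usage sketch: these wrappers resolve chip-specific HSI limits at run time,
 * so callers need not distinguish BB from AH/K2 devices:
 *
 *	u32 max_vfs = NUM_OF_VFS(cdev);
 *	u32 max_sbs = NUM_OF_SBS(cdev);
 */
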
0878 /**
0879  * qed_concrete_to_sw_fid(): Get the sw function id from
0880  *                           the concrete value.
0881  *
0882  * @cdev: Qed dev pointer.
0883  * @concrete_fid: Concrete fid.
0884  *
0885  * Return: The sw function id (u8).
0886  */
0887 static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
0888                     u32 concrete_fid)
0889 {
0890     u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
0891     u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
0892     u8 vf_valid = GET_FIELD(concrete_fid,
0893                 PXP_CONCRETE_FID_VFVALID);
0894     u8 sw_fid;
0895 
0896     if (vf_valid)
0897         sw_fid = vfid + MAX_NUM_PFS;
0898     else
0899         sw_fid = pfid;
0900 
0901     return sw_fid;
0902 }
0903 
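/* Example mapping, following directly from the function above: a concrete FID
 * with VFVALID clear maps to its PFID (e.g. pfid 2 -> sw_fid 2), while a VF
 * maps past the PF range (e.g. vfid 5 -> sw_fid MAX_NUM_PFS + 5).
 */
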
0904 #define PKT_LB_TC   9
0905 
0906 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
0907 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
0908                      struct qed_ptt *p_ptt,
0909                      u32 min_pf_rate);
0910 
0911 void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
0912 int qed_device_num_engines(struct qed_dev *cdev);
0913 void qed_set_fw_mac_addr(__le16 *fw_msb,
0914              __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
0915 
0916 #define QED_LEADING_HWFN(dev)   (&(dev)->hwfns[0])
0917 #define QED_IS_CMT(dev)     ((dev)->num_hwfns > 1)
0918 /* Macros for getting the engine-affinitized hwfn (FIR: fcoe, iscsi, roce) */
0919 #define QED_FIR_AFFIN_HWFN(dev)     (&(dev)->hwfns[dev->fir_affin])
0920 #define QED_IWARP_AFFIN_HWFN(dev)       (&(dev)->hwfns[dev->iwarp_affin])
0921 #define QED_AFFIN_HWFN(dev)                \
0922     (QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
0923      QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
0924 #define QED_AFFIN_HWFN_IDX(dev) (IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
0925 
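/* Usage sketch: storage and RDMA code paths select the engine-affinitized
 * hwfn through these macros instead of assuming the leading hwfn:
 *
 *	struct qed_hwfn *p_hwfn = QED_AFFIN_HWFN(cdev);
 *	u8 affin_idx = QED_AFFIN_HWFN_IDX(cdev);
 */
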
0926 /* Flags for indication of required queues */
0927 #define PQ_FLAGS_RLS    (BIT(0))
0928 #define PQ_FLAGS_MCOS   (BIT(1))
0929 #define PQ_FLAGS_LB     (BIT(2))
0930 #define PQ_FLAGS_OOO    (BIT(3))
0931 #define PQ_FLAGS_ACK    (BIT(4))
0932 #define PQ_FLAGS_OFLD   (BIT(5))
0933 #define PQ_FLAGS_VFS    (BIT(6))
0934 #define PQ_FLAGS_LLT    (BIT(7))
0935 #define PQ_FLAGS_MTC    (BIT(8))
0936 
0937 /* physical queue index for cm context initialization */
0938 u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
0939 u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
0940 u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
0941 u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
0942 u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
0943 
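/* Usage sketch: CM context and ramrod initialization code resolves a physical
 * queue index either from one of the PQ_FLAGS_* bits above or from a traffic
 * class (the tc variable is an assumed input here):
 *
 *	u16 ofld_pq = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_OFLD);
 *	u16 tc_pq = qed_get_cm_pq_idx_mcos(p_hwfn, tc);
 */
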
0944 /* doorbell recovery mechanism */
0945 void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
0946 void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
0947 bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
0948 
0949 #define GET_GTT_REG_ADDR(__base, __offset, __idx) \
0950     ((__base) + __offset ## _GTT_OFFSET((__idx)))
0951 
0952 #define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
0953     ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
0954 
0955 /* Other Linux specific common definitions */
0956 #define DP_NAME(cdev) ((cdev)->name)
0957 
0958 #define REG_ADDR(cdev, offset)          ((void __iomem *)((u8 __iomem *)\
0959                         ((cdev)->regview) + \
0960                              (offset)))
0961 
0962 #define REG_RD(cdev, offset)            readl(REG_ADDR(cdev, offset))
0963 #define REG_WR(cdev, offset, val)       writel((u32)val, REG_ADDR(cdev, offset))
0964 #define REG_WR16(cdev, offset, val)     writew((u16)val, REG_ADDR(cdev, offset))
0965 
0966 #define DOORBELL(cdev, db_addr, val)             \
0967     writel((u32)val, (void __iomem *)((u8 __iomem *)\
0968                       ((cdev)->doorbells) + (db_addr)))
0969 
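/* Usage sketch: register and doorbell accesses go through the wrappers above,
 * which add the mapped BAR base to the supplied offset. The offset and value
 * names used here are illustrative only:
 *
 *	u32 val = REG_RD(cdev, some_grc_offset);
 *
 *	REG_WR(cdev, some_grc_offset, val | BIT(0));
 *	DOORBELL(cdev, db_offset, db_value);
 */
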
0970 #define MFW_PORT(_p_hwfn)       ((_p_hwfn)->abs_pf_id %           \
0971                   qed_device_num_ports((_p_hwfn)->cdev))
0972 int qed_device_num_ports(struct qed_dev *cdev);
0973 
0974 /* Prototypes */
0975 int qed_fill_dev_info(struct qed_dev *cdev,
0976               struct qed_dev_info *dev_info);
0977 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
0978 void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
0979 u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
0980            u32 input_len, u8 *input_buf,
0981            u32 max_size, u8 *unzip_buf);
0982 int qed_recovery_process(struct qed_dev *cdev);
0983 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
0984 void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
0985                enum qed_hw_err_type err_type);
0986 void qed_get_protocol_stats(struct qed_dev *cdev,
0987                 enum qed_mcp_protocol_type type,
0988                 union qed_mcp_protocol_stats *stats);
0989 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
0990 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
0991 int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
0992 
0993 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
0994               enum qed_mfw_tlv_type type,
0995               union qed_mfw_tlv_data *tlv_data);
0996 
0997 void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
0998 
0999 void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
1000 
1001 int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
1002 int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
1003 void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
1004 void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
1005 void qed_llh_clear_all_filters(struct qed_dev *cdev);
1006 unsigned long qed_get_epoch_time(void);
1007 #endif /* _QED_H */