#ifndef _QED_IF_H
#define _QED_IF_H

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <net/devlink.h>

#define QED_TX_SWS_TIMER_DFLT 500
#define QED_TWO_MSL_TIMER_DFLT 4000

enum dcbx_protocol_type {
	DCBX_PROTOCOL_ISCSI,
	DCBX_PROTOCOL_FCOE,
	DCBX_PROTOCOL_ROCE,
	DCBX_PROTOCOL_ROCE_V2,
	DCBX_PROTOCOL_ETH,
	DCBX_MAX_PROTOCOL_TYPE
};

#define QED_ROCE_PROTOCOL_INDEX (3)

#define QED_LLDP_CHASSIS_ID_STAT_LEN 4
#define QED_LLDP_PORT_ID_STAT_LEN 4
#define QED_DCBX_MAX_APP_PROTOCOL 32
#define QED_MAX_PFC_PRIORITIES 8
#define QED_DCBX_DSCP_SIZE 64

struct qed_dcbx_lldp_remote {
	u32 peer_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 peer_port_id[QED_LLDP_PORT_ID_STAT_LEN];
	bool enable_rx;
	bool enable_tx;
	u32 tx_interval;
	u32 max_credit;
};

struct qed_dcbx_lldp_local {
	u32 local_chassis_id[QED_LLDP_CHASSIS_ID_STAT_LEN];
	u32 local_port_id[QED_LLDP_PORT_ID_STAT_LEN];
};

struct qed_dcbx_app_prio {
	u8 roce;
	u8 roce_v2;
	u8 fcoe;
	u8 iscsi;
	u8 eth;
};

struct qed_dbcx_pfc_params {
	bool willing;
	bool enabled;
	u8 prio[QED_MAX_PFC_PRIORITIES];
	u8 max_tc;
};

enum qed_dcbx_sf_ieee_type {
	QED_DCBX_SF_IEEE_ETHTYPE,
	QED_DCBX_SF_IEEE_TCP_PORT,
	QED_DCBX_SF_IEEE_UDP_PORT,
	QED_DCBX_SF_IEEE_TCP_UDP_PORT
};

struct qed_app_entry {
	bool ethtype;
	enum qed_dcbx_sf_ieee_type sf_ieee;
	bool enabled;
	u8 prio;
	u16 proto_id;
	enum dcbx_protocol_type proto_type;
};

struct qed_dcbx_params {
	struct qed_app_entry app_entry[QED_DCBX_MAX_APP_PROTOCOL];
	u16 num_app_entries;
	bool app_willing;
	bool app_valid;
	bool app_error;
	bool ets_willing;
	bool ets_enabled;
	bool ets_cbs;
	bool valid;
	u8 ets_pri_tc_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_bw_tbl[QED_MAX_PFC_PRIORITIES];
	u8 ets_tc_tsa_tbl[QED_MAX_PFC_PRIORITIES];
	struct qed_dbcx_pfc_params pfc;
	u8 max_ets_tc;
};

struct qed_dcbx_admin_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_remote_params {
	struct qed_dcbx_params params;
	bool valid;
};

struct qed_dcbx_operational_params {
	struct qed_dcbx_app_prio app_prio;
	struct qed_dcbx_params params;
	bool valid;
	bool enabled;
	bool ieee;
	bool cee;
	bool local;
	u32 err;
};

struct qed_dcbx_get {
	struct qed_dcbx_operational_params operational;
	struct qed_dcbx_lldp_remote lldp_remote;
	struct qed_dcbx_lldp_local lldp_local;
	struct qed_dcbx_remote_params remote;
	struct qed_dcbx_admin_params local;
};

enum qed_nvm_images {
	QED_NVM_IMAGE_ISCSI_CFG,
	QED_NVM_IMAGE_FCOE_CFG,
	QED_NVM_IMAGE_MDUMP,
	QED_NVM_IMAGE_NVM_CFG1,
	QED_NVM_IMAGE_DEFAULT_CFG,
	QED_NVM_IMAGE_NVM_META,
};

struct qed_link_eee_params {
	u32 tx_lpi_timer;
#define QED_EEE_1G_ADV BIT(0)
#define QED_EEE_10G_ADV BIT(1)

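	/* adv_caps / lp_adv_caps are bitmasks of the QED_EEE_*_ADV values above */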
	u8 adv_caps;
	u8 lp_adv_caps;
	bool enable;
	bool tx_lpi_enable;
};

enum qed_led_mode {
	QED_LED_MODE_OFF,
	QED_LED_MODE_ON,
	QED_LED_MODE_RESTORE
};

struct qed_mfw_tlv_eth {
	u16 lso_maxoff_size;
	bool lso_maxoff_size_set;
	u16 lso_minseg_size;
	bool lso_minseg_size_set;
	u8 prom_mode;
	bool prom_mode_set;
	u16 tx_descr_size;
	bool tx_descr_size_set;
	u16 rx_descr_size;
	bool rx_descr_size_set;
	u16 netq_count;
	bool netq_count_set;
	u32 tcp4_offloads;
	bool tcp4_offloads_set;
	u32 tcp6_offloads;
	bool tcp6_offloads_set;
	u16 tx_descr_qdepth;
	bool tx_descr_qdepth_set;
	u16 rx_descr_qdepth;
	bool rx_descr_qdepth_set;
	u8 iov_offload;
#define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
#define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
#define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
	bool iov_offload_set;
	u8 txqs_empty;
	bool txqs_empty_set;
	u8 rxqs_empty;
	bool rxqs_empty_set;
	u8 num_txqs_full;
	bool num_txqs_full_set;
	u8 num_rxqs_full;
	bool num_rxqs_full_set;
};

#define QED_MFW_TLV_TIME_SIZE 14
struct qed_mfw_tlv_time {
	bool b_set;
	u8 month;
	u8 day;
	u8 hour;
	u8 min;
	u16 msec;
	u16 usec;
};

struct qed_mfw_tlv_fcoe {
	u8 scsi_timeout;
	bool scsi_timeout_set;
	u32 rt_tov;
	bool rt_tov_set;
	u32 ra_tov;
	bool ra_tov_set;
	u32 ed_tov;
	bool ed_tov_set;
	u32 cr_tov;
	bool cr_tov_set;
	u8 boot_type;
	bool boot_type_set;
	u8 npiv_state;
	bool npiv_state_set;
	u32 num_npiv_ids;
	bool num_npiv_ids_set;
	u8 switch_name[8];
	bool switch_name_set;
	u16 switch_portnum;
	bool switch_portnum_set;
	u8 switch_portid[3];
	bool switch_portid_set;
	u8 vendor_name[8];
	bool vendor_name_set;
	u8 switch_model[8];
	bool switch_model_set;
	u8 switch_fw_version[8];
	bool switch_fw_version_set;
	u8 qos_pri;
	bool qos_pri_set;
	u8 port_alias[3];
	bool port_alias_set;
	u8 port_state;
#define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
#define QED_MFW_TLV_PORT_STATE_LOOP (1)
#define QED_MFW_TLV_PORT_STATE_P2P (2)
#define QED_MFW_TLV_PORT_STATE_FABRIC (3)
	bool port_state_set;
	u16 fip_tx_descr_size;
	bool fip_tx_descr_size_set;
	u16 fip_rx_descr_size;
	bool fip_rx_descr_size_set;
	u16 link_failures;
	bool link_failures_set;
	u8 fcoe_boot_progress;
	bool fcoe_boot_progress_set;
	u64 rx_bcast;
	bool rx_bcast_set;
	u64 tx_bcast;
	bool tx_bcast_set;
	u16 fcoe_txq_depth;
	bool fcoe_txq_depth_set;
	u16 fcoe_rxq_depth;
	bool fcoe_rxq_depth_set;
	u64 fcoe_rx_frames;
	bool fcoe_rx_frames_set;
	u64 fcoe_rx_bytes;
	bool fcoe_rx_bytes_set;
	u64 fcoe_tx_frames;
	bool fcoe_tx_frames_set;
	u64 fcoe_tx_bytes;
	bool fcoe_tx_bytes_set;
	u16 crc_count;
	bool crc_count_set;
	u32 crc_err_src_fcid[5];
	bool crc_err_src_fcid_set[5];
	struct qed_mfw_tlv_time crc_err[5];
	u16 losync_err;
	bool losync_err_set;
	u16 losig_err;
	bool losig_err_set;
	u16 primtive_err;
	bool primtive_err_set;
	u16 disparity_err;
	bool disparity_err_set;
	u16 code_violation_err;
	bool code_violation_err_set;
	u32 flogi_param[4];
	bool flogi_param_set[4];
	struct qed_mfw_tlv_time flogi_tstamp;
	u32 flogi_acc_param[4];
	bool flogi_acc_param_set[4];
	struct qed_mfw_tlv_time flogi_acc_tstamp;
	u32 flogi_rjt;
	bool flogi_rjt_set;
	struct qed_mfw_tlv_time flogi_rjt_tstamp;
	u32 fdiscs;
	bool fdiscs_set;
	u8 fdisc_acc;
	bool fdisc_acc_set;
	u8 fdisc_rjt;
	bool fdisc_rjt_set;
	u8 plogi;
	bool plogi_set;
	u8 plogi_acc;
	bool plogi_acc_set;
	u8 plogi_rjt;
	bool plogi_rjt_set;
	u32 plogi_dst_fcid[5];
	bool plogi_dst_fcid_set[5];
	struct qed_mfw_tlv_time plogi_tstamp[5];
	u32 plogi_acc_src_fcid[5];
	bool plogi_acc_src_fcid_set[5];
	struct qed_mfw_tlv_time plogi_acc_tstamp[5];
	u8 tx_plogos;
	bool tx_plogos_set;
	u8 plogo_acc;
	bool plogo_acc_set;
	u8 plogo_rjt;
	bool plogo_rjt_set;
	u32 plogo_src_fcid[5];
	bool plogo_src_fcid_set[5];
	struct qed_mfw_tlv_time plogo_tstamp[5];
	u8 rx_logos;
	bool rx_logos_set;
	u8 tx_accs;
	bool tx_accs_set;
	u8 tx_prlis;
	bool tx_prlis_set;
	u8 rx_accs;
	bool rx_accs_set;
	u8 tx_abts;
	bool tx_abts_set;
	u8 rx_abts_acc;
	bool rx_abts_acc_set;
	u8 rx_abts_rjt;
	bool rx_abts_rjt_set;
	u32 abts_dst_fcid[5];
	bool abts_dst_fcid_set[5];
	struct qed_mfw_tlv_time abts_tstamp[5];
	u8 rx_rscn;
	bool rx_rscn_set;
	u32 rx_rscn_nport[4];
	bool rx_rscn_nport_set[4];
	u8 tx_lun_rst;
	bool tx_lun_rst_set;
	u8 abort_task_sets;
	bool abort_task_sets_set;
	u8 tx_tprlos;
	bool tx_tprlos_set;
	u8 tx_nos;
	bool tx_nos_set;
	u8 rx_nos;
	bool rx_nos_set;
	u8 ols;
	bool ols_set;
	u8 lr;
	bool lr_set;
	u8 lrr;
	bool lrr_set;
	u8 tx_lip;
	bool tx_lip_set;
	u8 rx_lip;
	bool rx_lip_set;
	u8 eofa;
	bool eofa_set;
	u8 eofni;
	bool eofni_set;
	u8 scsi_chks;
	bool scsi_chks_set;
	u8 scsi_cond_met;
	bool scsi_cond_met_set;
	u8 scsi_busy;
	bool scsi_busy_set;
	u8 scsi_inter;
	bool scsi_inter_set;
	u8 scsi_inter_cond_met;
	bool scsi_inter_cond_met_set;
	u8 scsi_rsv_conflicts;
	bool scsi_rsv_conflicts_set;
	u8 scsi_tsk_full;
	bool scsi_tsk_full_set;
	u8 scsi_aca_active;
	bool scsi_aca_active_set;
	u8 scsi_tsk_abort;
	bool scsi_tsk_abort_set;
	u32 scsi_rx_chk[5];
	bool scsi_rx_chk_set[5];
	struct qed_mfw_tlv_time scsi_chk_tstamp[5];
};

struct qed_mfw_tlv_iscsi {
	u8 target_llmnr;
	bool target_llmnr_set;
	u8 header_digest;
	bool header_digest_set;
	u8 data_digest;
	bool data_digest_set;
	u8 auth_method;
#define QED_MFW_TLV_AUTH_METHOD_NONE (1)
#define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
	bool auth_method_set;
	u16 boot_taget_portal;
	bool boot_taget_portal_set;
	u16 frame_size;
	bool frame_size_set;
	u16 tx_desc_size;
	bool tx_desc_size_set;
	u16 rx_desc_size;
	bool rx_desc_size_set;
	u8 boot_progress;
	bool boot_progress_set;
	u16 tx_desc_qdepth;
	bool tx_desc_qdepth_set;
	u16 rx_desc_qdepth;
	bool rx_desc_qdepth_set;
	u64 rx_frames;
	bool rx_frames_set;
	u64 rx_bytes;
	bool rx_bytes_set;
	u64 tx_frames;
	bool tx_frames_set;
	u64 tx_bytes;
	bool tx_bytes_set;
};

enum qed_db_rec_width {
	DB_REC_WIDTH_32B,
	DB_REC_WIDTH_64B,
};

enum qed_db_rec_space {
	DB_REC_KERNEL,
	DB_REC_USER,
};

#define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
					      (void __iomem *)(reg_addr))

#define QED_COALESCE_MAX 0x1FF
#define QED_DEFAULT_RX_USECS 12
#define QED_DEFAULT_TX_USECS 48

struct qed_dev;

struct qed_eth_pf_params {
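	/* Requested number of L2 connections (CIDs). Consumed during HW init,
	 * so it must be set through update_pf_params() before slowpath start.
	 */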
	u16 num_cons;

	u8 num_vf_cons;
#define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)

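	/* To enable aRFS, a positive number of filters must be requested here
	 * prior to HW init.
	 */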
	u32 num_arfs_filters;
};

struct qed_fcoe_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];

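	/* Connection and task counts are consumed during HW init and must be
	 * set through update_pf_params() before slowpath start.
	 */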
	u16 num_cons;
	u16 num_tasks;

	u16 sq_num_pbl_pages;

	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u16 rq_buffer_log_size;
	u16 mtu;
	u16 dummy_icid;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 rq_buffer_size;
	u8 num_cqs;
	u8 log_page_size;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 is_target;
	u8 bdq_pbl_num_entries[2];
};

struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[3];
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u32 two_msl_timer;
	u16 tx_sws_timer;

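	/* Connection and task counts are consumed during HW init and must be
	 * set through update_pf_params() before slowpath start.
	 */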
	u16 num_cons;
	u16 num_tasks;

	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[3];
	u16 bdq_xon_threshold[3];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 log_page_size;
	u8 rqe_log_size;
	u8 max_fin_rt;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;

	u8 is_target;
	u8 is_soc_en;
	u8 soc_num_of_blocks_log;
	u8 bdq_pbl_num_entries[3];
};

struct qed_nvmetcp_pf_params {
	u64 glbl_q_params_addr;
	u16 cq_num_entries;
	u16 num_cons;
	u16 num_tasks;
	u8 num_sq_pages_in_ring;
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 num_queues;
	u8 gl_rq_pi;
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u16 min_rto;
};

struct qed_rdma_pf_params {
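	/* Supplied to QED during resource allocation; may affect the ILT and
	 * the doorbell BAR.
	 */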
	u32 min_dpis;
	u32 num_qps;
	u32 num_srqs;
	u8 roce_edpm_mode;
	u8 gl_pi;

	u8 enable_dcqcn;
};

struct qed_pf_params {
	struct qed_eth_pf_params eth_pf_params;
	struct qed_fcoe_pf_params fcoe_pf_params;
	struct qed_iscsi_pf_params iscsi_pf_params;
	struct qed_nvmetcp_pf_params nvmetcp_pf_params;
	struct qed_rdma_pf_params rdma_pf_params;
};

enum qed_int_mode {
	QED_INT_MODE_INTA,
	QED_INT_MODE_MSIX,
	QED_INT_MODE_MSI,
	QED_INT_MODE_POLL,
};

struct qed_sb_info {
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack;
	u16 igu_sb_id;
	void __iomem *igu_addr;
	u8 flags;
#define QED_SB_INFO_INIT 0x1
#define QED_SB_INFO_SETUP 0x2

	struct qed_dev *cdev;
};

enum qed_hw_err_type {
	QED_HW_ERR_FAN_FAIL,
	QED_HW_ERR_MFW_RESP_FAIL,
	QED_HW_ERR_HW_ATTN,
	QED_HW_ERR_DMAE_FAIL,
	QED_HW_ERR_RAMROD_FAIL,
	QED_HW_ERR_FW_ASSERT,
	QED_HW_ERR_LAST,
};

enum qed_dev_type {
	QED_DEV_TYPE_BB,
	QED_DEV_TYPE_AH,
};

struct qed_dev_info {
	unsigned long pci_mem_start;
	unsigned long pci_mem_end;
	unsigned int pci_irq;
	u8 num_hwfns;

	u8 hw_mac[ETH_ALEN];

	u16 fw_major;
	u16 fw_minor;
	u16 fw_rev;
	u16 fw_eng;

	u32 mfw_rev;
#define QED_MFW_VERSION_0_MASK 0x000000FF
#define QED_MFW_VERSION_0_OFFSET 0
#define QED_MFW_VERSION_1_MASK 0x0000FF00
#define QED_MFW_VERSION_1_OFFSET 8
#define QED_MFW_VERSION_2_MASK 0x00FF0000
#define QED_MFW_VERSION_2_OFFSET 16
#define QED_MFW_VERSION_3_MASK 0xFF000000
#define QED_MFW_VERSION_3_OFFSET 24

	u32 flash_size;
	bool b_arfs_capable;
	bool b_inter_pf_switch;
	bool tx_switching;
	bool rdma_supported;
	u16 mtu;

	bool wol_support;
	bool smart_an;
	bool esl;

	u32 mbi_version;
#define QED_MBI_VERSION_0_MASK 0x000000FF
#define QED_MBI_VERSION_0_OFFSET 0
#define QED_MBI_VERSION_1_MASK 0x0000FF00
#define QED_MBI_VERSION_1_OFFSET 8
#define QED_MBI_VERSION_2_MASK 0x00FF0000
#define QED_MBI_VERSION_2_OFFSET 16

	enum qed_dev_type dev_type;

	bool vxlan_enable;
	bool gre_enable;
	bool geneve_enable;

	u8 abs_pf_id;
};

enum qed_sb_type {
	QED_SB_TYPE_L2_QUEUE,
	QED_SB_TYPE_CNQ,
	QED_SB_TYPE_STORAGE,
};

enum qed_protocol {
	QED_PROTOCOL_ETH,
	QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_NVMETCP = QED_PROTOCOL_ISCSI,
	QED_PROTOCOL_FCOE,
};

enum qed_fec_mode {
	QED_FEC_MODE_NONE = BIT(0),
	QED_FEC_MODE_FIRECODE = BIT(1),
	QED_FEC_MODE_RS = BIT(2),
	QED_FEC_MODE_AUTO = BIT(3),
	QED_FEC_MODE_UNSUPPORTED = BIT(4),
};

struct qed_link_params {
	bool link_up;

	u32 override_flags;
#define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
#define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
#define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
#define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
#define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)

	bool autoneg;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds);
	u32 forced_speed;

	u32 pause_config;
#define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
#define QED_LINK_PAUSE_RX_ENABLE BIT(1)
#define QED_LINK_PAUSE_TX_ENABLE BIT(2)

	u32 loopback_mode;
#define QED_LINK_LOOPBACK_NONE BIT(0)
#define QED_LINK_LOOPBACK_INT_PHY BIT(1)
#define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
#define QED_LINK_LOOPBACK_EXT BIT(3)
#define QED_LINK_LOOPBACK_MAC BIT(4)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
#define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
#define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
#define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
#define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)

	struct qed_link_eee_params eee;
	u32 fec;
};

struct qed_link_output {
	bool link_up;

	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps);

	u32 speed;
	u8 duplex;
	u8 port;
	bool autoneg;
	u32 pause_config;

	bool eee_supported;
	bool eee_active;
	u8 sup_caps;
	struct qed_link_eee_params eee;

	u32 sup_fec;
	u32 active_fec;
};

struct qed_probe_params {
	enum qed_protocol protocol;
	u32 dp_module;
	u8 dp_level;
	bool is_vf;
	bool recov_in_prog;
};

#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
	u32 int_mode;
	u8 drv_major;
	u8 drv_minor;
	u8 drv_rev;
	u8 drv_eng;
	u8 name[QED_DRV_VER_STR_SIZE];
};

#define ILT_PAGE_SIZE_TCFC 0x8000

struct qed_int_info {
	struct msix_entry *msix;
	u8 msix_cnt;

	u8 used_cnt;
};

struct qed_generic_tlvs {
#define QED_TLV_IP_CSUM BIT(0)
#define QED_TLV_LSO BIT(1)
	u16 feat_flags;
#define QED_TLV_MAC_COUNT 3
	u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};

#define QED_I2C_DEV_ADDR_A0 0xA0
#define QED_I2C_DEV_ADDR_A2 0xA2

#define QED_NVM_SIGNATURE 0x12435687

enum qed_nvm_flash_cmd {
	QED_NVM_FLASH_CMD_FILE_DATA = 0x2,
	QED_NVM_FLASH_CMD_FILE_START = 0x3,
	QED_NVM_FLASH_CMD_NVM_CHANGE = 0x4,
	QED_NVM_FLASH_CMD_NVM_CFG_ID = 0x5,
	QED_NVM_FLASH_CMD_NVM_MAX,
};

struct qed_devlink {
	struct qed_dev *cdev;
	struct devlink_health_reporter *fw_reporter;
};

struct qed_sb_info_dbg {
	u32 igu_prod;
	u32 igu_cons;
	u16 pi[PIS_PER_SB];
};

struct qed_common_cb_ops {
	void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
	void (*link_update)(void *dev, struct qed_link_output *link);
	void (*schedule_recovery_handler)(void *dev);
	void (*schedule_hw_err_handler)(void *dev,
					enum qed_hw_err_type err_type);
	void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
	void (*get_protocol_tlv_data)(void *dev, void *data);
	void (*bw_update)(void *dev);
};

struct qed_selftest_ops {
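	/* Run the interrupt self-test. Returns 0 on success, error otherwise. */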
	int (*selftest_interrupt)(struct qed_dev *cdev);

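	/* Run the memory self-test. Returns 0 on success, error otherwise. */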
	int (*selftest_memory)(struct qed_dev *cdev);

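	/* Run the register self-test. Returns 0 on success, error otherwise. */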
	int (*selftest_register)(struct qed_dev *cdev);

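	/* Run the clock self-test. Returns 0 on success, error otherwise. */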
	int (*selftest_clock)(struct qed_dev *cdev);

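	/* Run the NVRAM self-test. Returns 0 on success, error otherwise. */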
	int (*selftest_nvram)(struct qed_dev *cdev);
};

struct qed_common_ops {
	struct qed_selftest_ops *selftest;

	struct qed_dev* (*probe)(struct pci_dev *dev,
				 struct qed_probe_params *params);

	void (*remove)(struct qed_dev *cdev);

	int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);

	void (*set_name)(struct qed_dev *cdev, char name[]);

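	/* Override the default PF resource requirements (connection counts
	 * etc.); must be called before slowpath_start().
	 */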
	void (*update_pf_params)(struct qed_dev *cdev,
				 struct qed_pf_params *params);

	int (*slowpath_start)(struct qed_dev *cdev,
			      struct qed_slowpath_params *params);

	int (*slowpath_stop)(struct qed_dev *cdev);

	int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);

	int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);

	u32 (*sb_init)(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr,
		       u16 sb_id,
		       enum qed_sb_type type);

	u32 (*sb_release)(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type);

	void (*simd_handler_config)(struct qed_dev *cdev,
				    void *token,
				    int index,
				    void (*handler)(void *));

	void (*simd_handler_clean)(struct qed_dev *cdev, int index);

	int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);

	int (*dbg_grc_size)(struct qed_dev *cdev);

	int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);

	int (*dbg_all_data_size)(struct qed_dev *cdev);

	int (*report_fatal_error)(struct devlink *devlink,
				  enum qed_hw_err_type err_type);

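	/* Reports whether this instance is currently allowed to change the link. */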
	bool (*can_link_change)(struct qed_dev *cdev);

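	/* Configure the link according to @params. Returns 0 on success. */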
	int (*set_link)(struct qed_dev *cdev,
			struct qed_link_params *params);

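	/* Query the current link configuration and state into @if_link. */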
	void (*get_link)(struct qed_dev *cdev,
			 struct qed_link_output *if_link);

	int (*drain)(struct qed_dev *cdev);

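	/* Change the debug verbosity (module mask and level) at run time. */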
	void (*update_msglvl)(struct qed_dev *cdev,
			      u32 dp_module,
			      u8 dp_level);

	int (*chain_alloc)(struct qed_dev *cdev,
			   struct qed_chain *chain,
			   struct qed_chain_init_params *params);

	void (*chain_free)(struct qed_dev *cdev,
			   struct qed_chain *p_chain);

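	/* Flash NVM data taken from the firmware file @name. Returns 0 on success. */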
	int (*nvm_flash)(struct qed_dev *cdev, const char *name);

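	/* Copy the NVM image of the given @type into @buf (up to @len bytes). */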
	int (*nvm_get_image)(struct qed_dev *cdev,
			     enum qed_nvm_images type, u8 *buf, u16 len);

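	/* Configure Rx/Tx interrupt coalescing (in usecs) for the queue
	 * identified by @handle.
	 */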
	int (*set_coalesce)(struct qed_dev *cdev,
			    u16 rx_coal, u16 tx_coal, void *handle);

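	/* Set the device LED mode (on, off, or restore the default). */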
	int (*set_led)(struct qed_dev *cdev,
		       enum qed_led_mode mode);

	void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable);

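	/* Register a doorbell with the doorbell recovery mechanism.
	 * @db_addr: doorbell address, @db_data: address where the doorbell
	 * data is kept, @db_width: 32b or 64b doorbell, @db_space: kernel or
	 * user space address.
	 */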
	int (*db_recovery_add)(struct qed_dev *cdev,
			       void __iomem *db_addr,
			       void *db_data,
			       enum qed_db_rec_width db_width,
			       enum qed_db_rec_space db_space);

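	/* Remove a doorbell previously registered with db_recovery_add();
	 * the entry is identified by @db_addr and @db_data.
	 */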
	int (*db_recovery_del)(struct qed_dev *cdev,
			       void __iomem *db_addr, void *db_data);

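	/* Trigger the device recovery process. Returns 0 on success. */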
	int (*recovery_process)(struct qed_dev *cdev);

	int (*recovery_prolog)(struct qed_dev *cdev);

	int (*update_drv_state)(struct qed_dev *cdev, bool active);

	int (*update_mac)(struct qed_dev *cdev, const u8 *mac);

	int (*update_mtu)(struct qed_dev *cdev, u16 mtu);

	int (*update_wol)(struct qed_dev *cdev, bool enabled);

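	/* Read @len bytes at @offset from the plugged-in module's EEPROM;
	 * @dev_addr is the I2C address (QED_I2C_DEV_ADDR_A0/A2).
	 */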
	int (*read_module_eeprom)(struct qed_dev *cdev,
				  char *buf, u8 dev_addr, u32 offset, u32 len);

	u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev);

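	/* Read an NVM config attribute value into *@buf; @cmd is the NVM CFG
	 * command id and @entity_id selects the entity.
	 */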
	int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd,
			    u32 entity_id);

	int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd);

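	/* Write the value @val for GRC config id @cfg_id. */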
	int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);

	struct devlink* (*devlink_register)(struct qed_dev *cdev);

	void (*devlink_unregister)(struct devlink *devlink);

	__printf(2, 3) void (*mfw_report)(struct qed_dev *cdev, char *fmt, ...);

	int (*get_sb_info)(struct qed_dev *cdev, struct qed_sb_info *sb,
			   u16 qid, struct qed_sb_info_dbg *sb_dbg);

	int (*get_esl_status)(struct qed_dev *cdev, bool *esl_active);
};

#define MASK_FIELD(_name, _value) \
	((_value) &= (_name ## _MASK))

#define FIELD_VALUE(_name, _value) \
	(((_value) & (_name ## _MASK)) << (_name ## _SHIFT))

#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((name ## _MASK) << (name ## _SHIFT)); \
		(value) |= (((u64)(flag)) << (name ## _SHIFT)); \
	} while (0)

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

#define GET_MFW_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _OFFSET))

#define SET_MFW_FIELD(name, field, value) \
	do { \
		(name) &= ~(field ## _MASK); \
		(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK)); \
	} while (0)
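
/* Usage sketch: GET_MFW_FIELD()/SET_MFW_FIELD() work on any mask/offset pair
 * defined in this header, e.g. extracting the most significant byte of the
 * management-FW version from struct qed_dev_info:
 *
 *	u8 mfw_major = GET_MFW_FIELD(dev_info.mfw_rev, QED_MFW_VERSION_3);
 */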

#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)

#define DP_ERR(cdev, fmt, ...) \
	do { \
		pr_err("[%s:%d(%s)]" fmt, \
		       __func__, __LINE__, \
		       DP_NAME(cdev) ? DP_NAME(cdev) : "", \
		       ## __VA_ARGS__); \
	} while (0)

#define DP_NOTICE(cdev, fmt, ...) \
	do { \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)

#define DP_INFO(cdev, fmt, ...) \
	do { \
		if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)

#define DP_VERBOSE(cdev, module, fmt, ...) \
	do { \
		if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
			     ((cdev)->dp_module & module))) { \
			pr_notice("[%s:%d(%s)]" fmt, \
				  __func__, __LINE__, \
				  DP_NAME(cdev) ? DP_NAME(cdev) : "", \
				  ## __VA_ARGS__); \
		} \
	} while (0)

enum DP_LEVEL {
	QED_LEVEL_VERBOSE = 0x0,
	QED_LEVEL_INFO = 0x1,
	QED_LEVEL_NOTICE = 0x2,
	QED_LEVEL_ERR = 0x3,
};

#define QED_LOG_LEVEL_SHIFT (30)
#define QED_LOG_VERBOSE_MASK (0x3fffffff)
#define QED_LOG_INFO_MASK (0x40000000)
#define QED_LOG_NOTICE_MASK (0x80000000)

enum DP_MODULE {
	QED_MSG_SPQ = 0x10000,
	QED_MSG_STATS = 0x20000,
	QED_MSG_DCB = 0x40000,
	QED_MSG_IOV = 0x80000,
	QED_MSG_SP = 0x100000,
	QED_MSG_STORAGE = 0x200000,
	QED_MSG_CXT = 0x800000,
	QED_MSG_LL2 = 0x1000000,
	QED_MSG_ILT = 0x2000000,
	QED_MSG_RDMA = 0x4000000,
	QED_MSG_DEBUG = 0x8000000,
};

enum qed_mf_mode {
	QED_MF_DEFAULT,
	QED_MF_OVLAN,
	QED_MF_NPAR,
};

struct qed_eth_stats_common {
	u64 no_buff_discards;
	u64 packet_too_big_discard;
	u64 ttl0_discard;
	u64 rx_ucast_bytes;
	u64 rx_mcast_bytes;
	u64 rx_bcast_bytes;
	u64 rx_ucast_pkts;
	u64 rx_mcast_pkts;
	u64 rx_bcast_pkts;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 gft_filter_drop;
	u64 tx_ucast_bytes;
	u64 tx_mcast_bytes;
	u64 tx_bcast_bytes;
	u64 tx_ucast_pkts;
	u64 tx_mcast_pkts;
	u64 tx_bcast_pkts;
	u64 tx_err_drop_pkts;
	u64 tpa_coalesced_pkts;
	u64 tpa_coalesced_events;
	u64 tpa_aborts_num;
	u64 tpa_not_coalesced_pkts;
	u64 tpa_coalesced_bytes;

	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_crc_errors;
	u64 rx_mac_crtl_frames;
	u64 rx_pause_frames;
	u64 rx_pfc_frames;
	u64 rx_align_errors;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_jabbers;
	u64 rx_undersize_packets;
	u64 rx_fragments;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_pause_frames;
	u64 tx_pfc_frames;
	u64 brb_truncates;
	u64 brb_discards;
	u64 rx_mac_bytes;
	u64 rx_mac_uc_packets;
	u64 rx_mac_mc_packets;
	u64 rx_mac_bc_packets;
	u64 rx_mac_frames_ok;
	u64 tx_mac_bytes;
	u64 tx_mac_uc_packets;
	u64 tx_mac_mc_packets;
	u64 tx_mac_bc_packets;
	u64 tx_mac_ctrl_frames;
	u64 link_change_count;
};

struct qed_eth_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};

struct qed_eth_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qed_eth_stats {
	struct qed_eth_stats_common common;

	union {
		struct qed_eth_stats_bb bb;
		struct qed_eth_stats_ah ah;
	};
};

#define QED_SB_IDX 0x0002

#define RX_PI 0
#define TX_PI(tc) (RX_PI + 1 + tc)

struct qed_sb_cnt_info {
	int orig;
	int cnt;
	int free_cnt;

	int iov_orig;
	int iov_cnt;
	int free_cnt_iov;
};

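/* Check whether the status block's producer index has advanced since the
 * last acknowledgment; if so, latch it in sb_ack and return QED_SB_IDX,
 * otherwise return 0.
 */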
static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info)
{
	u32 prod = 0;
	u16 rc = 0;

	prod = le32_to_cpu(sb_info->sb_virt->prod_index) &
	       STATUS_BLOCK_PROD_INDEX_MASK;
	if (sb_info->sb_ack != prod) {
		sb_info->sb_ack = prod;
		rc |= QED_SB_IDX;
	}

	return rc;
}

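/* Write a producer/consumer update command to the IGU for this status
 * block: the latched SB index (sb_ack), whether to apply it (@upd_flg) and
 * the interrupt enable/disable command (@int_cmd).
 */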
static inline void qed_sb_ack(struct qed_sb_info *sb_info,
			      enum igu_int_cmd int_cmd,
			      u8 upd_flg)
{
	u32 igu_ack;

	igu_ack = ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
		   (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
		   (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
		   (IGU_SEG_ACCESS_REG <<
		    IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));

	DIRECT_REG_WR(sb_info->igu_addr, igu_ack);

	/* Both segments (interrupts & acks) are written to the same address;
	 * the barrier guarantees the HW receives the commands in order.
	 */
	barrier();
}

static inline void __internal_ram_wr(void *p_hwfn,
				     void __iomem *addr,
				     int size,
				     u32 *data)
{
	unsigned int i;

	for (i = 0; i < size / sizeof(*data); i++)
		DIRECT_REG_WR(&((u32 __iomem *)addr)[i], data[i]);
}

static inline void internal_ram_wr(void __iomem *addr,
				   int size,
				   u32 *data)
{
	__internal_ram_wr(NULL, addr, size, data);
}

enum qed_rss_caps {
	QED_RSS_IPV4 = 0x1,
	QED_RSS_IPV6 = 0x2,
	QED_RSS_IPV4_TCP = 0x4,
	QED_RSS_IPV6_TCP = 0x8,
	QED_RSS_IPV4_UDP = 0x10,
	QED_RSS_IPV6_UDP = 0x20,
};

#define QED_RSS_IND_TABLE_SIZE 128
#define QED_RSS_KEY_SIZE 10
#endif