/* This file is part of the Chelsio T4 Ethernet driver for Linux. */

#ifndef __CXGB4_H__
#define __CXGB4_H__

#include "t4_hw.h"

#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/rhashtable.h>
#include <linux/etherdevice.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/ptp_classify.h>
#include <linux/crash_dump.h>
#include <linux/thermal.h>
#include <asm/io.h>
#include "t4_chip_type.h"
#include "cxgb4_uld.h"
#include "t4fw_api.h"
0061
0062 #define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
0063 extern struct list_head adapter_list;
0064 extern struct list_head uld_list;
0065 extern struct mutex uld_mutex;
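
/* Stop an Ethernet Tx queue when fewer than this many descriptors remain:
 * this is sized for the worst-case SGL of a single packet carrying
 * MAX_SKB_FRAGS fragments.
 */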
0071 #define ETHTXQ_STOP_THRES \
0072 (1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
0073
0074 #define FW_PARAM_DEV(param) \
0075 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
0076 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
0077
0078 #define FW_PARAM_PFVF(param) \
0079 (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
0080 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
0081 FW_PARAMS_PARAM_Y_V(0) | \
0082 FW_PARAMS_PARAM_Z_V(0))
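
/* FW_PARAM_DEV()/FW_PARAM_PFVF() build the 32-bit parameter identifiers used
 * with the FW_PARAMS firmware command.  A minimal usage sketch (assuming an
 * initialized adapter "adap"; "param"/"val"/"ret" are illustration variables):
 *
 *	u32 param = FW_PARAM_DEV(CCLK), val;
 *	int ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
 *				  &param, &val);
 *	if (!ret)
 *		... the core clock value is now in val ...
 */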
0083
0084 enum {
0085 MAX_NPORTS = 4,
0086 SERNUM_LEN = 24,
0087 ID_LEN = 16,
0088 PN_LEN = 16,
0089 MACADDR_LEN = 12,
0090 };
0091
0092 enum {
0093 T4_REGMAP_SIZE = (160 * 1024),
0094 T5_REGMAP_SIZE = (332 * 1024),
0095 };
0096
0097 enum {
0098 MEM_EDC0,
0099 MEM_EDC1,
0100 MEM_MC,
0101 MEM_MC0 = MEM_MC,
0102 MEM_MC1,
0103 MEM_HMA,
0104 };
0105
0106 enum {
0107 MEMWIN0_APERTURE = 2048,
0108 MEMWIN0_BASE = 0x1b800,
0109 MEMWIN1_APERTURE = 32768,
0110 MEMWIN1_BASE = 0x28000,
0111 MEMWIN1_BASE_T5 = 0x52000,
0112 MEMWIN2_APERTURE = 65536,
0113 MEMWIN2_BASE = 0x30000,
0114 MEMWIN2_APERTURE_T5 = 131072,
0115 MEMWIN2_BASE_T5 = 0x60000,
0116 };
0117
0118 enum dev_master {
0119 MASTER_CANT,
0120 MASTER_MAY,
0121 MASTER_MUST
0122 };
0123
0124 enum dev_state {
0125 DEV_STATE_UNINIT,
0126 DEV_STATE_INIT,
0127 DEV_STATE_ERR
0128 };
0129
0130 enum cc_pause {
0131 PAUSE_RX = 1 << 0,
0132 PAUSE_TX = 1 << 1,
0133 PAUSE_AUTONEG = 1 << 2
0134 };
0135
0136 enum cc_fec {
0137 FEC_AUTO = 1 << 0,
0138 FEC_RS = 1 << 1,
0139 FEC_BASER_RS = 1 << 2
0140 };
0141
0142 enum {
0143 CXGB4_ETHTOOL_FLASH_FW = 1,
0144 CXGB4_ETHTOOL_FLASH_PHY = 2,
0145 CXGB4_ETHTOOL_FLASH_BOOT = 3,
0146 CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
0147 };
0148
0149 enum cxgb4_netdev_tls_ops {
0150 CXGB4_TLSDEV_OPS = 1,
0151 CXGB4_XFRMDEV_OPS
0152 };
0153
0154 struct cxgb4_bootcfg_data {
0155 __le16 signature;
0156 __u8 reserved[2];
0157 };
0158
0159 struct cxgb4_pcir_data {
0160 __le32 signature;
0161 __le16 vendor_id;
0162 __le16 device_id;
0163 __u8 vital_product[2];
0164 __u8 length[2];
0165 __u8 revision;
0166 __u8 class_code[3];
0167 __u8 image_length[2];
0168 __u8 code_revision[2];
0169 __u8 code_type;
0170 __u8 indicator;
0171 __u8 reserved[2];
0172 };
0173
0174
0175 struct cxgb4_pci_exp_rom_header {
0176 __le16 signature;
0177 __u8 reserved[22];
0178 __le16 pcir_offset;
0179 };
0180
0181
0182 struct legacy_pci_rom_hdr {
0183 __u8 signature[2];
0184 __u8 size512;
0185 __u8 initentry_point[4];
0186 __u8 cksum;
0187 __u8 reserved[16];
0188 __le16 pcir_offset;
0189 };
0190
0191 #define CXGB4_HDR_CODE1 0x00
0192 #define CXGB4_HDR_CODE2 0x03
0193 #define CXGB4_HDR_INDI 0x80
0194
0195
0196 enum {
0197 BOOT_CFG_SIG = 0x4243,
0198 BOOT_SIZE_INC = 512,
0199 BOOT_SIGNATURE = 0xaa55,
0200 BOOT_MIN_SIZE = sizeof(struct cxgb4_pci_exp_rom_header),
0201 BOOT_MAX_SIZE = 1024 * BOOT_SIZE_INC,
0202 PCIR_SIGNATURE = 0x52494350
0203 };
0204
0205 struct port_stats {
0206 u64 tx_octets;
0207 u64 tx_frames;
0208 u64 tx_bcast_frames;
0209 u64 tx_mcast_frames;
0210 u64 tx_ucast_frames;
0211 u64 tx_error_frames;
0212
0213 u64 tx_frames_64;
0214 u64 tx_frames_65_127;
0215 u64 tx_frames_128_255;
0216 u64 tx_frames_256_511;
0217 u64 tx_frames_512_1023;
0218 u64 tx_frames_1024_1518;
0219 u64 tx_frames_1519_max;
0220
0221 u64 tx_drop;
0222 u64 tx_pause;
0223 u64 tx_ppp0;
0224 u64 tx_ppp1;
0225 u64 tx_ppp2;
0226 u64 tx_ppp3;
0227 u64 tx_ppp4;
0228 u64 tx_ppp5;
0229 u64 tx_ppp6;
0230 u64 tx_ppp7;
0231
0232 u64 rx_octets;
0233 u64 rx_frames;
0234 u64 rx_bcast_frames;
0235 u64 rx_mcast_frames;
0236 u64 rx_ucast_frames;
0237 u64 rx_too_long;
0238 u64 rx_jabber;
0239 u64 rx_fcs_err;
0240 u64 rx_len_err;
0241 u64 rx_symbol_err;
0242 u64 rx_runt;
0243
0244 u64 rx_frames_64;
0245 u64 rx_frames_65_127;
0246 u64 rx_frames_128_255;
0247 u64 rx_frames_256_511;
0248 u64 rx_frames_512_1023;
0249 u64 rx_frames_1024_1518;
0250 u64 rx_frames_1519_max;
0251
0252 u64 rx_pause;
0253 u64 rx_ppp0;
0254 u64 rx_ppp1;
0255 u64 rx_ppp2;
0256 u64 rx_ppp3;
0257 u64 rx_ppp4;
0258 u64 rx_ppp5;
0259 u64 rx_ppp6;
0260 u64 rx_ppp7;
0261
0262 u64 rx_ovflow0;
0263 u64 rx_ovflow1;
0264 u64 rx_ovflow2;
0265 u64 rx_ovflow3;
0266 u64 rx_trunc0;
0267 u64 rx_trunc1;
0268 u64 rx_trunc2;
0269 u64 rx_trunc3;
0270 };
0271
0272 struct lb_port_stats {
0273 u64 octets;
0274 u64 frames;
0275 u64 bcast_frames;
0276 u64 mcast_frames;
0277 u64 ucast_frames;
0278 u64 error_frames;
0279
0280 u64 frames_64;
0281 u64 frames_65_127;
0282 u64 frames_128_255;
0283 u64 frames_256_511;
0284 u64 frames_512_1023;
0285 u64 frames_1024_1518;
0286 u64 frames_1519_max;
0287
0288 u64 drop;
0289
0290 u64 ovflow0;
0291 u64 ovflow1;
0292 u64 ovflow2;
0293 u64 ovflow3;
0294 u64 trunc0;
0295 u64 trunc1;
0296 u64 trunc2;
0297 u64 trunc3;
0298 };
0299
0300 struct tp_tcp_stats {
0301 u32 tcp_out_rsts;
0302 u64 tcp_in_segs;
0303 u64 tcp_out_segs;
0304 u64 tcp_retrans_segs;
0305 };
0306
0307 struct tp_usm_stats {
0308 u32 frames;
0309 u32 drops;
0310 u64 octets;
0311 };
0312
0313 struct tp_fcoe_stats {
0314 u32 frames_ddp;
0315 u32 frames_drop;
0316 u64 octets_ddp;
0317 };
0318
0319 struct tp_err_stats {
0320 u32 mac_in_errs[4];
0321 u32 hdr_in_errs[4];
0322 u32 tcp_in_errs[4];
0323 u32 tnl_cong_drops[4];
0324 u32 ofld_chan_drops[4];
0325 u32 tnl_tx_drops[4];
0326 u32 ofld_vlan_drops[4];
0327 u32 tcp6_in_errs[4];
0328 u32 ofld_no_neigh;
0329 u32 ofld_cong_defer;
0330 };
0331
0332 struct tp_cpl_stats {
0333 u32 req[4];
0334 u32 rsp[4];
0335 };
0336
0337 struct tp_rdma_stats {
0338 u32 rqe_dfr_pkt;
0339 u32 rqe_dfr_mod;
0340 };
0341
0342 struct sge_params {
0343 u32 hps;
0344 u32 eq_qpp;
0345 u32 iq_qpp;
0346 };
0347
0348 struct tp_params {
0349 unsigned int tre;
0350 unsigned int la_mask;
0351 unsigned short tx_modq_map;
0352
0353
0354 uint32_t dack_re;
0355 unsigned short tx_modq[NCHAN];
0356
0357 u32 vlan_pri_map;
0358 u32 filter_mask;
0359 u32 ingress_config;
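
/* cached TP_OUT_CONFIG setting for compressed error vectors and outer header
 * info on encapsulated ingress packets
 */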
0364 int rx_pkt_encap;
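
/* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets; a shift is -1 when
 * the corresponding field is not part of the configured filter mode.
 */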
0377 int fcoe_shift;
0378 int port_shift;
0379 int vnic_shift;
0380 int vlan_shift;
0381 int tos_shift;
0382 int protocol_shift;
0383 int ethertype_shift;
0384 int macmatch_shift;
0385 int matchtype_shift;
0386 int frag_shift;
0387
0388 u64 hash_filter_mask;
0389 };
0390
0391 struct vpd_params {
0392 unsigned int cclk;
0393 u8 sn[SERNUM_LEN + 1];
0394 u8 id[ID_LEN + 1];
0395 u8 pn[PN_LEN + 1];
0396 u8 na[MACADDR_LEN + 1];
0397 };
0398
0399
0400
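/* Maximum resources (virtual interfaces, queues, exact MAC filters, access
 * rights, etc.) provisioned by the firmware for this PCI Physical Function.
 */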
0401 struct pf_resources {
0402 unsigned int nvi;
0403 unsigned int neq;
0404 unsigned int nethctrl;
0405 unsigned int niqflint;
0406 unsigned int niq;
0407 unsigned int tc;
0408 unsigned int pmask;
0409 unsigned int nexactf;
0410 unsigned int r_caps;
0411 unsigned int wx_caps;
0412 };
0413
0414 struct pci_params {
0415 unsigned char speed;
0416 unsigned char width;
0417 };
0418
0419 struct devlog_params {
0420 u32 memtype;
0421 u32 start;
0422 u32 size;
0423 };
0424
0425
0426 struct arch_specific_params {
0427 u8 nchan;
0428 u8 pm_stats_cnt;
0429 u8 cng_ch_bits_log;
0430 u16 mps_rplc_size;
0431 u16 vfcount;
0432 u32 sge_fl_db;
0433 u16 mps_tcam_size;
0434 };
0435
0436 struct adapter_params {
0437 struct sge_params sge;
0438 struct tp_params tp;
0439 struct vpd_params vpd;
0440 struct pf_resources pfres;
0441 struct pci_params pci;
0442 struct devlog_params devlog;
0443 enum pcie_memwin drv_memwin;
0444
0445 unsigned int cim_la_size;
0446
0447 unsigned int sf_size;
0448 unsigned int sf_nsec;
0449
0450 unsigned int fw_vers;
0451 unsigned int bs_vers;
0452 unsigned int tp_vers;
0453 unsigned int er_vers;
0454 unsigned int scfg_vers;
0455 unsigned int vpd_vers;
0456 u8 api_vers[7];
0457
0458 unsigned short mtus[NMTUS];
0459 unsigned short a_wnd[NCCTRL_WIN];
0460 unsigned short b_wnd[NCCTRL_WIN];
0461
0462 unsigned char nports;
0463 unsigned char portvec;
0464 enum chip_type chip;
0465 struct arch_specific_params arch;
0466 unsigned char offload;
0467 unsigned char crypto;
0468 unsigned char ethofld;
0469
0470 unsigned char bypass;
0471 unsigned char hash_filter;
0472
0473 unsigned int ofldq_wr_cred;
0474 bool ulptx_memwrite_dsgl;
0475
0476 unsigned int nsched_cls;
0477 unsigned int max_ordird_qp;
0478 unsigned int max_ird_adapter;
0479 bool fr_nsmr_tpte_wr_support;
0480 u8 fw_caps_support;
0481 bool filter2_wr_support;
0482 unsigned int viid_smt_extn_support:1;
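
/* MPS Buffer Group Map per Port: bit i is set if buffer group i is used by
 * the Port.
 */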
0487 u8 mps_bg_map[MAX_NPORTS];
0488 bool write_w_imm_support;
0489 bool write_cmpl_support;
0490 };
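
/* State needed to monitor the forward progress of SGE Ingress DMA activities
 * and possible hangs.
 */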
0495 struct sge_idma_monitor_state {
0496 unsigned int idma_1s_thresh;
0497 unsigned int idma_stalled[2];
0498 unsigned int idma_state[2];
0499 unsigned int idma_qid[2];
0500 unsigned int idma_warn[2];
0501 };
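
/* Firmware Mailbox Command/Reply log.  All values are in Host-Endian format.
 * The access and execute times are signed so that negative error returns can
 * be recorded.
 */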
0507 struct mbox_cmd {
0508 u64 cmd[MBOX_LEN / 8];
0509 u64 timestamp;
0510 u32 seqno;
0511 s16 access;
0512 s16 execute;
0513 };
0514
0515 struct mbox_cmd_log {
0516 unsigned int size;
0517 unsigned int cursor;
0518 u32 seqno;
0519
0520 };
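
/* Given a pointer to a Firmware Mailbox Command/Reply log and an entry index,
 * return a pointer to the specified entry.  The struct mbox_cmd entries are
 * laid out in memory immediately after struct mbox_cmd_log itself.
 */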
0525 static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
0526 unsigned int entry_idx)
0527 {
0528 return &((struct mbox_cmd *)&(log)[1])[entry_idx];
0529 }
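
/* A minimal sketch (not a helper of this driver) of walking the log from the
 * oldest to the newest entry; "log" and "i" are illustration variables:
 *
 *	for (i = 0; i < log->size; i++) {
 *		struct mbox_cmd *entry =
 *			mbox_cmd_log_entry(log, (log->cursor + i) % log->size);
 *
 *		if (entry->timestamp)
 *			... entry->cmd[] holds the command/reply ...
 *	}
 */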
0530
0531 #define FW_VERSION(chip) ( \
0532 FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \
0533 FW_HDR_FW_VER_MINOR_G(chip##FW_VERSION_MINOR) | \
0534 FW_HDR_FW_VER_MICRO_G(chip##FW_VERSION_MICRO) | \
0535 FW_HDR_FW_VER_BUILD_G(chip##FW_VERSION_BUILD))
0536 #define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
0537
0538 struct cxgb4_ethtool_lb_test {
0539 struct completion completion;
0540 int result;
0541 int loopback;
0542 };
0543
0544 struct fw_info {
0545 u8 chip;
0546 char *fs_name;
0547 char *fw_mod_name;
0548 struct fw_hdr fw_hdr;
0549 };
0550
0551 struct trace_params {
0552 u32 data[TRACE_LEN / 4];
0553 u32 mask[TRACE_LEN / 4];
0554 unsigned short snap_len;
0555 unsigned short min_len;
0556 unsigned char skip_ofst;
0557 unsigned char skip_len;
0558 unsigned char invert;
0559 unsigned char port;
0560 };
0561
0562 struct cxgb4_fw_data {
0563 __be32 signature;
0564 __u8 reserved[4];
0565 };
0566
0567
0568
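/* Firmware Port Capabilities come in 16-bit and 32-bit flavors; the 32-bit
 * format is used whenever the firmware advertises FW_CAPS32.
 */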
0569 typedef u16 fw_port_cap16_t;
0570 typedef u32 fw_port_cap32_t;
0571
0572 enum fw_caps {
0573 FW_CAPS_UNKNOWN = 0,
0574 FW_CAPS16 = 1,
0575 FW_CAPS32 = 2,
0576 };
0577
0578 struct link_config {
0579 fw_port_cap32_t pcaps;
0580 fw_port_cap32_t def_acaps;
0581 fw_port_cap32_t acaps;
0582 fw_port_cap32_t lpacaps;
0583
0584 fw_port_cap32_t speed_caps;
0585 unsigned int speed;
0586
0587 enum cc_pause requested_fc;
0588 enum cc_pause fc;
0589 enum cc_pause advertised_fc;
0590
0591 enum cc_fec requested_fec;
0592 enum cc_fec fec;
0593
0594 unsigned char autoneg;
0595
0596 unsigned char link_ok;
0597 unsigned char link_down_rc;
0598
0599 bool new_module;
0600 bool redo_l1cfg;
0601 };
0602
0603 #define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
0604
0605 enum {
0606 MAX_ETH_QSETS = 32,
0607 MAX_OFLD_QSETS = 16,
0608 MAX_CTRL_QUEUES = NCHAN,
0609 };
0610
0611 enum {
0612 MAX_TXQ_ENTRIES = 16384,
0613 MAX_CTRL_TXQ_ENTRIES = 1024,
0614 MAX_RSPQ_ENTRIES = 16384,
0615 MAX_RX_BUFFERS = 16384,
0616 MIN_TXQ_ENTRIES = 32,
0617 MIN_CTRL_TXQ_ENTRIES = 32,
0618 MIN_RSPQ_ENTRIES = 128,
0619 MIN_FL_ENTRIES = 16
0620 };
0621
0622 enum {
0623 MAX_TXQ_DESC_SIZE = 64,
0624 MAX_RXQ_DESC_SIZE = 128,
0625 MAX_FL_DESC_SIZE = 8,
0626 MAX_CTRL_TXQ_DESC_SIZE = 64,
0627 };
0628
0629 enum {
0630 INGQ_EXTRAS = 2,
0631
0632 MAX_INGQ = MAX_ETH_QSETS + INGQ_EXTRAS,
0633 };
0634
0635 enum {
0636 PRIV_FLAG_PORT_TX_VM_BIT,
0637 };
0638
0639 #define PRIV_FLAG_PORT_TX_VM BIT(PRIV_FLAG_PORT_TX_VM_BIT)
0640
0641 #define PRIV_FLAGS_ADAP 0
0642 #define PRIV_FLAGS_PORT PRIV_FLAG_PORT_TX_VM
0643
0644 struct adapter;
0645 struct sge_rspq;
0646
0647 #include "cxgb4_dcb.h"
0648
0649 #ifdef CONFIG_CHELSIO_T4_FCOE
0650 #include "cxgb4_fcoe.h"
0651 #endif
0652
0653 struct port_info {
0654 struct adapter *adapter;
0655 u16 viid;
0656 int xact_addr_filt;
0657 u16 rss_size;
0658 s8 mdio_addr;
0659 enum fw_port_type port_type;
0660 u8 mod_type;
0661 u8 port_id;
0662 u8 tx_chan;
0663 u8 lport;
0664 u8 nqsets;
0665 u8 first_qset;
0666 u8 rss_mode;
0667 struct link_config link_cfg;
0668 u16 *rss;
0669 struct port_stats stats_base;
0670 #ifdef CONFIG_CHELSIO_T4_DCB
0671 struct port_dcb_info dcb;
0672 #endif
0673 #ifdef CONFIG_CHELSIO_T4_FCOE
0674 struct cxgb_fcoe fcoe;
0675 #endif
0676 bool rxtstamp;
0677 struct hwtstamp_config tstamp_config;
0678 bool ptp_enable;
0679 struct sched_table *sched_tbl;
0680 u32 eth_flags;
0681
0682
0683
0684
0685 u8 vin;
0686 u8 vivld;
0687 u8 smt_idx;
0688 u8 rx_cchan;
0689
0690 bool tc_block_shared;
0691
0692
0693 u16 viid_mirror;
0694 u16 nmirrorqsets;
0695 u32 vi_mirror_count;
0696 struct mutex vi_mirror_mutex;
0697 struct cxgb4_ethtool_lb_test ethtool_lb;
0698 };
0699
0700 struct dentry;
0701 struct work_struct;
0702
0703 enum {
0704 CXGB4_FULL_INIT_DONE = (1 << 0),
0705 CXGB4_DEV_ENABLED = (1 << 1),
0706 CXGB4_USING_MSI = (1 << 2),
0707 CXGB4_USING_MSIX = (1 << 3),
0708 CXGB4_FW_OK = (1 << 4),
0709 CXGB4_RSS_TNLALLLOOKUP = (1 << 5),
0710 CXGB4_USING_SOFT_PARAMS = (1 << 6),
0711 CXGB4_MASTER_PF = (1 << 7),
0712 CXGB4_FW_OFLD_CONN = (1 << 9),
0713 CXGB4_ROOT_NO_RELAXED_ORDERING = (1 << 10),
0714 CXGB4_SHUTTING_DOWN = (1 << 11),
0715 CXGB4_SGE_DBQ_TIMER = (1 << 12),
0716 };
0717
0718 enum {
0719 ULP_CRYPTO_LOOKASIDE = 1 << 0,
0720 ULP_CRYPTO_IPSEC_INLINE = 1 << 1,
0721 ULP_CRYPTO_KTLS_INLINE = 1 << 3,
0722 };
0723
0724 #define CXGB4_MIRROR_RXQ_DEFAULT_DESC_NUM 1024
0725 #define CXGB4_MIRROR_RXQ_DEFAULT_DESC_SIZE 64
0726 #define CXGB4_MIRROR_RXQ_DEFAULT_INTR_USEC 5
0727 #define CXGB4_MIRROR_RXQ_DEFAULT_PKT_CNT 8
0728
0729 #define CXGB4_MIRROR_FLQ_DEFAULT_DESC_NUM 72
0730
0731 struct rx_sw_desc;
0732
0733 struct sge_fl {
0734 unsigned int avail;
0735 unsigned int pend_cred;
0736 unsigned int cidx;
0737 unsigned int pidx;
0738 unsigned long alloc_failed;
0739 unsigned long large_alloc_failed;
0740 unsigned long mapping_err;
0741 unsigned long low;
0742 unsigned long starving;
0743
0744 unsigned int cntxt_id;
0745 unsigned int size;
0746 struct rx_sw_desc *sdesc;
0747 __be64 *desc;
0748 dma_addr_t addr;
0749 void __iomem *bar2_addr;
0750 unsigned int bar2_qid;
0751 };
0752
0753
0754 struct pkt_gl {
0755 u64 sgetstamp;
0756 struct page_frag frags[MAX_SKB_FRAGS];
0757 void *va;
0758 unsigned int nfrags;
0759 unsigned int tot_len;
0760 };
0761
0762 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
0763 const struct pkt_gl *gl);
0764 typedef void (*rspq_flush_handler_t)(struct sge_rspq *q);
0765
0766 struct t4_lro_mgr {
0767 #define MAX_LRO_SESSIONS 64
0768 u8 lro_session_cnt;
0769 unsigned long lro_pkts;
0770 unsigned long lro_merged;
0771 struct sk_buff_head lroq;
0772 };
0773
0774 struct sge_rspq {
0775 struct napi_struct napi;
0776 const __be64 *cur_desc;
0777 unsigned int cidx;
0778 u8 gen;
0779 u8 intr_params;
0780 u8 next_intr_params;
0781 u8 adaptive_rx;
0782 u8 pktcnt_idx;
0783 u8 uld;
0784 u8 idx;
0785 int offset;
0786 u16 cntxt_id;
0787 u16 abs_id;
0788 __be64 *desc;
0789 dma_addr_t phys_addr;
0790 void __iomem *bar2_addr;
0791 unsigned int bar2_qid;
0792 unsigned int iqe_len;
0793 unsigned int size;
0794 struct adapter *adap;
0795 struct net_device *netdev;
0796 rspq_handler_t handler;
0797 rspq_flush_handler_t flush_handler;
0798 struct t4_lro_mgr lro_mgr;
0799 };
0800
0801 struct sge_eth_stats {
0802 unsigned long pkts;
0803 unsigned long lro_pkts;
0804 unsigned long lro_merged;
0805 unsigned long rx_cso;
0806 unsigned long vlan_ex;
0807 unsigned long rx_drops;
0808 unsigned long bad_rx_pkts;
0809 };
0810
0811 struct sge_eth_rxq {
0812 struct sge_rspq rspq;
0813 struct sge_fl fl;
0814 struct sge_eth_stats stats;
0815 struct msix_info *msix;
0816 } ____cacheline_aligned_in_smp;
0817
0818 struct sge_ofld_stats {
0819 unsigned long pkts;
0820 unsigned long imm;
0821 unsigned long an;
0822 unsigned long nomem;
0823 };
0824
0825 struct sge_ofld_rxq {
0826 struct sge_rspq rspq;
0827 struct sge_fl fl;
0828 struct sge_ofld_stats stats;
0829 struct msix_info *msix;
0830 } ____cacheline_aligned_in_smp;
0831
0832 struct tx_desc {
0833 __be64 flit[8];
0834 };
0835
0836 struct ulptx_sgl;
0837
0838 struct tx_sw_desc {
0839 struct sk_buff *skb;
0840 dma_addr_t addr[MAX_SKB_FRAGS + 1];
0841 };
0842
0843 struct sge_txq {
0844 unsigned int in_use;
0845 unsigned int q_type;
0846 unsigned int size;
0847 unsigned int cidx;
0848 unsigned int pidx;
0849 unsigned long stops;
0850 unsigned long restarts;
0851 unsigned int cntxt_id;
0852 struct tx_desc *desc;
0853 struct tx_sw_desc *sdesc;
0854 struct sge_qstat *stat;
0855 dma_addr_t phys_addr;
0856 spinlock_t db_lock;
0857 int db_disabled;
0858 unsigned short db_pidx;
0859 unsigned short db_pidx_inc;
0860 void __iomem *bar2_addr;
0861 unsigned int bar2_qid;
0862 };
0863
0864 struct sge_eth_txq {
0865 struct sge_txq q;
0866 struct netdev_queue *txq;
0867 #ifdef CONFIG_CHELSIO_T4_DCB
0868 u8 dcb_prio;
0869 #endif
0870 u8 dbqt;
0871 unsigned int dbqtimerix;
0872 unsigned long tso;
0873 unsigned long uso;
0874 unsigned long tx_cso;
0875 unsigned long vlan_ins;
0876 unsigned long mapping_err;
0877 } ____cacheline_aligned_in_smp;
0878
0879 struct sge_uld_txq {
0880 struct sge_txq q;
0881 struct adapter *adap;
0882 struct sk_buff_head sendq;
0883 struct tasklet_struct qresume_tsk;
0884 bool service_ofldq_running;
0885 u8 full;
0886 unsigned long mapping_err;
0887 } ____cacheline_aligned_in_smp;
0888
0889 struct sge_ctrl_txq {
0890 struct sge_txq q;
0891 struct adapter *adap;
0892 struct sk_buff_head sendq;
0893 struct tasklet_struct qresume_tsk;
0894 u8 full;
0895 } ____cacheline_aligned_in_smp;
0896
0897 struct sge_uld_rxq_info {
0898 char name[IFNAMSIZ];
0899 struct sge_ofld_rxq *uldrxq;
0900 u16 *rspq_id;
0901 u16 nrxq;
0902 u16 nciq;
0903 u8 uld;
0904 };
0905
0906 struct sge_uld_txq_info {
0907 struct sge_uld_txq *uldtxq;
0908 atomic_t users;
0909 u16 ntxq;
0910 };
0911
0912
0913 struct cxgb4_uld_list {
0914 struct cxgb4_uld_info uld_info;
0915 struct list_head list_node;
0916 enum cxgb4_uld uld_type;
0917 };
0918
0919 enum sge_eosw_state {
0920 CXGB4_EO_STATE_CLOSED = 0,
0921 CXGB4_EO_STATE_FLOWC_OPEN_SEND,
0922 CXGB4_EO_STATE_FLOWC_OPEN_REPLY,
0923 CXGB4_EO_STATE_ACTIVE,
0924 CXGB4_EO_STATE_FLOWC_CLOSE_SEND,
0925 CXGB4_EO_STATE_FLOWC_CLOSE_REPLY,
0926 };
0927
0928 struct sge_eosw_txq {
0929 spinlock_t lock;
0930 enum sge_eosw_state state;
0931 struct tx_sw_desc *desc;
0932 u32 ndesc;
0933 u32 pidx;
0934 u32 last_pidx;
0935 u32 cidx;
0936 u32 last_cidx;
0937 u32 flowc_idx;
0938 u32 inuse;
0939
0940 u32 cred;
0941 u32 ncompl;
0942 u32 last_compl;
0943
0944 u32 eotid;
0945 u32 hwtid;
0946
0947 u32 hwqid;
0948 struct net_device *netdev;
0949 struct tasklet_struct qresume_tsk;
0950 struct completion completion;
0951 };
0952
0953 struct sge_eohw_txq {
0954 spinlock_t lock;
0955 struct sge_txq q;
0956 struct adapter *adap;
0957 unsigned long tso;
0958 unsigned long uso;
0959 unsigned long tx_cso;
0960 unsigned long vlan_ins;
0961 unsigned long mapping_err;
0962 };
0963
0964 struct sge {
0965 struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
0966 struct sge_eth_txq ptptxq;
0967 struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
0968
0969 struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
0970 struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
0971 struct sge_uld_rxq_info **uld_rxq_info;
0972 struct sge_uld_txq_info **uld_txq_info;
0973
0974 struct sge_rspq intrq ____cacheline_aligned_in_smp;
0975 spinlock_t intrq_lock;
0976
0977 struct sge_eohw_txq *eohw_txq;
0978 struct sge_ofld_rxq *eohw_rxq;
0979
0980 struct sge_eth_rxq *mirror_rxq[NCHAN];
0981
0982 u16 max_ethqsets;
0983 u16 ethqsets;
0984 u16 ethtxq_rover;
0985 u16 ofldqsets;
0986 u16 nqs_per_uld;
0987 u16 eoqsets;
0988 u16 mirrorqsets;
0989
0990 u16 timer_val[SGE_NTIMERS];
0991 u8 counter_val[SGE_NCOUNTERS];
0992 u16 dbqtimer_tick;
0993 u16 dbqtimer_val[SGE_NDBQTIMERS];
0994 u32 fl_pg_order;
0995 u32 stat_len;
0996 u32 pktshift;
0997 u32 fl_align;
0998 u32 fl_starve_thres;
0999
1000 struct sge_idma_monitor_state idma_monitor;
1001 unsigned int egr_start;
1002 unsigned int egr_sz;
1003 unsigned int ingr_start;
1004 unsigned int ingr_sz;
1005 void **egr_map;
1006 struct sge_rspq **ingr_map;
1007 unsigned long *starving_fl;
1008 unsigned long *txq_maperr;
1009 unsigned long *blocked_fl;
1010 struct timer_list rx_timer;
1011 struct timer_list tx_timer;
1012
1013 int fwevtq_msix_idx;
1014 int nd_msix_idx;
1015 };
1016
1017 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
1018 #define for_each_ofldtxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
1019
1020 struct l2t_data;
1021
1022 #ifdef CONFIG_PCI_IOV
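
/* Only the first four PFs are provisioned with SR-IOV Virtual Functions by
 * this driver.
 */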
1028 #define NUM_OF_PF_WITH_SRIOV 4
1029
1030 #endif
1031
1032 struct doorbell_stats {
1033 u32 db_drop;
1034 u32 db_empty;
1035 u32 db_full;
1036 };
1037
1038 struct hash_mac_addr {
1039 struct list_head list;
1040 u8 addr[ETH_ALEN];
1041 unsigned int iface_mac;
1042 };
1043
1044 struct msix_bmap {
1045 unsigned long *msix_bmap;
1046 unsigned int mapsize;
1047 spinlock_t lock;
1048 };
1049
1050 struct msix_info {
1051 unsigned short vec;
1052 char desc[IFNAMSIZ + 10];
1053 unsigned int idx;
1054 cpumask_var_t aff_mask;
1055 };
1056
1057 struct vf_info {
1058 unsigned char vf_mac_addr[ETH_ALEN];
1059 unsigned int tx_rate;
1060 bool pf_set_mac;
1061 u16 vlan;
1062 int link_state;
1063 };
1064
1065 enum {
1066 HMA_DMA_MAPPED_FLAG = 1
1067 };
1068
1069 struct hma_data {
1070 unsigned char flags;
1071 struct sg_table *sgt;
1072 dma_addr_t *phy_addr;
1073 };
1074
1075 struct mbox_list {
1076 struct list_head list;
1077 };
1078
1079 #if IS_ENABLED(CONFIG_THERMAL)
1080 struct ch_thermal {
1081 struct thermal_zone_device *tzdev;
1082 int trip_temp;
1083 int trip_type;
1084 };
1085 #endif
1086
1087 struct mps_entries_ref {
1088 struct list_head list;
1089 u8 addr[ETH_ALEN];
1090 u8 mask[ETH_ALEN];
1091 u16 idx;
1092 refcount_t refcnt;
1093 };
1094
1095 struct cxgb4_ethtool_filter_info {
1096 u32 *loc_array;
1097 unsigned long *bmap;
1098 u32 in_use;
1099 };
1100
1101 struct cxgb4_ethtool_filter {
1102 u32 nentries;
1103 struct cxgb4_ethtool_filter_info *port;
1104 };
1105
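/* Per-adapter (per-PCI-function) state: register mappings, firmware/VPD
 * parameters, SGE queues, ports, filters and offload bookkeeping.
 */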
1106 struct adapter {
1107 void __iomem *regs;
1108 void __iomem *bar2;
1109 u32 t4_bar0;
1110 struct pci_dev *pdev;
1111 struct device *pdev_dev;
1112 const char *name;
1113 unsigned int mbox;
1114 unsigned int pf;
1115 unsigned int flags;
1116 unsigned int adap_idx;
1117 enum chip_type chip;
1118 u32 eth_flags;
1119
1120 int msg_enable;
1121 __be16 vxlan_port;
1122 __be16 geneve_port;
1123
1124 struct adapter_params params;
1125 struct cxgb4_virt_res vres;
1126 unsigned int swintr;
1127
1128
1129 struct msix_info *msix_info;
1130 struct msix_bmap msix_bmap;
1131
1132 struct doorbell_stats db_stats;
1133 struct sge sge;
1134
1135 struct net_device *port[MAX_NPORTS];
1136 u8 chan_map[NCHAN];
1137
1138 struct vf_info *vfinfo;
1139 u8 num_vfs;
1140
1141 u32 filter_mode;
1142 unsigned int l2t_start;
1143 unsigned int l2t_end;
1144 struct l2t_data *l2t;
1145 unsigned int clipt_start;
1146 unsigned int clipt_end;
1147 struct clip_tbl *clipt;
1148 unsigned int rawf_start;
1149 unsigned int rawf_cnt;
1150 struct smt_data *smt;
1151 struct cxgb4_uld_info *uld;
1152 void *uld_handle[CXGB4_ULD_MAX];
1153 unsigned int num_uld;
1154 unsigned int num_ofld_uld;
1155 struct list_head list_node;
1156 struct list_head rcu_node;
1157 struct list_head mac_hlist;
1158 struct list_head mps_ref;
1159 spinlock_t mps_ref_lock;
1160
1161 void *iscsi_ppm;
1162
1163 struct tid_info tids;
1164 void **tid_release_head;
1165 spinlock_t tid_release_lock;
1166 struct workqueue_struct *workq;
1167 struct work_struct tid_release_task;
1168 struct work_struct db_full_task;
1169 struct work_struct db_drop_task;
1170 struct work_struct fatal_err_notify_task;
1171 bool tid_release_task_busy;
1172
1173
1174 spinlock_t mbox_lock;
1175 struct mbox_list mlist;
1176
1177
1178 #define T4_OS_LOG_MBOX_CMDS 256
1179 struct mbox_cmd_log *mbox_log;
1180
1181 struct mutex uld_mutex;
1182
1183 struct dentry *debugfs_root;
1184 bool use_bd;
1185 bool trace_rss;
1186
1187
1188
1189
1190 struct ptp_clock *ptp_clock;
1191 struct ptp_clock_info ptp_clock_info;
1192 struct sk_buff *ptp_tx_skb;
1193
1194 spinlock_t ptp_lock;
1195 spinlock_t stats_lock;
1196 spinlock_t win0_lock ____cacheline_aligned_in_smp;
1197
1198
1199 struct cxgb4_tc_u32_table *tc_u32;
1200 struct chcr_ktls chcr_ktls;
1201 struct chcr_stats_debug chcr_stats;
1202 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
1203 struct ch_ktls_stats_debug ch_ktls_stats;
1204 #endif
1205 #if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
1206 struct ch_ipsec_stats_debug ch_ipsec_stats;
1207 #endif
1208
1209
1210 bool tc_flower_initialized;
1211 struct rhashtable flower_tbl;
1212 struct rhashtable_params flower_ht_params;
1213 struct timer_list flower_stats_timer;
1214 struct work_struct flower_stats_work;
1215
1216
1217 struct ethtool_dump eth_dump;
1218
1219
1220 struct hma_data hma;
1221
1222 struct srq_data *srq;
1223
1224
1225 struct vmcoredd_data vmcoredd;
1226 #if IS_ENABLED(CONFIG_THERMAL)
1227 struct ch_thermal ch_thermal;
1228 #endif
1229
1230
1231 struct cxgb4_tc_mqprio *tc_mqprio;
1232
1233
1234 struct cxgb4_tc_matchall *tc_matchall;
1235
1236
1237 struct cxgb4_ethtool_filter *ethtool_filters;
1238 };
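
/* Support for "sched-class" command to allow a TX Scheduling Class to be
 * programmed with various parameters.
 */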
1243 struct ch_sched_params {
1244 u8 type;
1245 union {
1246 struct {
1247 u8 level;
1248 u8 mode;
1249 u8 rateunit;
1250 u8 ratemode;
1251 u8 channel;
1252 u8 class;
1253 u32 minrate;
1254 u32 maxrate;
1255 u16 weight;
1256 u16 pktsize;
1257 u16 burstsize;
1258 } params;
1259 } u;
1260 };
1261
1262 enum {
1263 SCHED_CLASS_TYPE_PACKET = 0,
1264 };
1265
1266 enum {
1267 SCHED_CLASS_LEVEL_CL_RL = 0,
1268 SCHED_CLASS_LEVEL_CH_RL = 2,
1269 };
1270
1271 enum {
1272 SCHED_CLASS_MODE_CLASS = 0,
1273 SCHED_CLASS_MODE_FLOW,
1274 };
1275
1276 enum {
1277 SCHED_CLASS_RATEUNIT_BITS = 0,
1278 };
1279
1280 enum {
1281 SCHED_CLASS_RATEMODE_ABS = 1,
1282 };
1283
1284
1285
1286
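/* Support for "sched_queue" command to allow one or more NIC TX Queues to be
 * bound to a TX Scheduling Class.
 */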
1287 struct ch_sched_queue {
1288 s8 queue;
1289 s8 class;
1290 };
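
/* Support for "sched_flowc" command to allow one or more FLOWC to be bound to
 * a TX Scheduling Class.
 */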
1295 struct ch_sched_flowc {
1296 s32 tid;
1297 s8 class;
1298 };
1299
1300
1301
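/* Defined bit widths of the user-definable filter tuple fields below. */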
1302 #define ETHTYPE_BITWIDTH 16
1303 #define FRAG_BITWIDTH 1
1304 #define MACIDX_BITWIDTH 9
1305 #define FCOE_BITWIDTH 1
1306 #define IPORT_BITWIDTH 3
1307 #define MATCHTYPE_BITWIDTH 3
1308 #define PROTO_BITWIDTH 8
1309 #define TOS_BITWIDTH 8
1310 #define PF_BITWIDTH 8
1311 #define VF_BITWIDTH 8
1312 #define IVLAN_BITWIDTH 16
1313 #define OVLAN_BITWIDTH 16
1314 #define ENCAP_VNI_BITWIDTH 24
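
/* Filter matching rules.  A filter is expressed as a set of (value, mask)
 * tuples over the ingress packet fields selected by the global TP filter mode
 * (TP_VLAN_PRI_MAP); fields whose mode bit is clear cannot be used for
 * matching.
 */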
1333 struct ch_filter_tuple {
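/* Compressed header matching fields.  The TP_VLAN_PRI_MAP register selects
 * which of these participate in the match; because that register is global,
 * all filters must use the same set of fields.
 */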
1340 uint32_t ethtype:ETHTYPE_BITWIDTH;
1341 uint32_t frag:FRAG_BITWIDTH;
1342 uint32_t ivlan_vld:1;
1343 uint32_t ovlan_vld:1;
1344 uint32_t pfvf_vld:1;
1345 uint32_t encap_vld:1;
1346 uint32_t macidx:MACIDX_BITWIDTH;
1347 uint32_t fcoe:FCOE_BITWIDTH;
1348 uint32_t iport:IPORT_BITWIDTH;
1349 uint32_t matchtype:MATCHTYPE_BITWIDTH;
1350 uint32_t proto:PROTO_BITWIDTH;
1351 uint32_t tos:TOS_BITWIDTH;
1352 uint32_t pf:PF_BITWIDTH;
1353 uint32_t vf:VF_BITWIDTH;
1354 uint32_t ivlan:IVLAN_BITWIDTH;
1355 uint32_t ovlan:OVLAN_BITWIDTH;
1356 uint32_t vni:ENCAP_VNI_BITWIDTH;
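
/* Uncompressed header matching fields: local/foreign IP addresses and
 * TCP/UDP ports.  These are always available.
 */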
1361 uint8_t lip[16];
1362 uint8_t fip[16];
1363 uint16_t lport;
1364 uint16_t fport;
1365 };
1366
1367
1368
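/* A hardware filter specification: administrative control bits, the action to
 * take on a match (pass/drop/switch plus optional rewrites), and the
 * (value, mask) match tuples themselves.
 */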
1369 struct ch_filter_specification {
1370
1371
1372 uint32_t hitcnts:1;
1373 uint32_t prio:1;
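
/* Fundamental filter typing: the one element of matching that is not a
 * (value, mask) tuple.  type selects IPv4 (0) vs IPv6 (1); hash selects a
 * TCAM (wild-card) filter (0) vs an exact-match hash filter (1).
 */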
1378 uint32_t type:1;
1379 u32 hash:1;
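
/* Packet dispatch: matching ingress packets are dropped, passed to the host,
 * or switched back out as egress packets (see FILTER_PASS/DROP/SWITCH below).
 */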
1385 uint32_t action:2;
1386
1387 uint32_t rpttid:1;
1388
1389 uint32_t dirsteer:1;
1390 uint32_t iq:10;
1391
1392 uint32_t maskhash:1;
1393 uint32_t dirsteerhash:1;
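
/* Switch proxy/rewrite fields: a packet matching a "switch" filter is looped
 * back out of eport, optionally with new DMAC/SMAC/VLAN and NAT rewrites
 * applied.
 */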
1400 uint32_t eport:2;
1401 uint32_t newdmac:1;
1402 uint32_t newsmac:1;
1403 uint32_t newvlan:2;
1404 uint32_t nat_mode:3;
1405 uint8_t dmac[ETH_ALEN];
1406 uint8_t smac[ETH_ALEN];
1407 uint16_t vlan;
1408
1409 u8 nat_lip[16];
1410 u8 nat_fip[16];
1411 u16 nat_lport;
1412 u16 nat_fport;
1413
1414 u32 tc_prio;
1415 u64 tc_cookie;
1416
1417
1418 u8 rsvd[12];
1419
1420
1421
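/* Filter rule value/mask pairs. */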
1422 struct ch_filter_tuple val;
1423 struct ch_filter_tuple mask;
1424 };
1425
1426 enum {
1427 FILTER_PASS = 0,
1428 FILTER_DROP,
1429 FILTER_SWITCH
1430 };
1431
1432 enum {
1433 VLAN_NOCHANGE = 0,
1434 VLAN_REMOVE,
1435 VLAN_INSERT,
1436 VLAN_REWRITE
1437 };
1438
1439 enum {
1440 NAT_MODE_NONE = 0,
1441 NAT_MODE_DIP,
1442 NAT_MODE_DIP_DP,
1443 NAT_MODE_DIP_DP_SIP,
1444 NAT_MODE_DIP_DP_SP,
1445 NAT_MODE_SIP_SP,
1446 NAT_MODE_DIP_SIP_SP,
1447 NAT_MODE_ALL
1448 };
1449
1450 #define CXGB4_FILTER_TYPE_MAX 2
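
/* Host shadow copy of an ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order of the hardware or of the
 * firmware command.
 */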
1458 struct filter_entry {
1459
1460 u32 valid:1;
1461 u32 locked:1;
1462
1463 u32 pending:1;
1464 struct filter_ctx *ctx;
1465 struct l2t_entry *l2t;
1466 struct smt_entry *smt;
1467 struct net_device *dev;
1468 u32 tid;
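
/* The filter specification itself, mostly a straight copy of what the user
 * (ethtool/TC offload) requested.
 */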
1475 struct ch_filter_specification fs;
1476 };
1477
1478 static inline int is_offload(const struct adapter *adap)
1479 {
1480 return adap->params.offload;
1481 }
1482
1483 static inline int is_hashfilter(const struct adapter *adap)
1484 {
1485 return adap->params.hash_filter;
1486 }
1487
1488 static inline int is_pci_uld(const struct adapter *adap)
1489 {
1490 return adap->params.crypto;
1491 }
1492
1493 static inline int is_uld(const struct adapter *adap)
1494 {
1495 return (adap->params.offload || adap->params.crypto);
1496 }
1497
1498 static inline int is_ethofld(const struct adapter *adap)
1499 {
1500 return adap->params.ethofld;
1501 }
1502
1503 static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
1504 {
1505 return readl(adap->regs + reg_addr);
1506 }
1507
1508 static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
1509 {
1510 writel(val, adap->regs + reg_addr);
1511 }
1512
1513 #ifndef readq
1514 static inline u64 readq(const volatile void __iomem *addr)
1515 {
1516 return readl(addr) + ((u64)readl(addr + 4) << 32);
1517 }
1518
1519 static inline void writeq(u64 val, volatile void __iomem *addr)
1520 {
1521 writel(val, addr);
1522 writel(val >> 32, addr + 4);
1523 }
1524 #endif
1525
1526 static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
1527 {
1528 return readq(adap->regs + reg_addr);
1529 }
1530
1531 static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
1532 {
1533 writeq(val, adap->regs + reg_addr);
1534 }
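
/**
 * t4_set_hw_addr - record a port's MAC address in its net_device
 * @adapter: the adapter
 * @port_idx: the port index
 * @hw_addr: the Ethernet address
 *
 * Stores @hw_addr as both the current and permanent address of the port's
 * net_device.
 */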
1545 static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
1546 u8 hw_addr[])
1547 {
1548 eth_hw_addr_set(adapter->port[port_idx], hw_addr);
1549 ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
1550 }
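
/**
 * netdev2pinfo - return the port_info structure associated with a net_device
 * @dev: the netdev
 */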
1558 static inline struct port_info *netdev2pinfo(const struct net_device *dev)
1559 {
1560 return netdev_priv(dev);
1561 }
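
/**
 * adap2pinfo - return the port_info of a port
 * @adap: the adapter
 * @idx: the port index
 */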
1570 static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
1571 {
1572 return netdev_priv(adap->port[idx]);
1573 }
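
/**
 * netdev2adap - return the adapter structure associated with a net_device
 * @dev: the netdev
 */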
1581 static inline struct adapter *netdev2adap(const struct net_device *dev)
1582 {
1583 return netdev2pinfo(dev)->adapter;
1584 }
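
/* Return a version number identifying the adapter: the chip version in the
 * low bits, the chip release starting at bit 10 and a register-dump format
 * version (currently 1) starting at bit 16.
 */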
1591 static inline unsigned int mk_adap_vers(struct adapter *ap)
1592 {
1593 return CHELSIO_CHIP_VERSION(ap->params.chip) |
1594 (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
1595 }
1596
1597
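/* Return a queue's interrupt hold-off time in us.  0 means no timer. */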
1598 static inline unsigned int qtimer_val(const struct adapter *adap,
1599 const struct sge_rspq *q)
1600 {
1601 unsigned int idx = q->intr_params >> 1;
1602
1603 return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1604 }
1605
1606
1607 extern char cxgb4_driver_name[];
1608
1609 void t4_os_portmod_changed(struct adapter *adap, int port_id);
1610 void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
1611
1612 void t4_free_sge_resources(struct adapter *adap);
1613 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
1614 irq_handler_t t4_intr_handler(struct adapter *adap);
1615 netdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev);
1616 int cxgb4_selftest_lb_pkt(struct net_device *netdev);
1617 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1618 const struct pkt_gl *gl);
1619 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
1620 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
1621 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1622 struct net_device *dev, int intr_idx,
1623 struct sge_fl *fl, rspq_handler_t hnd,
1624 rspq_flush_handler_t flush_handler, int cong);
1625 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
1626 struct net_device *dev, struct netdev_queue *netdevq,
1627 unsigned int iqid, u8 dbqt);
1628 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
1629 struct net_device *dev, unsigned int iqid,
1630 unsigned int cmplqid);
1631 int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
1632 unsigned int cmplqid);
1633 int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
1634 struct net_device *dev, unsigned int iqid,
1635 unsigned int uld_type);
1636 int t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
1637 struct net_device *dev, u32 iqid);
1638 void t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq);
1639 irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
1640 int t4_sge_init(struct adapter *adap);
1641 void t4_sge_start(struct adapter *adap);
1642 void t4_sge_stop(struct adapter *adap);
1643 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q,
1644 int maxreclaim);
1645 void cxgb4_set_ethtool_ops(struct net_device *netdev);
1646 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
1647 enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
1648 extern int dbfifo_int_thresh;
1649
1650 #define for_each_port(adapter, iter) \
1651 for (iter = 0; iter < (adapter)->params.nports; ++iter)
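
/* A minimal sketch of iterating over an adapter's ports ("adap" and "i" are
 * illustration variables):
 *
 *	for_each_port(adap, i) {
 *		struct port_info *pi = adap2pinfo(adap, i);
 *		...
 *	}
 */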
1652
1653 static inline int is_bypass(struct adapter *adap)
1654 {
1655 return adap->params.bypass;
1656 }
1657
1658 static inline int is_bypass_device(int device)
1659 {
1660
1661 switch (device) {
1662 case 0x440b:
1663 case 0x440c:
1664 return 1;
1665 default:
1666 return 0;
1667 }
1668 }
1669
1670 static inline int is_10gbt_device(int device)
1671 {
1672
1673 switch (device) {
1674 case 0x4409:
1675 case 0x4486:
1676 return 1;
1677
1678 default:
1679 return 0;
1680 }
1681 }
1682
1683 static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
1684 {
1685 return adap->params.vpd.cclk / 1000;
1686 }
1687
1688 static inline unsigned int us_to_core_ticks(const struct adapter *adap,
1689 unsigned int us)
1690 {
1691 return (us * adap->params.vpd.cclk) / 1000;
1692 }
1693
1694 static inline unsigned int core_ticks_to_us(const struct adapter *adapter,
1695 unsigned int ticks)
1696 {
1697
1698 return ((ticks * 1000 + adapter->params.vpd.cclk/2) /
1699 adapter->params.vpd.cclk);
1700 }
1701
1702 static inline unsigned int dack_ticks_to_usec(const struct adapter *adap,
1703 unsigned int ticks)
1704 {
1705 return (ticks << adap->params.tp.dack_re) / core_ticks_per_usec(adap);
1706 }
1707
1708 void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
1709 u32 val);
1710
1711 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1712 int size, void *rpl, bool sleep_ok, int timeout);
1713 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
1714 void *rpl, bool sleep_ok);
1715
1716 static inline int t4_wr_mbox_timeout(struct adapter *adap, int mbox,
1717 const void *cmd, int size, void *rpl,
1718 int timeout)
1719 {
1720 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true,
1721 timeout);
1722 }
1723
1724 static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
1725 int size, void *rpl)
1726 {
1727 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
1728 }
1729
1730 static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
1731 int size, void *rpl)
1732 {
1733 return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
1734 }
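
/**
 * hash_mac_addr - return the hash value of a MAC address
 * @addr: the 48-bit Ethernet MAC address
 *
 * Hashes a MAC address into the 6-bit value used by hardware inexact (hash)
 * address matching.
 */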
1743 static inline int hash_mac_addr(const u8 *addr)
1744 {
1745 u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
1746 u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
1747
1748 a ^= b;
1749 a ^= (a >> 12);
1750 a ^= (a >> 6);
1751 return a & 0x3f;
1752 }
1753
1754 int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us,
1755 unsigned int cnt);
1756 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
1757 unsigned int us, unsigned int cnt,
1758 unsigned int size, unsigned int iqe_size)
1759 {
1760 q->adap = adap;
1761 cxgb4_set_rspq_intr_params(q, us, cnt);
1762 q->iqe_len = iqe_size;
1763 q->size = size;
1764 }
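
/**
 * t4_is_inserted_mod_type - is the given module type an inserted transceiver?
 * @fw_mod_type: the Firmware Module Type
 *
 * Returns true unless the module type indicates none/unknown/unsupported/
 * error, i.e. whenever a transceiver module is actually plugged in.
 */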
1773 static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
1774 {
1775 return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
1776 fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
1777 fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
1778 fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
1779 }
1780
1781 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
1782 unsigned int data_reg, const u32 *vals,
1783 unsigned int nregs, unsigned int start_idx);
1784 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
1785 unsigned int data_reg, u32 *vals, unsigned int nregs,
1786 unsigned int start_idx);
1787 void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
1788
1789 struct fw_filter_wr;
1790
1791 void t4_intr_enable(struct adapter *adapter);
1792 void t4_intr_disable(struct adapter *adapter);
1793 int t4_slow_intr_handler(struct adapter *adapter);
1794
1795 int t4_wait_dev_ready(void __iomem *regs);
1796
1797 fw_port_cap32_t t4_link_acaps(struct adapter *adapter, unsigned int port,
1798 struct link_config *lc);
1799 int t4_link_l1cfg_core(struct adapter *adap, unsigned int mbox,
1800 unsigned int port, struct link_config *lc,
1801 u8 sleep_ok, int timeout);
1802
1803 static inline int t4_link_l1cfg(struct adapter *adapter, unsigned int mbox,
1804 unsigned int port, struct link_config *lc)
1805 {
1806 return t4_link_l1cfg_core(adapter, mbox, port, lc,
1807 true, FW_CMD_MAX_TIMEOUT);
1808 }
1809
1810 static inline int t4_link_l1cfg_ns(struct adapter *adapter, unsigned int mbox,
1811 unsigned int port, struct link_config *lc)
1812 {
1813 return t4_link_l1cfg_core(adapter, mbox, port, lc,
1814 false, FW_CMD_MAX_TIMEOUT);
1815 }
1816
1817 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
1818
1819 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
1820 u32 t4_get_util_window(struct adapter *adap);
1821 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
1822
1823 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
1824 u32 *mem_base, u32 *mem_aperture);
1825 void t4_memory_update_win(struct adapter *adap, int win, u32 addr);
1826 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
1827 int dir);
1828 #define T4_MEMORY_WRITE 0
1829 #define T4_MEMORY_READ 1
1830 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
1831 void *buf, int dir);
1832 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
1833 u32 len, __be32 *buf)
1834 {
return t4_memory_rw(adap, 0, mtype, addr, len, buf, T4_MEMORY_WRITE);
1836 }
1837
1838 unsigned int t4_get_regs_len(struct adapter *adapter);
1839 void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size);
1840
1841 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz);
1842 int t4_seeprom_wp(struct adapter *adapter, bool enable);
1843 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p);
1844 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p);
1845 int t4_get_pfres(struct adapter *adapter);
1846 int t4_read_flash(struct adapter *adapter, unsigned int addr,
1847 unsigned int nwords, u32 *data, int byte_oriented);
1848 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
1849 int t4_load_phy_fw(struct adapter *adap, int win,
1850 int (*phy_fw_version)(const u8 *, size_t),
1851 const u8 *phy_fw_data, size_t phy_fw_size);
1852 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver);
1853 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op);
1854 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
1855 const u8 *fw_data, unsigned int size, int force);
1856 int t4_fl_pkt_align(struct adapter *adap);
1857 unsigned int t4_flash_cfg_addr(struct adapter *adapter);
1858 int t4_check_fw_version(struct adapter *adap);
1859 int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
1860 int t4_get_fw_version(struct adapter *adapter, u32 *vers);
1861 int t4_get_bs_version(struct adapter *adapter, u32 *vers);
1862 int t4_get_tp_version(struct adapter *adapter, u32 *vers);
1863 int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
1864 int t4_get_scfg_version(struct adapter *adapter, u32 *vers);
1865 int t4_get_vpd_version(struct adapter *adapter, u32 *vers);
1866 int t4_get_version_info(struct adapter *adapter);
1867 void t4_dump_version_info(struct adapter *adapter);
1868 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
1869 const u8 *fw_data, unsigned int fw_size,
1870 struct fw_hdr *card_fw, enum dev_state state, int *reset);
1871 int t4_prep_adapter(struct adapter *adapter);
1872 int t4_shutdown_adapter(struct adapter *adapter);
1873
1874 enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
1875 int t4_bar2_sge_qregs(struct adapter *adapter,
1876 unsigned int qid,
1877 enum t4_bar2_qtype qtype,
1878 int user,
1879 u64 *pbar2_qoffset,
1880 unsigned int *pbar2_qid);
1881
1882 unsigned int qtimer_val(const struct adapter *adap,
1883 const struct sge_rspq *q);
1884
1885 int t4_init_devlog_params(struct adapter *adapter);
1886 int t4_init_sge_params(struct adapter *adapter);
1887 int t4_init_tp_params(struct adapter *adap, bool sleep_ok);
1888 int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
1889 int t4_init_rss_mode(struct adapter *adap, int mbox);
1890 int t4_init_portinfo(struct port_info *pi, int mbox,
1891 int port, int pf, int vf, u8 mac[]);
1892 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
1893 int t4_init_port_mirror(struct port_info *pi, u8 mbox, u8 port, u8 pf, u8 vf,
1894 u16 *mirror_viid);
1895 void t4_fatal_err(struct adapter *adapter);
1896 unsigned int t4_chip_rss_size(struct adapter *adapter);
1897 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
1898 int start, int n, const u16 *rspq, unsigned int nrspq);
1899 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
1900 unsigned int flags);
1901 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
1902 unsigned int flags, unsigned int defq);
1903 int t4_read_rss(struct adapter *adapter, u16 *entries);
1904 void t4_read_rss_key(struct adapter *adapter, u32 *key, bool sleep_ok);
1905 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
1906 bool sleep_ok);
1907 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
1908 u32 *valp, bool sleep_ok);
1909 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
1910 u32 *vfl, u32 *vfh, bool sleep_ok);
1911 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok);
1912 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok);
1913
1914 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx);
1915 unsigned int t4_get_tp_ch_map(struct adapter *adapter, int pidx);
1916 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1917 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
1918 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data,
1919 size_t n);
1920 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data,
1921 size_t n);
1922 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
1923 unsigned int *valp);
1924 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
1925 const unsigned int *valp);
1926 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
1927 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
1928 unsigned int *pif_req_wrptr,
1929 unsigned int *pif_rsp_wrptr);
1930 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
1931 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
1932 const char *t4_get_port_type_description(enum fw_port_type port_type);
1933 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
1934 void t4_get_port_stats_offset(struct adapter *adap, int idx,
1935 struct port_stats *stats,
1936 struct port_stats *offset);
1937 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
1938 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
1939 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
1940 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
1941 unsigned int mask, unsigned int val);
1942 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
1943 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
1944 bool sleep_ok);
1945 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
1946 bool sleep_ok);
1947 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
1948 bool sleep_ok);
1949 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
1950 bool sleep_ok);
1951 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
1952 struct tp_tcp_stats *v6, bool sleep_ok);
1953 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
1954 struct tp_fcoe_stats *st, bool sleep_ok);
1955 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1956 const unsigned short *alpha, const unsigned short *beta);
1957
1958 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf);
1959
1960 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate);
1961 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid);
1962
1963 void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1964 const u8 *addr);
1965 int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1966 u64 mask0, u64 mask1, unsigned int crc, bool enable);
1967
1968 int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
1969 enum dev_master master, enum dev_state *state);
1970 int t4_fw_bye(struct adapter *adap, unsigned int mbox);
1971 int t4_early_init(struct adapter *adap, unsigned int mbox);
1972 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
1973 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
1974 unsigned int cache_line_size);
1975 int t4_fw_initialize(struct adapter *adap, unsigned int mbox);
1976 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1977 unsigned int vf, unsigned int nparams, const u32 *params,
1978 u32 *val);
1979 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
1980 unsigned int vf, unsigned int nparams, const u32 *params,
1981 u32 *val);
1982 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
1983 unsigned int vf, unsigned int nparams, const u32 *params,
1984 u32 *val, int rw, bool sleep_ok);
1985 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
1986 unsigned int pf, unsigned int vf,
1987 unsigned int nparams, const u32 *params,
1988 const u32 *val, int timeout);
1989 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1990 unsigned int vf, unsigned int nparams, const u32 *params,
1991 const u32 *val);
1992 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
1993 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
1994 unsigned int rxqi, unsigned int rxq, unsigned int tc,
1995 unsigned int vi, unsigned int cmask, unsigned int pmask,
1996 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
1997 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
1998 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
1999 unsigned int *rss_size, u8 *vivld, u8 *vin);
2000 int t4_free_vi(struct adapter *adap, unsigned int mbox,
2001 unsigned int pf, unsigned int vf,
2002 unsigned int viid);
2003 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2004 unsigned int viid_mirror, int mtu, int promisc, int all_multi,
2005 int bcast, int vlanex, bool sleep_ok);
2006 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
2007 const u8 *addr, const u8 *mask, unsigned int idx,
2008 u8 lookup_type, u8 port_id, bool sleep_ok);
2009 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
2010 bool sleep_ok);
2011 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
2012 const u8 *addr, const u8 *mask, unsigned int vni,
2013 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
2014 bool sleep_ok);
2015 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
2016 const u8 *addr, const u8 *mask, unsigned int idx,
2017 u8 lookup_type, u8 port_id, bool sleep_ok);
2018 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2019 unsigned int viid, bool free, unsigned int naddr,
2020 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
2021 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
2022 unsigned int viid, unsigned int naddr,
2023 const u8 **addr, bool sleep_ok);
2024 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2025 int idx, const u8 *addr, bool persist, u8 *smt_idx);
2026 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2027 bool ucast, u64 vec, bool sleep_ok);
2028 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
2029 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
2030 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
2031 struct port_info *pi,
2032 bool rx_en, bool tx_en, bool dcb_en);
2033 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2034 bool rx_en, bool tx_en);
2035 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2036 unsigned int nblinks);
2037 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2038 unsigned int mmd, unsigned int reg, u16 *valp);
2039 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2040 unsigned int mmd, unsigned int reg, u16 val);
2041 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
2042 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2043 unsigned int fl0id, unsigned int fl1id);
2044 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2045 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2046 unsigned int fl0id, unsigned int fl1id);
2047 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2048 unsigned int vf, unsigned int eqid);
2049 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2050 unsigned int vf, unsigned int eqid);
2051 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2052 unsigned int vf, unsigned int eqid);
2053 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
2054 int t4_read_sge_dbqtimers(struct adapter *adap, unsigned int ndbqtimers,
2055 u16 *dbqtimers);
2056 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
2057 int t4_update_port_info(struct port_info *pi);
2058 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
2059 unsigned int *speedp, unsigned int *mtup);
2060 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
2061 void t4_db_full(struct adapter *adapter);
2062 void t4_db_dropped(struct adapter *adapter);
2063 int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
2064 int filter_index, int enable);
2065 void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
2066 int filter_index, int *enabled);
2067 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2068 u32 addr, u32 val);
2069 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED]);
2070 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
2071 unsigned int *kbps, unsigned int *ipg, bool sleep_ok);
2072 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
2073 enum ctxt_type ctype, u32 *data);
2074 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
2075 enum ctxt_type ctype, u32 *data);
2076 int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode,
2077 u8 rateunit, u8 ratemode, u8 channel, u8 class,
2078 u32 minrate, u32 maxrate, u16 weight, u16 pktsize,
2079 u16 burstsize);
2080 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
2081 void t4_idma_monitor_init(struct adapter *adapter,
2082 struct sge_idma_monitor_state *idma);
2083 void t4_idma_monitor(struct adapter *adapter,
2084 struct sge_idma_monitor_state *idma,
2085 int hz, int ticks);
2086 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
2087 unsigned int naddr, u8 *addr);
2088 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
2089 u32 start_index, bool sleep_ok);
2090 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
2091 u32 start_index, bool sleep_ok);
2092 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs,
2093 u32 start_index, bool sleep_ok);
2094
2095 void t4_uld_mem_free(struct adapter *adap);
2096 int t4_uld_mem_alloc(struct adapter *adap);
2097 void t4_uld_clean_up(struct adapter *adap);
2098 void t4_register_netevent_notifier(void);
2099 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
2100 unsigned int devid, unsigned int offset,
2101 unsigned int len, u8 *buf);
2102 int t4_load_boot(struct adapter *adap, u8 *boot_data,
2103 unsigned int boot_addr, unsigned int size);
2104 int t4_load_bootcfg(struct adapter *adap,
2105 const u8 *cfg_data, unsigned int size);
2106 void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
2107 void free_tx_desc(struct adapter *adap, struct sge_txq *q,
2108 unsigned int n, bool unmap);
2109 void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
2110 u32 ndesc);
2111 int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
2112 void cxgb4_ethofld_restart(struct tasklet_struct *t);
2113 int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
2114 const struct pkt_gl *si);
2115 void free_txq(struct adapter *adap, struct sge_txq *q);
2116 void cxgb4_reclaim_completed_tx(struct adapter *adap,
2117 struct sge_txq *q, bool unmap);
2118 int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
2119 dma_addr_t *addr);
2120 void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
2121 void *pos);
2122 void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
2123 struct ulptx_sgl *sgl, u64 *end, unsigned int start,
2124 const dma_addr_t *addr);
2125 void cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
2126 struct ulptx_sgl *sgl, u64 *end,
2127 const dma_addr_t *addr, u32 start, u32 send_len);
2128 void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
2129 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
2130 u16 vlan);
2131 int cxgb4_dcb_enabled(const struct net_device *dev);
2132
2133 int cxgb4_thermal_init(struct adapter *adap);
2134 int cxgb4_thermal_remove(struct adapter *adap);
2135 int cxgb4_set_msix_aff(struct adapter *adap, unsigned short vec,
2136 cpumask_var_t *aff_mask, int idx);
2137 void cxgb4_clear_msix_aff(unsigned short vec, cpumask_var_t aff_mask);
2138
2139 int cxgb4_change_mac(struct port_info *pi, unsigned int viid,
2140 int *tcam_idx, const u8 *addr,
2141 bool persistent, u8 *smt_idx);
2142
2143 int cxgb4_alloc_mac_filt(struct adapter *adap, unsigned int viid,
2144 bool free, unsigned int naddr,
2145 const u8 **addr, u16 *idx,
2146 u64 *hash, bool sleep_ok);
2147 int cxgb4_free_mac_filt(struct adapter *adap, unsigned int viid,
2148 unsigned int naddr, const u8 **addr, bool sleep_ok);
2149 int cxgb4_init_mps_ref_entries(struct adapter *adap);
2150 void cxgb4_free_mps_ref_entries(struct adapter *adap);
2151 int cxgb4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
2152 const u8 *addr, const u8 *mask,
2153 unsigned int vni, unsigned int vni_mask,
2154 u8 dip_hit, u8 lookup_type, bool sleep_ok);
2155 int cxgb4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
2156 int idx, bool sleep_ok);
2157 int cxgb4_free_raw_mac_filt(struct adapter *adap,
2158 unsigned int viid,
2159 const u8 *addr,
2160 const u8 *mask,
2161 unsigned int idx,
2162 u8 lookup_type,
2163 u8 port_id,
2164 bool sleep_ok);
2165 int cxgb4_alloc_raw_mac_filt(struct adapter *adap,
2166 unsigned int viid,
2167 const u8 *addr,
2168 const u8 *mask,
2169 unsigned int idx,
2170 u8 lookup_type,
2171 u8 port_id,
2172 bool sleep_ok);
2173 int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
2174 int *tcam_idx, const u8 *addr,
2175 bool persistent, u8 *smt_idx);
2176 int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
2177 void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
2178 void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
2179 void cxgb4_quiesce_rx(struct sge_rspq *q);
2180 int cxgb4_port_mirror_alloc(struct net_device *dev);
2181 void cxgb4_port_mirror_free(struct net_device *dev);
2182 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
2183 int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
2184 #endif
2185 #endif