0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
0021
0022 #include <linux/module.h>
0023 #include <linux/moduleparam.h>
0024 #include <linux/kernel.h>
0025 #include <linux/device.h> /* for dev_info() */
0026 #include <linux/timer.h>
0027 #include <linux/errno.h>
0028 #include <linux/ioport.h>
0029 #include <linux/slab.h>
0030 #include <linux/interrupt.h>
0031 #include <linux/pci.h>
0032 #include <linux/aer.h>
0033 #include <linux/init.h>
0034 #include <linux/netdevice.h>
0035 #include <linux/etherdevice.h>
0036 #include <linux/skbuff.h>
0037 #include <linux/dma-mapping.h>
0038 #include <linux/bitops.h>
0039 #include <linux/irq.h>
0040 #include <linux/delay.h>
0041 #include <asm/byteorder.h>
0042 #include <linux/time.h>
0043 #include <linux/ethtool.h>
0044 #include <linux/mii.h>
0045 #include <linux/if_vlan.h>
0046 #include <linux/crash_dump.h>
0047 #include <net/ip.h>
0048 #include <net/ipv6.h>
0049 #include <net/tcp.h>
0050 #include <net/vxlan.h>
0051 #include <net/checksum.h>
0052 #include <net/ip6_checksum.h>
0053 #include <linux/workqueue.h>
0054 #include <linux/crc32.h>
0055 #include <linux/crc32c.h>
0056 #include <linux/prefetch.h>
0057 #include <linux/zlib.h>
0058 #include <linux/io.h>
0059 #include <linux/semaphore.h>
0060 #include <linux/stringify.h>
0061 #include <linux/vmalloc.h>
0062 #include "bnx2x.h"
0063 #include "bnx2x_init.h"
0064 #include "bnx2x_init_ops.h"
0065 #include "bnx2x_cmn.h"
0066 #include "bnx2x_vfpf.h"
0067 #include "bnx2x_dcb.h"
0068 #include "bnx2x_sp.h"
0069 #include <linux/firmware.h>
0070 #include "bnx2x_fw_file_hdr.h"
0071
0072 #define FW_FILE_VERSION \
0073 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
0074 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
0075 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
0076 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
0077
0078 #define FW_FILE_VERSION_V15 \
0079 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
0080 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
0081 __stringify(BCM_5710_FW_REVISION_VERSION_V15) "." \
0082 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
0083
0084 #define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
0085 #define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
0086 #define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
0087 #define FW_FILE_NAME_E1_V15 "bnx2x/bnx2x-e1-" FW_FILE_VERSION_V15 ".fw"
0088 #define FW_FILE_NAME_E1H_V15 "bnx2x/bnx2x-e1h-" FW_FILE_VERSION_V15 ".fw"
0089 #define FW_FILE_NAME_E2_V15 "bnx2x/bnx2x-e2-" FW_FILE_VERSION_V15 ".fw"
0090
0091
0092 #define TX_TIMEOUT (5*HZ)
0093
0094 MODULE_AUTHOR("Eliezer Tamir");
0095 MODULE_DESCRIPTION("QLogic "
0096 "BCM57710/57711/57711E/"
0097 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
0098 "57840/57840_MF Driver");
0099 MODULE_LICENSE("GPL");
0100 MODULE_FIRMWARE(FW_FILE_NAME_E1);
0101 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
0102 MODULE_FIRMWARE(FW_FILE_NAME_E2);
0103 MODULE_FIRMWARE(FW_FILE_NAME_E1_V15);
0104 MODULE_FIRMWARE(FW_FILE_NAME_E1H_V15);
0105 MODULE_FIRMWARE(FW_FILE_NAME_E2_V15);
0106
0107 int bnx2x_num_queues;
0108 module_param_named(num_queues, bnx2x_num_queues, int, 0444);
0109 MODULE_PARM_DESC(num_queues,
0110 " Set number of queues (default is as a number of CPUs)");
0111
0112 static int disable_tpa;
0113 module_param(disable_tpa, int, 0444);
0114 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
0115
0116 static int int_mode;
0117 module_param(int_mode, int, 0444);
0118 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
0119 "(1 INT#x; 2 MSI)");
0120
0121 static int dropless_fc;
0122 module_param(dropless_fc, int, 0444);
0123 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
0124
0125 static int mrrs = -1;
0126 module_param(mrrs, int, 0444);
0127 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
0128
0129 static int debug;
0130 module_param(debug, int, 0444);
0131 MODULE_PARM_DESC(debug, " Default debug msglevel");
0132
0133 static struct workqueue_struct *bnx2x_wq;
0134 struct workqueue_struct *bnx2x_iov_wq;
0135
0136 struct bnx2x_mac_vals {
0137 u32 xmac_addr;
0138 u32 xmac_val;
0139 u32 emac_addr;
0140 u32 emac_val;
0141 u32 umac_addr[2];
0142 u32 umac_val[2];
0143 u32 bmac_addr;
0144 u32 bmac_val[2];
0145 };
0146
0147 enum bnx2x_board_type {
0148 BCM57710 = 0,
0149 BCM57711,
0150 BCM57711E,
0151 BCM57712,
0152 BCM57712_MF,
0153 BCM57712_VF,
0154 BCM57800,
0155 BCM57800_MF,
0156 BCM57800_VF,
0157 BCM57810,
0158 BCM57810_MF,
0159 BCM57810_VF,
0160 BCM57840_4_10,
0161 BCM57840_2_20,
0162 BCM57840_MF,
0163 BCM57840_VF,
0164 BCM57811,
0165 BCM57811_MF,
0166 BCM57840_O,
0167 BCM57840_MFO,
0168 BCM57811_VF
0169 };
0170
0171
0172 static struct {
0173 char *name;
0174 } board_info[] = {
0175 [BCM57710] = { "QLogic BCM57710 10 Gigabit PCIe [Everest]" },
0176 [BCM57711] = { "QLogic BCM57711 10 Gigabit PCIe" },
0177 [BCM57711E] = { "QLogic BCM57711E 10 Gigabit PCIe" },
0178 [BCM57712] = { "QLogic BCM57712 10 Gigabit Ethernet" },
0179 [BCM57712_MF] = { "QLogic BCM57712 10 Gigabit Ethernet Multi Function" },
0180 [BCM57712_VF] = { "QLogic BCM57712 10 Gigabit Ethernet Virtual Function" },
0181 [BCM57800] = { "QLogic BCM57800 10 Gigabit Ethernet" },
0182 [BCM57800_MF] = { "QLogic BCM57800 10 Gigabit Ethernet Multi Function" },
0183 [BCM57800_VF] = { "QLogic BCM57800 10 Gigabit Ethernet Virtual Function" },
0184 [BCM57810] = { "QLogic BCM57810 10 Gigabit Ethernet" },
0185 [BCM57810_MF] = { "QLogic BCM57810 10 Gigabit Ethernet Multi Function" },
0186 [BCM57810_VF] = { "QLogic BCM57810 10 Gigabit Ethernet Virtual Function" },
0187 [BCM57840_4_10] = { "QLogic BCM57840 10 Gigabit Ethernet" },
0188 [BCM57840_2_20] = { "QLogic BCM57840 20 Gigabit Ethernet" },
0189 [BCM57840_MF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
0190 [BCM57840_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" },
0191 [BCM57811] = { "QLogic BCM57811 10 Gigabit Ethernet" },
0192 [BCM57811_MF] = { "QLogic BCM57811 10 Gigabit Ethernet Multi Function" },
0193 [BCM57840_O] = { "QLogic BCM57840 10/20 Gigabit Ethernet" },
0194 [BCM57840_MFO] = { "QLogic BCM57840 10/20 Gigabit Ethernet Multi Function" },
0195 [BCM57811_VF] = { "QLogic BCM57840 10/20 Gigabit Ethernet Virtual Function" }
0196 };
0197
0198 #ifndef PCI_DEVICE_ID_NX2_57710
0199 #define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
0200 #endif
0201 #ifndef PCI_DEVICE_ID_NX2_57711
0202 #define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
0203 #endif
0204 #ifndef PCI_DEVICE_ID_NX2_57711E
0205 #define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
0206 #endif
0207 #ifndef PCI_DEVICE_ID_NX2_57712
0208 #define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
0209 #endif
0210 #ifndef PCI_DEVICE_ID_NX2_57712_MF
0211 #define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
0212 #endif
0213 #ifndef PCI_DEVICE_ID_NX2_57712_VF
0214 #define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
0215 #endif
0216 #ifndef PCI_DEVICE_ID_NX2_57800
0217 #define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
0218 #endif
0219 #ifndef PCI_DEVICE_ID_NX2_57800_MF
0220 #define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
0221 #endif
0222 #ifndef PCI_DEVICE_ID_NX2_57800_VF
0223 #define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
0224 #endif
0225 #ifndef PCI_DEVICE_ID_NX2_57810
0226 #define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
0227 #endif
0228 #ifndef PCI_DEVICE_ID_NX2_57810_MF
0229 #define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
0230 #endif
0231 #ifndef PCI_DEVICE_ID_NX2_57840_O
0232 #define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
0233 #endif
0234 #ifndef PCI_DEVICE_ID_NX2_57810_VF
0235 #define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
0236 #endif
0237 #ifndef PCI_DEVICE_ID_NX2_57840_4_10
0238 #define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
0239 #endif
0240 #ifndef PCI_DEVICE_ID_NX2_57840_2_20
0241 #define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
0242 #endif
0243 #ifndef PCI_DEVICE_ID_NX2_57840_MFO
0244 #define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
0245 #endif
0246 #ifndef PCI_DEVICE_ID_NX2_57840_MF
0247 #define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
0248 #endif
0249 #ifndef PCI_DEVICE_ID_NX2_57840_VF
0250 #define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
0251 #endif
0252 #ifndef PCI_DEVICE_ID_NX2_57811
0253 #define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
0254 #endif
0255 #ifndef PCI_DEVICE_ID_NX2_57811_MF
0256 #define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
0257 #endif
0258 #ifndef PCI_DEVICE_ID_NX2_57811_VF
0259 #define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
0260 #endif
0261
0262 static const struct pci_device_id bnx2x_pci_tbl[] = {
0263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
0264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
0265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
0266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
0267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
0268 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
0269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
0270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
0271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
0272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
0273 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
0274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
0275 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
0276 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
0277 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
0278 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
0279 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
0280 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
0281 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
0282 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
0283 { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
0284 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
0285 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
0286 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
0287 { 0 }
0288 };
0289
0290 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
0291
0292 const u32 dmae_reg_go_c[] = {
0293 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
0294 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
0295 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
0296 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
0297 };
0298
0299
0300 #define BNX2X_PREV_WAIT_NEEDED 1
0301 static DEFINE_SEMAPHORE(bnx2x_prev_sem);
0302 static LIST_HEAD(bnx2x_prev_list);
0303
0304
0305 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
0306 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp);
0307 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp);
0308
0309
0310
0311
0312
0313 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr);
0314
0315 static void __storm_memset_dma_mapping(struct bnx2x *bp,
0316 u32 addr, dma_addr_t mapping)
0317 {
0318 REG_WR(bp, addr, U64_LO(mapping));
0319 REG_WR(bp, addr + 4, U64_HI(mapping));
0320 }
0321
0322 static void storm_memset_spq_addr(struct bnx2x *bp,
0323 dma_addr_t mapping, u16 abs_fid)
0324 {
0325 u32 addr = XSEM_REG_FAST_MEMORY +
0326 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
0327
0328 __storm_memset_dma_mapping(bp, addr, mapping);
0329 }
0330
0331 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
0332 u16 pf_id)
0333 {
0334 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
0335 pf_id);
0336 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
0337 pf_id);
0338 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
0339 pf_id);
0340 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
0341 pf_id);
0342 }
0343
0344 static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
0345 u8 enable)
0346 {
0347 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
0348 enable);
0349 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
0350 enable);
0351 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
0352 enable);
0353 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
0354 enable);
0355 }
0356
0357 static void storm_memset_eq_data(struct bnx2x *bp,
0358 struct event_ring_data *eq_data,
0359 u16 pfid)
0360 {
0361 size_t size = sizeof(struct event_ring_data);
0362
0363 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
0364
0365 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
0366 }
0367
0368 static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
0369 u16 pfid)
0370 {
0371 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
0372 REG_WR16(bp, addr, eq_prod);
0373 }
0374
0375
0376
0377
0378 static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
0379 {
0380 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
0381 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
0382 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
0383 PCICFG_VENDOR_ID_OFFSET);
0384 }
0385
0386 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
0387 {
0388 u32 val;
0389
0390 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
0391 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
0392 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
0393 PCICFG_VENDOR_ID_OFFSET);
0394
0395 return val;
0396 }
0397
0398 #define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
0399 #define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
0400 #define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
0401 #define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
0402 #define DMAE_DP_DST_NONE "dst_addr [none]"
0403
0404 static void bnx2x_dp_dmae(struct bnx2x *bp,
0405 struct dmae_command *dmae, int msglvl)
0406 {
0407 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
0408 int i;
0409
0410 switch (dmae->opcode & DMAE_COMMAND_DST) {
0411 case DMAE_CMD_DST_PCI:
0412 if (src_type == DMAE_CMD_SRC_PCI)
0413 DP(msglvl, "DMAE: opcode 0x%08x\n"
0414 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
0415 "comp_addr [%x:%08x], comp_val 0x%08x\n",
0416 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
0417 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
0418 dmae->comp_addr_hi, dmae->comp_addr_lo,
0419 dmae->comp_val);
0420 else
0421 DP(msglvl, "DMAE: opcode 0x%08x\n"
0422 "src [%08x], len [%d*4], dst [%x:%08x]\n"
0423 "comp_addr [%x:%08x], comp_val 0x%08x\n",
0424 dmae->opcode, dmae->src_addr_lo >> 2,
0425 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
0426 dmae->comp_addr_hi, dmae->comp_addr_lo,
0427 dmae->comp_val);
0428 break;
0429 case DMAE_CMD_DST_GRC:
0430 if (src_type == DMAE_CMD_SRC_PCI)
0431 DP(msglvl, "DMAE: opcode 0x%08x\n"
0432 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
0433 "comp_addr [%x:%08x], comp_val 0x%08x\n",
0434 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
0435 dmae->len, dmae->dst_addr_lo >> 2,
0436 dmae->comp_addr_hi, dmae->comp_addr_lo,
0437 dmae->comp_val);
0438 else
0439 DP(msglvl, "DMAE: opcode 0x%08x\n"
0440 "src [%08x], len [%d*4], dst [%08x]\n"
0441 "comp_addr [%x:%08x], comp_val 0x%08x\n",
0442 dmae->opcode, dmae->src_addr_lo >> 2,
0443 dmae->len, dmae->dst_addr_lo >> 2,
0444 dmae->comp_addr_hi, dmae->comp_addr_lo,
0445 dmae->comp_val);
0446 break;
0447 default:
0448 if (src_type == DMAE_CMD_SRC_PCI)
0449 DP(msglvl, "DMAE: opcode 0x%08x\n"
0450 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
0451 "comp_addr [%x:%08x] comp_val 0x%08x\n",
0452 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
0453 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
0454 dmae->comp_val);
0455 else
0456 DP(msglvl, "DMAE: opcode 0x%08x\n"
0457 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
0458 "comp_addr [%x:%08x] comp_val 0x%08x\n",
0459 dmae->opcode, dmae->src_addr_lo >> 2,
0460 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
0461 dmae->comp_val);
0462 break;
0463 }
0464
0465 for (i = 0; i < (sizeof(struct dmae_command)/4); i++)
0466 DP(msglvl, "DMAE RAW [%02d]: 0x%08x\n",
0467 i, *(((u32 *)dmae) + i));
0468 }
0469
0470
0471 void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
0472 {
0473 u32 cmd_offset;
0474 int i;
0475
0476 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
0477 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
0478 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
0479 }
0480 REG_WR(bp, dmae_reg_go_c[idx], 1);
0481 }
0482
0483 u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
0484 {
0485 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
0486 DMAE_CMD_C_ENABLE);
0487 }
0488
0489 u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
0490 {
0491 return opcode & ~DMAE_CMD_SRC_RESET;
0492 }
0493
0494 u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
0495 bool with_comp, u8 comp_type)
0496 {
0497 u32 opcode = 0;
0498
0499 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
0500 (dst_type << DMAE_COMMAND_DST_SHIFT));
0501
0502 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
0503
0504 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
0505 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
0506 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
0507 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
0508
0509 #ifdef __BIG_ENDIAN
0510 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
0511 #else
0512 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
0513 #endif
0514 if (with_comp)
0515 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
0516 return opcode;
0517 }
0518
0519 void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
0520 struct dmae_command *dmae,
0521 u8 src_type, u8 dst_type)
0522 {
0523 memset(dmae, 0, sizeof(struct dmae_command));
0524
0525
0526 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
0527 true, DMAE_COMP_PCI);
0528
0529
0530 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
0531 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
0532 dmae->comp_val = DMAE_COMP_VAL;
0533 }
0534
0535
0536 int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
0537 u32 *comp)
0538 {
0539 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
0540 int rc = 0;
0541
0542 bnx2x_dp_dmae(bp, dmae, BNX2X_MSG_DMAE);
0543
0544
0545
0546
0547
0548
0549 spin_lock_bh(&bp->dmae_lock);
0550
0551
0552 *comp = 0;
0553
0554
0555 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
0556
0557
0558 udelay(5);
0559 while ((*comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
0560
0561 if (!cnt ||
0562 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
0563 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
0564 BNX2X_ERR("DMAE timeout!\n");
0565 rc = DMAE_TIMEOUT;
0566 goto unlock;
0567 }
0568 cnt--;
0569 udelay(50);
0570 }
0571 if (*comp & DMAE_PCI_ERR_FLAG) {
0572 BNX2X_ERR("DMAE PCI error!\n");
0573 rc = DMAE_PCI_ERROR;
0574 }
0575
0576 unlock:
0577
0578 spin_unlock_bh(&bp->dmae_lock);
0579
0580 return rc;
0581 }
0582
0583 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
0584 u32 len32)
0585 {
0586 int rc;
0587 struct dmae_command dmae;
0588
0589 if (!bp->dmae_ready) {
0590 u32 *data = bnx2x_sp(bp, wb_data[0]);
0591
0592 if (CHIP_IS_E1(bp))
0593 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
0594 else
0595 bnx2x_init_str_wr(bp, dst_addr, data, len32);
0596 return;
0597 }
0598
0599
0600 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
0601
0602
0603 dmae.src_addr_lo = U64_LO(dma_addr);
0604 dmae.src_addr_hi = U64_HI(dma_addr);
0605 dmae.dst_addr_lo = dst_addr >> 2;
0606 dmae.dst_addr_hi = 0;
0607 dmae.len = len32;
0608
0609
0610 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
0611 if (rc) {
0612 BNX2X_ERR("DMAE returned failure %d\n", rc);
0613 #ifdef BNX2X_STOP_ON_ERROR
0614 bnx2x_panic();
0615 #endif
0616 }
0617 }
0618
0619 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
0620 {
0621 int rc;
0622 struct dmae_command dmae;
0623
0624 if (!bp->dmae_ready) {
0625 u32 *data = bnx2x_sp(bp, wb_data[0]);
0626 int i;
0627
0628 if (CHIP_IS_E1(bp))
0629 for (i = 0; i < len32; i++)
0630 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
0631 else
0632 for (i = 0; i < len32; i++)
0633 data[i] = REG_RD(bp, src_addr + i*4);
0634
0635 return;
0636 }
0637
0638
0639 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
0640
0641
0642 dmae.src_addr_lo = src_addr >> 2;
0643 dmae.src_addr_hi = 0;
0644 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
0645 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
0646 dmae.len = len32;
0647
0648
0649 rc = bnx2x_issue_dmae_with_comp(bp, &dmae, bnx2x_sp(bp, wb_comp));
0650 if (rc) {
0651 BNX2X_ERR("DMAE returned failure %d\n", rc);
0652 #ifdef BNX2X_STOP_ON_ERROR
0653 bnx2x_panic();
0654 #endif
0655 }
0656 }
0657
0658 static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
0659 u32 addr, u32 len)
0660 {
0661 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
0662 int offset = 0;
0663
0664 while (len > dmae_wr_max) {
0665 bnx2x_write_dmae(bp, phys_addr + offset,
0666 addr + offset, dmae_wr_max);
0667 offset += dmae_wr_max * 4;
0668 len -= dmae_wr_max;
0669 }
0670
0671 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
0672 }
0673
0674 enum storms {
0675 XSTORM,
0676 TSTORM,
0677 CSTORM,
0678 USTORM,
0679 MAX_STORMS
0680 };
0681
0682 #define STORMS_NUM 4
0683 #define REGS_IN_ENTRY 4
0684
0685 static inline int bnx2x_get_assert_list_entry(struct bnx2x *bp,
0686 enum storms storm,
0687 int entry)
0688 {
0689 switch (storm) {
0690 case XSTORM:
0691 return XSTORM_ASSERT_LIST_OFFSET(entry);
0692 case TSTORM:
0693 return TSTORM_ASSERT_LIST_OFFSET(entry);
0694 case CSTORM:
0695 return CSTORM_ASSERT_LIST_OFFSET(entry);
0696 case USTORM:
0697 return USTORM_ASSERT_LIST_OFFSET(entry);
0698 case MAX_STORMS:
0699 default:
0700 BNX2X_ERR("unknown storm\n");
0701 }
0702 return -EINVAL;
0703 }
0704
0705 static int bnx2x_mc_assert(struct bnx2x *bp)
0706 {
0707 char last_idx;
0708 int i, j, rc = 0;
0709 enum storms storm;
0710 u32 regs[REGS_IN_ENTRY];
0711 u32 bar_storm_intmem[STORMS_NUM] = {
0712 BAR_XSTRORM_INTMEM,
0713 BAR_TSTRORM_INTMEM,
0714 BAR_CSTRORM_INTMEM,
0715 BAR_USTRORM_INTMEM
0716 };
0717 u32 storm_assert_list_index[STORMS_NUM] = {
0718 XSTORM_ASSERT_LIST_INDEX_OFFSET,
0719 TSTORM_ASSERT_LIST_INDEX_OFFSET,
0720 CSTORM_ASSERT_LIST_INDEX_OFFSET,
0721 USTORM_ASSERT_LIST_INDEX_OFFSET
0722 };
0723 char *storms_string[STORMS_NUM] = {
0724 "XSTORM",
0725 "TSTORM",
0726 "CSTORM",
0727 "USTORM"
0728 };
0729
0730 for (storm = XSTORM; storm < MAX_STORMS; storm++) {
0731 last_idx = REG_RD8(bp, bar_storm_intmem[storm] +
0732 storm_assert_list_index[storm]);
0733 if (last_idx)
0734 BNX2X_ERR("%s_ASSERT_LIST_INDEX 0x%x\n",
0735 storms_string[storm], last_idx);
0736
0737
0738 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
0739
0740 for (j = 0; j < REGS_IN_ENTRY; j++)
0741 regs[j] = REG_RD(bp, bar_storm_intmem[storm] +
0742 bnx2x_get_assert_list_entry(bp,
0743 storm,
0744 i) +
0745 sizeof(u32) * j);
0746
0747
0748 if (regs[0] != COMMON_ASM_INVALID_ASSERT_OPCODE) {
0749 BNX2X_ERR("%s_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
0750 storms_string[storm], i, regs[3],
0751 regs[2], regs[1], regs[0]);
0752 rc++;
0753 } else {
0754 break;
0755 }
0756 }
0757 }
0758
0759 BNX2X_ERR("Chip Revision: %s, FW Version: %d_%d_%d\n",
0760 CHIP_IS_E1(bp) ? "everest1" :
0761 CHIP_IS_E1H(bp) ? "everest1h" :
0762 CHIP_IS_E2(bp) ? "everest2" : "everest3",
0763 bp->fw_major, bp->fw_minor, bp->fw_rev);
0764
0765 return rc;
0766 }
0767
0768 #define MCPR_TRACE_BUFFER_SIZE (0x800)
0769 #define SCRATCH_BUFFER_SIZE(bp) \
0770 (CHIP_IS_E1(bp) ? 0x10000 : (CHIP_IS_E1H(bp) ? 0x20000 : 0x28000))
0771
0772 void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
0773 {
0774 u32 addr, val;
0775 u32 mark, offset;
0776 __be32 data[9];
0777 int word;
0778 u32 trace_shmem_base;
0779 if (BP_NOMCP(bp)) {
0780 BNX2X_ERR("NO MCP - can not dump\n");
0781 return;
0782 }
0783 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
0784 (bp->common.bc_ver & 0xff0000) >> 16,
0785 (bp->common.bc_ver & 0xff00) >> 8,
0786 (bp->common.bc_ver & 0xff));
0787
0788 if (pci_channel_offline(bp->pdev)) {
0789 BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
0790 return;
0791 }
0792
0793 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
0794 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
0795 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
0796
0797 if (BP_PATH(bp) == 0)
0798 trace_shmem_base = bp->common.shmem_base;
0799 else
0800 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
0801
0802
0803 if (trace_shmem_base < MCPR_SCRATCH_BASE(bp) + MCPR_TRACE_BUFFER_SIZE ||
0804 trace_shmem_base >= MCPR_SCRATCH_BASE(bp) +
0805 SCRATCH_BUFFER_SIZE(bp)) {
0806 BNX2X_ERR("Unable to dump trace buffer (mark %x)\n",
0807 trace_shmem_base);
0808 return;
0809 }
0810
0811 addr = trace_shmem_base - MCPR_TRACE_BUFFER_SIZE;
0812
0813
0814 mark = REG_RD(bp, addr);
0815 if (mark != MFW_TRACE_SIGNATURE) {
0816 BNX2X_ERR("Trace buffer signature is missing.");
0817 return ;
0818 }
0819
0820
0821 addr += 4;
0822 mark = REG_RD(bp, addr);
0823 mark = MCPR_SCRATCH_BASE(bp) + ((mark + 0x3) & ~0x3) - 0x08000000;
0824 if (mark >= trace_shmem_base || mark < addr + 4) {
0825 BNX2X_ERR("Mark doesn't fall inside Trace Buffer\n");
0826 return;
0827 }
0828 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
0829
0830 printk("%s", lvl);
0831
0832
0833 for (offset = mark; offset < trace_shmem_base; offset += 0x8*4) {
0834 for (word = 0; word < 8; word++)
0835 data[word] = htonl(REG_RD(bp, offset + 4*word));
0836 data[8] = 0x0;
0837 pr_cont("%s", (char *)data);
0838 }
0839
0840
0841 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
0842 for (word = 0; word < 8; word++)
0843 data[word] = htonl(REG_RD(bp, offset + 4*word));
0844 data[8] = 0x0;
0845 pr_cont("%s", (char *)data);
0846 }
0847 printk("%s" "end of fw dump\n", lvl);
0848 }
0849
0850 static void bnx2x_fw_dump(struct bnx2x *bp)
0851 {
0852 bnx2x_fw_dump_lvl(bp, KERN_ERR);
0853 }
0854
0855 static void bnx2x_hc_int_disable(struct bnx2x *bp)
0856 {
0857 int port = BP_PORT(bp);
0858 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
0859 u32 val = REG_RD(bp, addr);
0860
0861
0862
0863
0864
0865 if (CHIP_IS_E1(bp)) {
0866
0867
0868
0869
0870 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
0871
0872 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
0873 HC_CONFIG_0_REG_INT_LINE_EN_0 |
0874 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
0875 } else
0876 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
0877 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
0878 HC_CONFIG_0_REG_INT_LINE_EN_0 |
0879 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
0880
0881 DP(NETIF_MSG_IFDOWN,
0882 "write %x to HC %d (addr 0x%x)\n",
0883 val, port, addr);
0884
0885 REG_WR(bp, addr, val);
0886 if (REG_RD(bp, addr) != val)
0887 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
0888 }
0889
0890 static void bnx2x_igu_int_disable(struct bnx2x *bp)
0891 {
0892 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
0893
0894 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
0895 IGU_PF_CONF_INT_LINE_EN |
0896 IGU_PF_CONF_ATTN_BIT_EN);
0897
0898 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
0899
0900 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
0901 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
0902 BNX2X_ERR("BUG! Proper val not read from IGU!\n");
0903 }
0904
0905 static void bnx2x_int_disable(struct bnx2x *bp)
0906 {
0907 if (bp->common.int_block == INT_BLOCK_HC)
0908 bnx2x_hc_int_disable(bp);
0909 else
0910 bnx2x_igu_int_disable(bp);
0911 }
0912
0913 void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
0914 {
0915 int i;
0916 u16 j;
0917 struct hc_sp_status_block_data sp_sb_data;
0918 int func = BP_FUNC(bp);
0919 #ifdef BNX2X_STOP_ON_ERROR
0920 u16 start = 0, end = 0;
0921 u8 cos;
0922 #endif
0923 if (IS_PF(bp) && disable_int)
0924 bnx2x_int_disable(bp);
0925
0926 bp->stats_state = STATS_STATE_DISABLED;
0927 bp->eth_stats.unrecoverable_error++;
0928 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
0929
0930 BNX2X_ERR("begin crash dump -----------------\n");
0931
0932
0933
0934 if (IS_PF(bp)) {
0935 struct host_sp_status_block *def_sb = bp->def_status_blk;
0936 int data_size, cstorm_offset;
0937
0938 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
0939 bp->def_idx, bp->def_att_idx, bp->attn_state,
0940 bp->spq_prod_idx, bp->stats_counter);
0941 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
0942 def_sb->atten_status_block.attn_bits,
0943 def_sb->atten_status_block.attn_bits_ack,
0944 def_sb->atten_status_block.status_block_id,
0945 def_sb->atten_status_block.attn_bits_index);
0946 BNX2X_ERR(" def (");
0947 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
0948 pr_cont("0x%x%s",
0949 def_sb->sp_sb.index_values[i],
0950 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
0951
0952 data_size = sizeof(struct hc_sp_status_block_data) /
0953 sizeof(u32);
0954 cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
0955 for (i = 0; i < data_size; i++)
0956 *((u32 *)&sp_sb_data + i) =
0957 REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
0958 i * sizeof(u32));
0959
0960 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
0961 sp_sb_data.igu_sb_id,
0962 sp_sb_data.igu_seg_id,
0963 sp_sb_data.p_func.pf_id,
0964 sp_sb_data.p_func.vnic_id,
0965 sp_sb_data.p_func.vf_id,
0966 sp_sb_data.p_func.vf_valid,
0967 sp_sb_data.state);
0968 }
0969
0970 for_each_eth_queue(bp, i) {
0971 struct bnx2x_fastpath *fp = &bp->fp[i];
0972 int loop;
0973 struct hc_status_block_data_e2 sb_data_e2;
0974 struct hc_status_block_data_e1x sb_data_e1x;
0975 struct hc_status_block_sm *hc_sm_p =
0976 CHIP_IS_E1x(bp) ?
0977 sb_data_e1x.common.state_machine :
0978 sb_data_e2.common.state_machine;
0979 struct hc_index_data *hc_index_p =
0980 CHIP_IS_E1x(bp) ?
0981 sb_data_e1x.index_data :
0982 sb_data_e2.index_data;
0983 u8 data_size, cos;
0984 u32 *sb_data_p;
0985 struct bnx2x_fp_txdata txdata;
0986
0987 if (!bp->fp)
0988 break;
0989
0990 if (!fp->rx_cons_sb)
0991 continue;
0992
0993
0994 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
0995 i, fp->rx_bd_prod, fp->rx_bd_cons,
0996 fp->rx_comp_prod,
0997 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
0998 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
0999 fp->rx_sge_prod, fp->last_max_sge,
1000 le16_to_cpu(fp->fp_hc_idx));
1001
1002
1003 for_each_cos_in_tx_queue(fp, cos)
1004 {
1005 if (!fp->txdata_ptr[cos])
1006 break;
1007
1008 txdata = *fp->txdata_ptr[cos];
1009
1010 if (!txdata.tx_cons_sb)
1011 continue;
1012
1013 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
1014 i, txdata.tx_pkt_prod,
1015 txdata.tx_pkt_cons, txdata.tx_bd_prod,
1016 txdata.tx_bd_cons,
1017 le16_to_cpu(*txdata.tx_cons_sb));
1018 }
1019
1020 loop = CHIP_IS_E1x(bp) ?
1021 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
1022
1023
1024
1025 if (IS_FCOE_FP(fp))
1026 continue;
1027
1028 BNX2X_ERR(" run indexes (");
1029 for (j = 0; j < HC_SB_MAX_SM; j++)
1030 pr_cont("0x%x%s",
1031 fp->sb_running_index[j],
1032 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
1033
1034 BNX2X_ERR(" indexes (");
1035 for (j = 0; j < loop; j++)
1036 pr_cont("0x%x%s",
1037 fp->sb_index_values[j],
1038 (j == loop - 1) ? ")" : " ");
1039
1040
1041 if (IS_VF(bp))
1042 continue;
1043
1044
1045 data_size = CHIP_IS_E1x(bp) ?
1046 sizeof(struct hc_status_block_data_e1x) :
1047 sizeof(struct hc_status_block_data_e2);
1048 data_size /= sizeof(u32);
1049 sb_data_p = CHIP_IS_E1x(bp) ?
1050 (u32 *)&sb_data_e1x :
1051 (u32 *)&sb_data_e2;
1052
1053 for (j = 0; j < data_size; j++)
1054 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
1055 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
1056 j * sizeof(u32));
1057
1058 if (!CHIP_IS_E1x(bp)) {
1059 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1060 sb_data_e2.common.p_func.pf_id,
1061 sb_data_e2.common.p_func.vf_id,
1062 sb_data_e2.common.p_func.vf_valid,
1063 sb_data_e2.common.p_func.vnic_id,
1064 sb_data_e2.common.same_igu_sb_1b,
1065 sb_data_e2.common.state);
1066 } else {
1067 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
1068 sb_data_e1x.common.p_func.pf_id,
1069 sb_data_e1x.common.p_func.vf_id,
1070 sb_data_e1x.common.p_func.vf_valid,
1071 sb_data_e1x.common.p_func.vnic_id,
1072 sb_data_e1x.common.same_igu_sb_1b,
1073 sb_data_e1x.common.state);
1074 }
1075
1076
1077 for (j = 0; j < HC_SB_MAX_SM; j++) {
1078 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1079 j, hc_sm_p[j].__flags,
1080 hc_sm_p[j].igu_sb_id,
1081 hc_sm_p[j].igu_seg_id,
1082 hc_sm_p[j].time_to_expire,
1083 hc_sm_p[j].timer_value);
1084 }
1085
1086
1087 for (j = 0; j < loop; j++) {
1088 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
1089 hc_index_p[j].flags,
1090 hc_index_p[j].timeout);
1091 }
1092 }
1093
1094 #ifdef BNX2X_STOP_ON_ERROR
1095 if (IS_PF(bp)) {
1096
1097 BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
1098 for (i = 0; i < NUM_EQ_DESC; i++) {
1099 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1100
1101 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1102 i, bp->eq_ring[i].message.opcode,
1103 bp->eq_ring[i].message.error);
1104 BNX2X_ERR("data: %x %x %x\n",
1105 data[0], data[1], data[2]);
1106 }
1107 }
1108
1109
1110
1111 for_each_valid_rx_queue(bp, i) {
1112 struct bnx2x_fastpath *fp = &bp->fp[i];
1113
1114 if (!bp->fp)
1115 break;
1116
1117 if (!fp->rx_cons_sb)
1118 continue;
1119
1120 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1121 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
1122 for (j = start; j != end; j = RX_BD(j + 1)) {
1123 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1124 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1125
1126 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
1127 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
1128 }
1129
1130 start = RX_SGE(fp->rx_sge_prod);
1131 end = RX_SGE(fp->last_max_sge);
1132 for (j = start; j != end; j = RX_SGE(j + 1)) {
1133 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1134 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1135
1136 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1137 i, j, rx_sge[1], rx_sge[0], sw_page->page);
1138 }
1139
1140 start = RCQ_BD(fp->rx_comp_cons - 10);
1141 end = RCQ_BD(fp->rx_comp_cons + 503);
1142 for (j = start; j != end; j = RCQ_BD(j + 1)) {
1143 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1144
1145 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1146 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
1147 }
1148 }
1149
1150
1151 for_each_valid_tx_queue(bp, i) {
1152 struct bnx2x_fastpath *fp = &bp->fp[i];
1153
1154 if (!bp->fp)
1155 break;
1156
1157 for_each_cos_in_tx_queue(fp, cos) {
1158 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1159
1160 if (!fp->txdata_ptr[cos])
1161 break;
1162
1163 if (!txdata->tx_cons_sb)
1164 continue;
1165
1166 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1167 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1168 for (j = start; j != end; j = TX_BD(j + 1)) {
1169 struct sw_tx_bd *sw_bd =
1170 &txdata->tx_buf_ring[j];
1171
1172 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
1173 i, cos, j, sw_bd->skb,
1174 sw_bd->first_bd);
1175 }
1176
1177 start = TX_BD(txdata->tx_bd_cons - 10);
1178 end = TX_BD(txdata->tx_bd_cons + 254);
1179 for (j = start; j != end; j = TX_BD(j + 1)) {
1180 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
1181
1182 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
1183 i, cos, j, tx_bd[0], tx_bd[1],
1184 tx_bd[2], tx_bd[3]);
1185 }
1186 }
1187 }
1188 #endif
1189 if (IS_PF(bp)) {
1190 int tmp_msg_en = bp->msg_enable;
1191
1192 bnx2x_fw_dump(bp);
1193 bp->msg_enable |= NETIF_MSG_HW;
1194 BNX2X_ERR("Idle check (1st round) ----------\n");
1195 bnx2x_idle_chk(bp);
1196 BNX2X_ERR("Idle check (2nd round) ----------\n");
1197 bnx2x_idle_chk(bp);
1198 bp->msg_enable = tmp_msg_en;
1199 bnx2x_mc_assert(bp);
1200 }
1201
1202 BNX2X_ERR("end crash dump -----------------\n");
1203 }
1204
1205
1206
1207
1208
1209
1210
1211 #define FLR_WAIT_USEC 10000
1212 #define FLR_WAIT_INTERVAL 50
1213 #define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL)
1214
1215 struct pbf_pN_buf_regs {
1216 int pN;
1217 u32 init_crd;
1218 u32 crd;
1219 u32 crd_freed;
1220 };
1221
1222 struct pbf_pN_cmd_regs {
1223 int pN;
1224 u32 lines_occup;
1225 u32 lines_freed;
1226 };
1227
1228 static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1229 struct pbf_pN_buf_regs *regs,
1230 u32 poll_count)
1231 {
1232 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1233 u32 cur_cnt = poll_count;
1234
1235 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1236 crd = crd_start = REG_RD(bp, regs->crd);
1237 init_crd = REG_RD(bp, regs->init_crd);
1238
1239 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1240 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1241 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1242
1243 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1244 (init_crd - crd_start))) {
1245 if (cur_cnt--) {
1246 udelay(FLR_WAIT_INTERVAL);
1247 crd = REG_RD(bp, regs->crd);
1248 crd_freed = REG_RD(bp, regs->crd_freed);
1249 } else {
1250 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1251 regs->pN);
1252 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1253 regs->pN, crd);
1254 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1255 regs->pN, crd_freed);
1256 break;
1257 }
1258 }
1259 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
1260 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1261 }
1262
1263 static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1264 struct pbf_pN_cmd_regs *regs,
1265 u32 poll_count)
1266 {
1267 u32 occup, to_free, freed, freed_start;
1268 u32 cur_cnt = poll_count;
1269
1270 occup = to_free = REG_RD(bp, regs->lines_occup);
1271 freed = freed_start = REG_RD(bp, regs->lines_freed);
1272
1273 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1274 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1275
1276 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1277 if (cur_cnt--) {
1278 udelay(FLR_WAIT_INTERVAL);
1279 occup = REG_RD(bp, regs->lines_occup);
1280 freed = REG_RD(bp, regs->lines_freed);
1281 } else {
1282 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1283 regs->pN);
1284 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1285 regs->pN, occup);
1286 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1287 regs->pN, freed);
1288 break;
1289 }
1290 }
1291 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
1292 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1293 }
1294
1295 static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1296 u32 expected, u32 poll_count)
1297 {
1298 u32 cur_cnt = poll_count;
1299 u32 val;
1300
1301 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
1302 udelay(FLR_WAIT_INTERVAL);
1303
1304 return val;
1305 }
1306
1307 int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1308 char *msg, u32 poll_cnt)
1309 {
1310 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1311 if (val != 0) {
1312 BNX2X_ERR("%s usage count=%d\n", msg, val);
1313 return 1;
1314 }
1315 return 0;
1316 }
1317
1318
1319 u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
1320 {
1321
1322 if (CHIP_REV_IS_EMUL(bp))
1323 return FLR_POLL_CNT * 2000;
1324
1325 if (CHIP_REV_IS_FPGA(bp))
1326 return FLR_POLL_CNT * 120;
1327
1328 return FLR_POLL_CNT;
1329 }
1330
1331 void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1332 {
1333 struct pbf_pN_cmd_regs cmd_regs[] = {
1334 {0, (CHIP_IS_E3B0(bp)) ?
1335 PBF_REG_TQ_OCCUPANCY_Q0 :
1336 PBF_REG_P0_TQ_OCCUPANCY,
1337 (CHIP_IS_E3B0(bp)) ?
1338 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1339 PBF_REG_P0_TQ_LINES_FREED_CNT},
1340 {1, (CHIP_IS_E3B0(bp)) ?
1341 PBF_REG_TQ_OCCUPANCY_Q1 :
1342 PBF_REG_P1_TQ_OCCUPANCY,
1343 (CHIP_IS_E3B0(bp)) ?
1344 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1345 PBF_REG_P1_TQ_LINES_FREED_CNT},
1346 {4, (CHIP_IS_E3B0(bp)) ?
1347 PBF_REG_TQ_OCCUPANCY_LB_Q :
1348 PBF_REG_P4_TQ_OCCUPANCY,
1349 (CHIP_IS_E3B0(bp)) ?
1350 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1351 PBF_REG_P4_TQ_LINES_FREED_CNT}
1352 };
1353
1354 struct pbf_pN_buf_regs buf_regs[] = {
1355 {0, (CHIP_IS_E3B0(bp)) ?
1356 PBF_REG_INIT_CRD_Q0 :
1357 PBF_REG_P0_INIT_CRD ,
1358 (CHIP_IS_E3B0(bp)) ?
1359 PBF_REG_CREDIT_Q0 :
1360 PBF_REG_P0_CREDIT,
1361 (CHIP_IS_E3B0(bp)) ?
1362 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1363 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1364 {1, (CHIP_IS_E3B0(bp)) ?
1365 PBF_REG_INIT_CRD_Q1 :
1366 PBF_REG_P1_INIT_CRD,
1367 (CHIP_IS_E3B0(bp)) ?
1368 PBF_REG_CREDIT_Q1 :
1369 PBF_REG_P1_CREDIT,
1370 (CHIP_IS_E3B0(bp)) ?
1371 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1372 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1373 {4, (CHIP_IS_E3B0(bp)) ?
1374 PBF_REG_INIT_CRD_LB_Q :
1375 PBF_REG_P4_INIT_CRD,
1376 (CHIP_IS_E3B0(bp)) ?
1377 PBF_REG_CREDIT_LB_Q :
1378 PBF_REG_P4_CREDIT,
1379 (CHIP_IS_E3B0(bp)) ?
1380 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1381 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1382 };
1383
1384 int i;
1385
1386
1387 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1388 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1389
1390
1391 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1392 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1393 }
1394
1395 #define OP_GEN_PARAM(param) \
1396 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1397
1398 #define OP_GEN_TYPE(type) \
1399 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1400
1401 #define OP_GEN_AGG_VECT(index) \
1402 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1403
1404 int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
1405 {
1406 u32 op_gen_command = 0;
1407 u32 comp_addr = BAR_CSTRORM_INTMEM +
1408 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1409
1410 if (REG_RD(bp, comp_addr)) {
1411 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
1412 return 1;
1413 }
1414
1415 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1416 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1417 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1418 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
1419
1420 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
1421 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
1422
1423 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1424 BNX2X_ERR("FW final cleanup did not succeed\n");
1425 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1426 (REG_RD(bp, comp_addr)));
1427 bnx2x_panic();
1428 return 1;
1429 }
1430
1431 REG_WR(bp, comp_addr, 0);
1432
1433 return 0;
1434 }
1435
1436 u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1437 {
1438 u16 status;
1439
1440 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
1441 return status & PCI_EXP_DEVSTA_TRPND;
1442 }
1443
1444
1445
1446 static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1447 {
1448
1449 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1450 CFC_REG_NUM_LCIDS_INSIDE_PF,
1451 "CFC PF usage counter timed out",
1452 poll_cnt))
1453 return 1;
1454
1455
1456 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1457 DORQ_REG_PF_USAGE_CNT,
1458 "DQ PF usage counter timed out",
1459 poll_cnt))
1460 return 1;
1461
1462
1463 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1464 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1465 "QM PF usage counter timed out",
1466 poll_cnt))
1467 return 1;
1468
1469
1470 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1471 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1472 "Timers VNIC usage counter timed out",
1473 poll_cnt))
1474 return 1;
1475 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1476 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1477 "Timers NUM_SCANS usage counter timed out",
1478 poll_cnt))
1479 return 1;
1480
1481
1482 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1483 dmae_reg_go_c[INIT_DMAE_C(bp)],
1484 "DMAE command register timed out",
1485 poll_cnt))
1486 return 1;
1487
1488 return 0;
1489 }
1490
1491 static void bnx2x_hw_enable_status(struct bnx2x *bp)
1492 {
1493 u32 val;
1494
1495 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1496 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1497
1498 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1499 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1500
1501 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1502 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1503
1504 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1505 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1506
1507 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1508 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1509
1510 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1511 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1512
1513 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1514 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1515
1516 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1517 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1518 val);
1519 }
1520
1521 static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1522 {
1523 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1524
1525 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1526
1527
1528 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1529
1530
1531 DP(BNX2X_MSG_SP, "Polling usage counters\n");
1532 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1533 return -EBUSY;
1534
1535
1536
1537
1538 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1539 return -EBUSY;
1540
1541
1542
1543
1544 bnx2x_tx_hw_flushed(bp, poll_cnt);
1545
1546
1547 msleep(100);
1548
1549
1550 if (bnx2x_is_pcie_pending(bp->pdev))
1551 BNX2X_ERR("PCIE Transactions still pending\n");
1552
1553
1554 bnx2x_hw_enable_status(bp);
1555
1556
1557
1558
1559
1560 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1561
1562 return 0;
1563 }
1564
1565 static void bnx2x_hc_int_enable(struct bnx2x *bp)
1566 {
1567 int port = BP_PORT(bp);
1568 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1569 u32 val = REG_RD(bp, addr);
1570 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1571 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1572 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1573
1574 if (msix) {
1575 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1576 HC_CONFIG_0_REG_INT_LINE_EN_0);
1577 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1578 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1579 if (single_msix)
1580 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1581 } else if (msi) {
1582 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1583 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1584 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1585 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1586 } else {
1587 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1588 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1589 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1590 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1591
1592 if (!CHIP_IS_E1(bp)) {
1593 DP(NETIF_MSG_IFUP,
1594 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
1595
1596 REG_WR(bp, addr, val);
1597
1598 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1599 }
1600 }
1601
1602 if (CHIP_IS_E1(bp))
1603 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1604
1605 DP(NETIF_MSG_IFUP,
1606 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1607 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1608
1609 REG_WR(bp, addr, val);
1610
1611
1612
1613 barrier();
1614
1615 if (!CHIP_IS_E1(bp)) {
1616
1617 if (IS_MF(bp)) {
1618 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1619 if (bp->port.pmf)
1620
1621 val |= 0x1100;
1622 } else
1623 val = 0xffff;
1624
1625 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1626 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1627 }
1628 }
1629
1630 static void bnx2x_igu_int_enable(struct bnx2x *bp)
1631 {
1632 u32 val;
1633 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1634 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1635 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1636
1637 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1638
1639 if (msix) {
1640 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1641 IGU_PF_CONF_SINGLE_ISR_EN);
1642 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1643 IGU_PF_CONF_ATTN_BIT_EN);
1644
1645 if (single_msix)
1646 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1647 } else if (msi) {
1648 val &= ~IGU_PF_CONF_INT_LINE_EN;
1649 val |= (IGU_PF_CONF_MSI_MSIX_EN |
1650 IGU_PF_CONF_ATTN_BIT_EN |
1651 IGU_PF_CONF_SINGLE_ISR_EN);
1652 } else {
1653 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
1654 val |= (IGU_PF_CONF_INT_LINE_EN |
1655 IGU_PF_CONF_ATTN_BIT_EN |
1656 IGU_PF_CONF_SINGLE_ISR_EN);
1657 }
1658
1659
1660 if ((!msix) || single_msix) {
1661 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1662 bnx2x_ack_int(bp);
1663 }
1664
1665 val |= IGU_PF_CONF_FUNC_EN;
1666
1667 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
1668 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1669
1670 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1671
1672 if (val & IGU_PF_CONF_INT_LINE_EN)
1673 pci_intx(bp->pdev, true);
1674
1675 barrier();
1676
1677
1678 if (IS_MF(bp)) {
1679 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
1680 if (bp->port.pmf)
1681
1682 val |= 0x1100;
1683 } else
1684 val = 0xffff;
1685
1686 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1687 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1688 }
1689
1690 void bnx2x_int_enable(struct bnx2x *bp)
1691 {
1692 if (bp->common.int_block == INT_BLOCK_HC)
1693 bnx2x_hc_int_enable(bp);
1694 else
1695 bnx2x_igu_int_enable(bp);
1696 }
1697
1698 void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1699 {
1700 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
1701 int i, offset;
1702
1703 if (disable_hw)
1704
1705 bnx2x_int_disable(bp);
1706
1707
1708 if (msix) {
1709 synchronize_irq(bp->msix_table[0].vector);
1710 offset = 1;
1711 if (CNIC_SUPPORT(bp))
1712 offset++;
1713 for_each_eth_queue(bp, i)
1714 synchronize_irq(bp->msix_table[offset++].vector);
1715 } else
1716 synchronize_irq(bp->pdev->irq);
1717
1718
1719 cancel_delayed_work(&bp->sp_task);
1720 cancel_delayed_work(&bp->period_task);
1721 flush_workqueue(bnx2x_wq);
1722 }
1723
1724
1725
1726
1727
1728
1729
1730
1731 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1732 {
1733 u32 lock_status;
1734 u32 resource_bit = (1 << resource);
1735 int func = BP_FUNC(bp);
1736 u32 hw_lock_control_reg;
1737
1738 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1739 "Trying to take a lock on resource %d\n", resource);
1740
1741
1742 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1743 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1744 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1745 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1746 return false;
1747 }
1748
1749 if (func <= 5)
1750 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1751 else
1752 hw_lock_control_reg =
1753 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1754
1755
1756 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1757 lock_status = REG_RD(bp, hw_lock_control_reg);
1758 if (lock_status & resource_bit)
1759 return true;
1760
1761 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1762 "Failed to get a lock on resource %d\n", resource);
1763 return false;
1764 }
1765
1766
1767
1768
1769
1770
1771
1772
1773
1774 static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1775 {
1776 if (BP_PATH(bp))
1777 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1778 else
1779 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1780 }
1781
1782
1783
1784
1785
1786
1787
1788
1789 static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1790 {
1791 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1792 }
1793
1794 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
1795
1796
1797 static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1798 {
1799
1800
1801
1802
1803 atomic_set(&bp->interrupt_occurred, 1);
1804
1805
1806
1807
1808
1809 smp_wmb();
1810
1811
1812 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1813 }
1814
1815 void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1816 {
1817 struct bnx2x *bp = fp->bp;
1818 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1819 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1820 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
1821 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
1822
1823 DP(BNX2X_MSG_SP,
1824 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1825 fp->index, cid, command, bp->state,
1826 rr_cqe->ramrod_cqe.ramrod_type);
1827
1828
1829
1830
1831 if (cid >= BNX2X_FIRST_VF_CID &&
1832 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1833 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1834
1835 switch (command) {
1836 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
1837 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
1838 drv_cmd = BNX2X_Q_CMD_UPDATE;
1839 break;
1840
1841 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
1842 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
1843 drv_cmd = BNX2X_Q_CMD_SETUP;
1844 break;
1845
1846 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
1847 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
1848 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1849 break;
1850
1851 case (RAMROD_CMD_ID_ETH_HALT):
1852 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
1853 drv_cmd = BNX2X_Q_CMD_HALT;
1854 break;
1855
1856 case (RAMROD_CMD_ID_ETH_TERMINATE):
1857 DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
1858 drv_cmd = BNX2X_Q_CMD_TERMINATE;
1859 break;
1860
1861 case (RAMROD_CMD_ID_ETH_EMPTY):
1862 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
1863 drv_cmd = BNX2X_Q_CMD_EMPTY;
1864 break;
1865
1866 case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
1867 DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
1868 drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
1869 break;
1870
1871 default:
1872 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1873 command, fp->index);
1874 return;
1875 }
1876
1877 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1878 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1879
1880
1881
1882
1883
1884
1885
1886 #ifdef BNX2X_STOP_ON_ERROR
1887 bnx2x_panic();
1888 #else
1889 return;
1890 #endif
1891
1892 smp_mb__before_atomic();
1893 atomic_inc(&bp->cq_spq_left);
1894
1895 smp_mb__after_atomic();
1896
1897 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1898
1899 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1900 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1901
1902
1903
1904
1905
1906
1907
1908
1909
1910 smp_mb__before_atomic();
1911 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1912 wmb();
1913 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1914 smp_mb__after_atomic();
1915
1916
1917 bnx2x_schedule_sp_task(bp);
1918 }
1919
1920 return;
1921 }
1922
1923 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1924 {
1925 struct bnx2x *bp = netdev_priv(dev_instance);
1926 u16 status = bnx2x_ack_int(bp);
1927 u16 mask;
1928 int i;
1929 u8 cos;
1930
1931
1932 if (unlikely(status == 0)) {
1933 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1934 return IRQ_NONE;
1935 }
1936 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1937
1938 #ifdef BNX2X_STOP_ON_ERROR
1939 if (unlikely(bp->panic))
1940 return IRQ_HANDLED;
1941 #endif
1942
1943 for_each_eth_queue(bp, i) {
1944 struct bnx2x_fastpath *fp = &bp->fp[i];
1945
1946 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
1947 if (status & mask) {
1948
1949 for_each_cos_in_tx_queue(fp, cos)
1950 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1951 prefetch(&fp->sb_running_index[SM_RX_ID]);
1952 napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1953 status &= ~mask;
1954 }
1955 }
1956
1957 if (CNIC_SUPPORT(bp)) {
1958 mask = 0x2;
1959 if (status & (mask | 0x1)) {
1960 struct cnic_ops *c_ops = NULL;
1961
1962 rcu_read_lock();
1963 c_ops = rcu_dereference(bp->cnic_ops);
1964 if (c_ops && (bp->cnic_eth_dev.drv_state &
1965 CNIC_DRV_STATE_HANDLES_IRQ))
1966 c_ops->cnic_handler(bp->cnic_data, NULL);
1967 rcu_read_unlock();
1968
1969 status &= ~mask;
1970 }
1971 }
1972
1973 if (unlikely(status & 0x1)) {
1974
1975
1976
1977
1978 bnx2x_schedule_sp_task(bp);
1979
1980 status &= ~0x1;
1981 if (!status)
1982 return IRQ_HANDLED;
1983 }
1984
1985 if (unlikely(status))
1986 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1987 status);
1988
1989 return IRQ_HANDLED;
1990 }
1991
1992
1993
1994
1995
1996
1997
1998 int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1999 {
2000 u32 lock_status;
2001 u32 resource_bit = (1 << resource);
2002 int func = BP_FUNC(bp);
2003 u32 hw_lock_control_reg;
2004 int cnt;
2005
2006
2007 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2008 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2009 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2010 return -EINVAL;
2011 }
2012
2013 if (func <= 5) {
2014 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2015 } else {
2016 hw_lock_control_reg =
2017 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2018 }
2019
2020
2021 lock_status = REG_RD(bp, hw_lock_control_reg);
2022 if (lock_status & resource_bit) {
2023 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
2024 lock_status, resource_bit);
2025 return -EEXIST;
2026 }
2027
2028
2029 for (cnt = 0; cnt < 1000; cnt++) {
2030
2031 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
2032 lock_status = REG_RD(bp, hw_lock_control_reg);
2033 if (lock_status & resource_bit)
2034 return 0;
2035
2036 usleep_range(5000, 10000);
2037 }
2038 BNX2X_ERR("Timeout\n");
2039 return -EAGAIN;
2040 }
2041
2042 int bnx2x_release_leader_lock(struct bnx2x *bp)
2043 {
2044 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
2045 }
2046
2047 int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
2048 {
2049 u32 lock_status;
2050 u32 resource_bit = (1 << resource);
2051 int func = BP_FUNC(bp);
2052 u32 hw_lock_control_reg;
2053
2054
2055 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
2056 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
2057 resource, HW_LOCK_MAX_RESOURCE_VALUE);
2058 return -EINVAL;
2059 }
2060
2061 if (func <= 5) {
2062 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
2063 } else {
2064 hw_lock_control_reg =
2065 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
2066 }
2067
2068
2069 lock_status = REG_RD(bp, hw_lock_control_reg);
2070 if (!(lock_status & resource_bit)) {
2071 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. Unlock was called but lock wasn't taken!\n",
2072 lock_status, resource_bit);
2073 return -EFAULT;
2074 }
2075
2076 REG_WR(bp, hw_lock_control_reg, resource_bit);
2077 return 0;
2078 }
2079
2080 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
2081 {
2082
2083 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085 int gpio_shift = gpio_num +
2086 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087 u32 gpio_mask = (1 << gpio_shift);
2088 u32 gpio_reg;
2089 int value;
2090
2091 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2092 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2093 return -EINVAL;
2094 }
2095
2096
2097 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2098
2099
2100 if ((gpio_reg & gpio_mask) == gpio_mask)
2101 value = 1;
2102 else
2103 value = 0;
2104
2105 return value;
2106 }
2107
2108 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2109 {
2110
2111 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2112 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2113 int gpio_shift = gpio_num +
2114 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2115 u32 gpio_mask = (1 << gpio_shift);
2116 u32 gpio_reg;
2117
2118 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2119 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2120 return -EINVAL;
2121 }
2122
2123 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2124
2125 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2126
2127 switch (mode) {
2128 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2129 DP(NETIF_MSG_LINK,
2130 "Set GPIO %d (shift %d) -> output low\n",
2131 gpio_num, gpio_shift);
2132
2133 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2134 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2135 break;
2136
2137 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2138 DP(NETIF_MSG_LINK,
2139 "Set GPIO %d (shift %d) -> output high\n",
2140 gpio_num, gpio_shift);
2141
2142 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2143 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2144 break;
2145
2146 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2147 DP(NETIF_MSG_LINK,
2148 "Set GPIO %d (shift %d) -> input\n",
2149 gpio_num, gpio_shift);
2150
2151 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2152 break;
2153
2154 default:
2155 break;
2156 }
2157
2158 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2159 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2160
2161 return 0;
2162 }
2163
2164 int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2165 {
2166 u32 gpio_reg = 0;
2167 int rc = 0;
2168
2169
2170
2171 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2172
2173 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2174 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2175 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2176 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2177
2178 switch (mode) {
2179 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2180 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2181
2182 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2183 break;
2184
2185 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2186 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2187
2188 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2189 break;
2190
2191 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2192 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2193
2194 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2195 break;
2196
2197 default:
2198 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2199 rc = -EINVAL;
2200 break;
2201 }
2202
2203 if (rc == 0)
2204 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2205
2206 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2207
2208 return rc;
2209 }
2210
2211 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2212 {
2213
2214 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2215 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2216 int gpio_shift = gpio_num +
2217 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2218 u32 gpio_mask = (1 << gpio_shift);
2219 u32 gpio_reg;
2220
2221 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2222 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2223 return -EINVAL;
2224 }
2225
2226 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2227
2228 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2229
2230 switch (mode) {
2231 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2232 DP(NETIF_MSG_LINK,
2233 "Clear GPIO INT %d (shift %d) -> output low\n",
2234 gpio_num, gpio_shift);
2235
2236 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2237 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2238 break;
2239
2240 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2241 DP(NETIF_MSG_LINK,
2242 "Set GPIO INT %d (shift %d) -> output high\n",
2243 gpio_num, gpio_shift);
2244
2245 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2246 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2247 break;
2248
2249 default:
2250 break;
2251 }
2252
2253 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2254 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2255
2256 return 0;
2257 }
2258
2259 static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
2260 {
2261 u32 spio_reg;
2262
2263
2264 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2265 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
2266 return -EINVAL;
2267 }
2268
2269 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2270
2271 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
2272
2273 switch (mode) {
2274 case MISC_SPIO_OUTPUT_LOW:
2275 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
2276
2277 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2278 spio_reg |= (spio << MISC_SPIO_CLR_POS);
2279 break;
2280
2281 case MISC_SPIO_OUTPUT_HIGH:
2282 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
2283
2284 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2285 spio_reg |= (spio << MISC_SPIO_SET_POS);
2286 break;
2287
2288 case MISC_SPIO_INPUT_HI_Z:
2289 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
2290
2291 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
2292 break;
2293
2294 default:
2295 break;
2296 }
2297
2298 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2299 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2300
2301 return 0;
2302 }
2303
2304 void bnx2x_calc_fc_adv(struct bnx2x *bp)
2305 {
2306 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
2307
2308 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
2309 ADVERTISED_Pause);
2310 switch (bp->link_vars.ieee_fc &
2311 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2312 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2313 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
2314 ADVERTISED_Pause);
2315 break;
2316
2317 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2318 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
2319 break;
2320
2321 default:
2322 break;
2323 }
2324 }
2325
2326 static void bnx2x_set_requested_fc(struct bnx2x *bp)
2327 {
2328
2329
2330
2331
2332 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2333 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2334 else
2335 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2336 }
2337
2338 static void bnx2x_init_dropless_fc(struct bnx2x *bp)
2339 {
2340 u32 pause_enabled = 0;
2341
2342 if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
2343 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2344 pause_enabled = 1;
2345
2346 REG_WR(bp, BAR_USTRORM_INTMEM +
2347 USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
2348 pause_enabled);
2349 }
2350
2351 DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
2352 pause_enabled ? "enabled" : "disabled");
2353 }
2354
2355 int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2356 {
2357 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2358 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2359
2360 if (!BP_NOMCP(bp)) {
2361 bnx2x_set_requested_fc(bp);
2362 bnx2x_acquire_phy_lock(bp);
2363
2364 if (load_mode == LOAD_DIAG) {
2365 struct link_params *lp = &bp->link_params;
2366 lp->loopback_mode = LOOPBACK_XGXS;
2367
2368 if (lp->req_line_speed[cfx_idx] < SPEED_20000) {
2369 if (lp->speed_cap_mask[cfx_idx] &
2370 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
2371 lp->req_line_speed[cfx_idx] =
2372 SPEED_20000;
2373 else if (lp->speed_cap_mask[cfx_idx] &
2374 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2375 lp->req_line_speed[cfx_idx] =
2376 SPEED_10000;
2377 else
2378 lp->req_line_speed[cfx_idx] =
2379 SPEED_1000;
2380 }
2381 }
2382
2383 if (load_mode == LOAD_LOOPBACK_EXT) {
2384 struct link_params *lp = &bp->link_params;
2385 lp->loopback_mode = LOOPBACK_EXT;
2386 }
2387
2388 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2389
2390 bnx2x_release_phy_lock(bp);
2391
2392 bnx2x_init_dropless_fc(bp);
2393
2394 bnx2x_calc_fc_adv(bp);
2395
2396 if (bp->link_vars.link_up) {
2397 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2398 bnx2x_link_report(bp);
2399 }
2400 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2401 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
2402 return rc;
2403 }
2404 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2405 return -EINVAL;
2406 }
2407
2408 void bnx2x_link_set(struct bnx2x *bp)
2409 {
2410 if (!BP_NOMCP(bp)) {
2411 bnx2x_acquire_phy_lock(bp);
2412 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2413 bnx2x_release_phy_lock(bp);
2414
2415 bnx2x_init_dropless_fc(bp);
2416
2417 bnx2x_calc_fc_adv(bp);
2418 } else
2419 BNX2X_ERR("Bootcode is missing - can not set link\n");
2420 }
2421
2422 static void bnx2x__link_reset(struct bnx2x *bp)
2423 {
2424 if (!BP_NOMCP(bp)) {
2425 bnx2x_acquire_phy_lock(bp);
2426 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
2427 bnx2x_release_phy_lock(bp);
2428 } else
2429 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2430 }
2431
2432 void bnx2x_force_link_reset(struct bnx2x *bp)
2433 {
2434 bnx2x_acquire_phy_lock(bp);
2435 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2436 bnx2x_release_phy_lock(bp);
2437 }
2438
2439 u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2440 {
2441 u8 rc = 0;
2442
2443 if (!BP_NOMCP(bp)) {
2444 bnx2x_acquire_phy_lock(bp);
2445 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2446 is_serdes);
2447 bnx2x_release_phy_lock(bp);
2448 } else
2449 BNX2X_ERR("Bootcode is missing - can not test link\n");
2450
2451 return rc;
2452 }
2453
2454
2455
2456
2457
2458
2459
2460
2461
2462
2463 static void bnx2x_calc_vn_min(struct bnx2x *bp,
2464 struct cmng_init_input *input)
2465 {
2466 int all_zero = 1;
2467 int vn;
2468
2469 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2470 u32 vn_cfg = bp->mf_config[vn];
2471 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2472 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2473
2474
2475 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2476 vn_min_rate = 0;
2477
2478 else if (!vn_min_rate)
2479 vn_min_rate = DEF_MIN_RATE;
2480 else
2481 all_zero = 0;
2482
2483 input->vnic_min_rate[vn] = vn_min_rate;
2484 }
2485
2486
2487 if (BNX2X_IS_ETS_ENABLED(bp)) {
2488 input->flags.cmng_enables &=
2489 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2490 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2491 } else if (all_zero) {
2492 input->flags.cmng_enables &=
2493 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2494 DP(NETIF_MSG_IFUP,
2495 "All MIN values are zeroes fairness will be disabled\n");
2496 } else
2497 input->flags.cmng_enables |=
2498 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2499 }
2500
2501 static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2502 struct cmng_init_input *input)
2503 {
2504 u16 vn_max_rate;
2505 u32 vn_cfg = bp->mf_config[vn];
2506
2507 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2508 vn_max_rate = 0;
2509 else {
2510 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2511
2512 if (IS_MF_PERCENT_BW(bp)) {
2513
2514 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2515 } else
2516
2517 vn_max_rate = maxCfg * 100;
2518 }
2519
2520 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2521
2522 input->vnic_max_rate[vn] = vn_max_rate;
2523 }
2524
2525 static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2526 {
2527 if (CHIP_REV_IS_SLOW(bp))
2528 return CMNG_FNS_NONE;
2529 if (IS_MF(bp))
2530 return CMNG_FNS_MINMAX;
2531
2532 return CMNG_FNS_NONE;
2533 }
2534
2535 void bnx2x_read_mf_cfg(struct bnx2x *bp)
2536 {
2537 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2538
2539 if (BP_NOMCP(bp))
2540 return;
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
2552
2553 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2554 int func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2555
2556 if (func >= E1H_FUNC_MAX)
2557 break;
2558
2559 bp->mf_config[vn] =
2560 MF_CFG_RD(bp, func_mf_config[func].config);
2561 }
2562 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2563 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2564 bp->flags |= MF_FUNC_DIS;
2565 } else {
2566 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2567 bp->flags &= ~MF_FUNC_DIS;
2568 }
2569 }
2570
2571 static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2572 {
2573 struct cmng_init_input input;
2574 memset(&input, 0, sizeof(struct cmng_init_input));
2575
2576 input.port_rate = bp->link_vars.line_speed;
2577
2578 if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
2579 int vn;
2580
2581
2582 if (read_cfg)
2583 bnx2x_read_mf_cfg(bp);
2584
2585
2586 bnx2x_calc_vn_min(bp, &input);
2587
2588
2589 if (bp->port.pmf)
2590 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2591 bnx2x_calc_vn_max(bp, vn, &input);
2592
2593
2594 input.flags.cmng_enables |=
2595 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2596
2597 bnx2x_init_cmng(&input, &bp->cmng);
2598 return;
2599 }
2600
2601
2602 DP(NETIF_MSG_IFUP,
2603 "rate shaping and fairness are disabled\n");
2604 }
2605
2606 static void storm_memset_cmng(struct bnx2x *bp,
2607 struct cmng_init *cmng,
2608 u8 port)
2609 {
2610 int vn;
2611 size_t size = sizeof(struct cmng_struct_per_port);
2612
2613 u32 addr = BAR_XSTRORM_INTMEM +
2614 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2615
2616 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2617
2618 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2619 int func = func_by_vn(bp, vn);
2620
2621 addr = BAR_XSTRORM_INTMEM +
2622 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2623 size = sizeof(struct rate_shaping_vars_per_vn);
2624 __storm_memset_struct(bp, addr, size,
2625 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2626
2627 addr = BAR_XSTRORM_INTMEM +
2628 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2629 size = sizeof(struct fairness_vars_per_vn);
2630 __storm_memset_struct(bp, addr, size,
2631 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2632 }
2633 }
2634
2635
2636 void bnx2x_set_local_cmng(struct bnx2x *bp)
2637 {
2638 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2639
2640 if (cmng_fns != CMNG_FNS_NONE) {
2641 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2642 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2643 } else {
2644
2645 DP(NETIF_MSG_IFUP,
2646 "single function mode without fairness\n");
2647 }
2648 }
2649
2650
2651 static void bnx2x_link_attn(struct bnx2x *bp)
2652 {
2653
2654 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2655
2656 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2657
2658 bnx2x_init_dropless_fc(bp);
2659
2660 if (bp->link_vars.link_up) {
2661
2662 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
2663 struct host_port_stats *pstats;
2664
2665 pstats = bnx2x_sp(bp, port_stats);
2666
2667 memset(&(pstats->mac_stx[0]), 0,
2668 sizeof(struct mac_stx));
2669 }
2670 if (bp->state == BNX2X_STATE_OPEN)
2671 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2672 }
2673
2674 if (bp->link_vars.link_up && bp->link_vars.line_speed)
2675 bnx2x_set_local_cmng(bp);
2676
2677 __bnx2x_link_report(bp);
2678
2679 if (IS_MF(bp))
2680 bnx2x_link_sync_notify(bp);
2681 }
2682
2683 void bnx2x__link_status_update(struct bnx2x *bp)
2684 {
2685 if (bp->state != BNX2X_STATE_OPEN)
2686 return;
2687
2688
2689 if (IS_PF(bp)) {
2690 bnx2x_dcbx_pmf_update(bp);
2691 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2692 if (bp->link_vars.link_up)
2693 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2694 else
2695 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2696
2697 bnx2x_link_report(bp);
2698
2699 } else {
2700 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2701 SUPPORTED_10baseT_Full |
2702 SUPPORTED_100baseT_Half |
2703 SUPPORTED_100baseT_Full |
2704 SUPPORTED_1000baseT_Full |
2705 SUPPORTED_2500baseX_Full |
2706 SUPPORTED_10000baseT_Full |
2707 SUPPORTED_TP |
2708 SUPPORTED_FIBRE |
2709 SUPPORTED_Autoneg |
2710 SUPPORTED_Pause |
2711 SUPPORTED_Asym_Pause);
2712 bp->port.advertising[0] = bp->port.supported[0];
2713
2714 bp->link_params.bp = bp;
2715 bp->link_params.port = BP_PORT(bp);
2716 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2717 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2718 bp->link_params.req_line_speed[0] = SPEED_10000;
2719 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2720 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2721 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2722 bp->link_vars.line_speed = SPEED_10000;
2723 bp->link_vars.link_status =
2724 (LINK_STATUS_LINK_UP |
2725 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2726 bp->link_vars.link_up = 1;
2727 bp->link_vars.duplex = DUPLEX_FULL;
2728 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2729 __bnx2x_link_report(bp);
2730
2731 bnx2x_sample_bulletin(bp);
2732
2733
2734
2735
2736
2737
2738 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2739 }
2740 }
2741
2742 static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2743 u16 vlan_val, u8 allowed_prio)
2744 {
2745 struct bnx2x_func_state_params func_params = {NULL};
2746 struct bnx2x_func_afex_update_params *f_update_params =
2747 &func_params.params.afex_update;
2748
2749 func_params.f_obj = &bp->func_obj;
2750 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2751
2752
2753
2754
2755
2756 f_update_params->vif_id = vifid;
2757 f_update_params->afex_default_vlan = vlan_val;
2758 f_update_params->allowed_priorities = allowed_prio;
2759
2760
2761 if (bnx2x_func_state_change(bp, &func_params) < 0)
2762 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2763
2764 return 0;
2765 }
2766
2767 static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2768 u16 vif_index, u8 func_bit_map)
2769 {
2770 struct bnx2x_func_state_params func_params = {NULL};
2771 struct bnx2x_func_afex_viflists_params *update_params =
2772 &func_params.params.afex_viflists;
2773 int rc;
2774 u32 drv_msg_code;
2775
2776
2777 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2778 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2779 cmd_type);
2780
2781 func_params.f_obj = &bp->func_obj;
2782 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2783
2784
2785 update_params->afex_vif_list_command = cmd_type;
2786 update_params->vif_list_index = vif_index;
2787 update_params->func_bit_map =
2788 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2789 update_params->func_to_clear = 0;
2790 drv_msg_code =
2791 (cmd_type == VIF_LIST_RULE_GET) ?
2792 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2793 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2794
2795
2796
2797
2798 rc = bnx2x_func_state_change(bp, &func_params);
2799 if (rc < 0)
2800 bnx2x_fw_command(bp, drv_msg_code, 0);
2801
2802 return 0;
2803 }
2804
2805 static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2806 {
2807 struct afex_stats afex_stats;
2808 u32 func = BP_ABS_FUNC(bp);
2809 u32 mf_config;
2810 u16 vlan_val;
2811 u32 vlan_prio;
2812 u16 vif_id;
2813 u8 allowed_prio;
2814 u8 vlan_mode;
2815 u32 addr_to_write, vifid, addrs, stats_type, i;
2816
2817 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2818 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2819 DP(BNX2X_MSG_MCP,
2820 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2821 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2822 }
2823
2824 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2825 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2826 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2827 DP(BNX2X_MSG_MCP,
2828 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2829 vifid, addrs);
2830 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2831 addrs);
2832 }
2833
2834 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2835 addr_to_write = SHMEM2_RD(bp,
2836 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2837 stats_type = SHMEM2_RD(bp,
2838 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2839
2840 DP(BNX2X_MSG_MCP,
2841 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2842 addr_to_write);
2843
2844 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2845
2846
2847 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2848 REG_WR(bp, addr_to_write + i*sizeof(u32),
2849 *(((u32 *)(&afex_stats))+i));
2850
2851
2852 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2853 }
2854
2855 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2856 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2857 bp->mf_config[BP_VN(bp)] = mf_config;
2858 DP(BNX2X_MSG_MCP,
2859 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2860 mf_config);
2861
2862
2863 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2864
2865 struct cmng_init_input cmng_input;
2866 struct rate_shaping_vars_per_vn m_rs_vn;
2867 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2868 u32 addr = BAR_XSTRORM_INTMEM +
2869 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2870
2871 bp->mf_config[BP_VN(bp)] = mf_config;
2872
2873 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2874 m_rs_vn.vn_counter.rate =
2875 cmng_input.vnic_max_rate[BP_VN(bp)];
2876 m_rs_vn.vn_counter.quota =
2877 (m_rs_vn.vn_counter.rate *
2878 RS_PERIODIC_TIMEOUT_USEC) / 8;
2879
2880 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2881
2882
2883 vif_id =
2884 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2885 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2886 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2887 vlan_val =
2888 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2889 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2890 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2891 vlan_prio = (mf_config &
2892 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2893 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2894 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2895 vlan_mode =
2896 (MF_CFG_RD(bp,
2897 func_mf_config[func].afex_config) &
2898 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2899 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2900 allowed_prio =
2901 (MF_CFG_RD(bp,
2902 func_mf_config[func].afex_config) &
2903 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2904 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2905
2906
2907 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2908 allowed_prio))
2909 return;
2910
2911 bp->afex_def_vlan_tag = vlan_val;
2912 bp->afex_vlan_mode = vlan_mode;
2913 } else {
2914
2915 bnx2x_link_report(bp);
2916
2917
2918 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2919
2920
2921 bp->afex_def_vlan_tag = -1;
2922 }
2923 }
2924 }
2925
2926 static void bnx2x_handle_update_svid_cmd(struct bnx2x *bp)
2927 {
2928 struct bnx2x_func_switch_update_params *switch_update_params;
2929 struct bnx2x_func_state_params func_params;
2930
2931 memset(&func_params, 0, sizeof(struct bnx2x_func_state_params));
2932 switch_update_params = &func_params.params.switch_update;
2933 func_params.f_obj = &bp->func_obj;
2934 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
2935
2936
2937 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2938 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
2939
2940 if (IS_MF_UFP(bp) || IS_MF_BD(bp)) {
2941 int func = BP_ABS_FUNC(bp);
2942 u32 val;
2943
2944
2945 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2946 FUNC_MF_CFG_E1HOV_TAG_MASK;
2947 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
2948 bp->mf_ov = val;
2949 } else {
2950 BNX2X_ERR("Got an SVID event, but no tag is configured in shmem\n");
2951 goto fail;
2952 }
2953
2954
2955 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + BP_PORT(bp) * 8,
2956 bp->mf_ov);
2957
2958
2959 __set_bit(BNX2X_F_UPDATE_SD_VLAN_TAG_CHNG,
2960 &switch_update_params->changes);
2961 switch_update_params->vlan = bp->mf_ov;
2962
2963 if (bnx2x_func_state_change(bp, &func_params) < 0) {
2964 BNX2X_ERR("Failed to configure FW of S-tag Change to %02x\n",
2965 bp->mf_ov);
2966 goto fail;
2967 } else {
2968 DP(BNX2X_MSG_MCP, "Configured S-tag %02x\n",
2969 bp->mf_ov);
2970 }
2971 } else {
2972 goto fail;
2973 }
2974
2975 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_OK, 0);
2976 return;
2977 fail:
2978 bnx2x_fw_command(bp, DRV_MSG_CODE_OEM_UPDATE_SVID_FAILURE, 0);
2979 }
2980
2981 static void bnx2x_pmf_update(struct bnx2x *bp)
2982 {
2983 int port = BP_PORT(bp);
2984 u32 val;
2985
2986 bp->port.pmf = 1;
2987 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
2988
2989
2990
2991
2992
2993 smp_mb();
2994
2995
2996 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2997
2998 bnx2x_dcbx_pmf_update(bp);
2999
3000
3001 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
3002 if (bp->common.int_block == INT_BLOCK_HC) {
3003 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
3004 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
3005 } else if (!CHIP_IS_E1x(bp)) {
3006 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
3007 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
3008 }
3009
3010 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3011 }
3012
3013
3014
3015
3016
3017
3018
3019
3020
3021
3022 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
3023 {
3024 int mb_idx = BP_FW_MB_IDX(bp);
3025 u32 seq;
3026 u32 rc = 0;
3027 u32 cnt = 1;
3028 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
3029
3030 mutex_lock(&bp->fw_mb_mutex);
3031 seq = ++bp->fw_seq;
3032 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
3033 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
3034
3035 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
3036 (command | seq), param);
3037
3038 do {
3039
3040 msleep(delay);
3041
3042 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
3043
3044
3045 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
3046
3047 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
3048 cnt*delay, rc, seq);
3049
3050
3051 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
3052 rc &= FW_MSG_CODE_MASK;
3053 else {
3054
3055 BNX2X_ERR("FW failed to respond!\n");
3056 bnx2x_fw_dump(bp);
3057 rc = 0;
3058 }
3059 mutex_unlock(&bp->fw_mb_mutex);
3060
3061 return rc;
3062 }
3063
3064 static void storm_memset_func_cfg(struct bnx2x *bp,
3065 struct tstorm_eth_function_common_config *tcfg,
3066 u16 abs_fid)
3067 {
3068 size_t size = sizeof(struct tstorm_eth_function_common_config);
3069
3070 u32 addr = BAR_TSTRORM_INTMEM +
3071 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
3072
3073 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
3074 }
3075
3076 void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
3077 {
3078 if (CHIP_IS_E1x(bp)) {
3079 struct tstorm_eth_function_common_config tcfg = {0};
3080
3081 storm_memset_func_cfg(bp, &tcfg, p->func_id);
3082 }
3083
3084
3085 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
3086 storm_memset_func_en(bp, p->func_id, 1);
3087
3088
3089 if (p->spq_active) {
3090 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
3091 REG_WR(bp, XSEM_REG_FAST_MEMORY +
3092 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
3093 }
3094 }
3095
3096
3097
3098
3099
3100
3101
3102
3103
3104
3105 static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
3106 struct bnx2x_fastpath *fp,
3107 bool zero_stats)
3108 {
3109 unsigned long flags = 0;
3110
3111
3112 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
3113
3114
3115
3116
3117
3118
3119 __set_bit(BNX2X_Q_FLG_STATS, &flags);
3120 if (zero_stats)
3121 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
3122
3123 if (bp->flags & TX_SWITCHING)
3124 __set_bit(BNX2X_Q_FLG_TX_SWITCH, &flags);
3125
3126 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
3127 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
3128
3129 #ifdef BNX2X_STOP_ON_ERROR
3130 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
3131 #endif
3132
3133 return flags;
3134 }
3135
3136 static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
3137 struct bnx2x_fastpath *fp,
3138 bool leading)
3139 {
3140 unsigned long flags = 0;
3141
3142
3143 if (IS_MF_SD(bp))
3144 __set_bit(BNX2X_Q_FLG_OV, &flags);
3145
3146 if (IS_FCOE_FP(fp)) {
3147 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
3148
3149 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
3150 }
3151
3152 if (fp->mode != TPA_MODE_DISABLED) {
3153 __set_bit(BNX2X_Q_FLG_TPA, &flags);
3154 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
3155 if (fp->mode == TPA_MODE_GRO)
3156 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
3157 }
3158
3159 if (leading) {
3160 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
3161 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3162 }
3163
3164
3165 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
3166
3167
3168 if (IS_MF_AFEX(bp))
3169 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3170
3171 return flags | bnx2x_get_common_flags(bp, fp, true);
3172 }
3173
3174 static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
3175 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3176 u8 cos)
3177 {
3178 gen_init->stat_id = bnx2x_stats_id(fp);
3179 gen_init->spcl_id = fp->cl_id;
3180
3181
3182 if (IS_FCOE_FP(fp))
3183 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3184 else
3185 gen_init->mtu = bp->dev->mtu;
3186
3187 gen_init->cos = cos;
3188
3189 gen_init->fp_hsi = ETH_FP_HSI_VERSION;
3190 }
3191
3192 static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
3193 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
3194 struct bnx2x_rxq_setup_params *rxq_init)
3195 {
3196 u8 max_sge = 0;
3197 u16 sge_sz = 0;
3198 u16 tpa_agg_size = 0;
3199
3200 if (fp->mode != TPA_MODE_DISABLED) {
3201 pause->sge_th_lo = SGE_TH_LO(bp);
3202 pause->sge_th_hi = SGE_TH_HI(bp);
3203
3204
3205 WARN_ON(bp->dropless_fc &&
3206 pause->sge_th_hi + FW_PREFETCH_CNT >
3207 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3208
3209 tpa_agg_size = TPA_AGG_SIZE;
3210 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3211 SGE_PAGE_SHIFT;
3212 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3213 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
3214 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
3215 }
3216
3217
3218 if (!CHIP_IS_E1(bp)) {
3219 pause->bd_th_lo = BD_TH_LO(bp);
3220 pause->bd_th_hi = BD_TH_HI(bp);
3221
3222 pause->rcq_th_lo = RCQ_TH_LO(bp);
3223 pause->rcq_th_hi = RCQ_TH_HI(bp);
3224
3225
3226
3227
3228 WARN_ON(bp->dropless_fc &&
3229 pause->bd_th_hi + FW_PREFETCH_CNT >
3230 bp->rx_ring_size);
3231 WARN_ON(bp->dropless_fc &&
3232 pause->rcq_th_hi + FW_PREFETCH_CNT >
3233 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
3234
3235 pause->pri_map = 1;
3236 }
3237
3238
3239 rxq_init->dscr_map = fp->rx_desc_mapping;
3240 rxq_init->sge_map = fp->rx_sge_mapping;
3241 rxq_init->rcq_map = fp->rx_comp_mapping;
3242 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
3243
3244
3245
3246
3247 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3248 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
3249
3250 rxq_init->cl_qzone_id = fp->cl_qzone_id;
3251 rxq_init->tpa_agg_sz = tpa_agg_size;
3252 rxq_init->sge_buf_sz = sge_sz;
3253 rxq_init->max_sges_pkt = max_sge;
3254 rxq_init->rss_engine_id = BP_FUNC(bp);
3255 rxq_init->mcast_engine_id = BP_FUNC(bp);
3256
3257
3258
3259
3260
3261
3262 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
3263
3264 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3265 rxq_init->fw_sb_id = fp->fw_sb_id;
3266
3267 if (IS_FCOE_FP(fp))
3268 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3269 else
3270 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
3271
3272
3273
3274 if (IS_MF_AFEX(bp)) {
3275 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3276 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3277 }
3278 }
3279
3280 static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
3281 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3282 u8 cos)
3283 {
3284 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
3285 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
3286 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3287 txq_init->fw_sb_id = fp->fw_sb_id;
3288
3289
3290
3291
3292
3293 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3294
3295 if (IS_FCOE_FP(fp)) {
3296 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3297 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3298 }
3299 }
3300
3301 static void bnx2x_pf_init(struct bnx2x *bp)
3302 {
3303 struct bnx2x_func_init_params func_init = {0};
3304 struct event_ring_data eq_data = { {0} };
3305
3306 if (!CHIP_IS_E1x(bp)) {
3307
3308
3309 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3310 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3311 (CHIP_MODE_IS_4_PORT(bp) ?
3312 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3313
3314 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3315 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3316 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3317 (CHIP_MODE_IS_4_PORT(bp) ?
3318 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3319 }
3320
3321 func_init.spq_active = true;
3322 func_init.pf_id = BP_FUNC(bp);
3323 func_init.func_id = BP_FUNC(bp);
3324 func_init.spq_map = bp->spq_mapping;
3325 func_init.spq_prod = bp->spq_prod_idx;
3326
3327 bnx2x_func_init(bp, &func_init);
3328
3329 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3330
3331
3332
3333
3334
3335
3336
3337 bp->link_vars.line_speed = SPEED_10000;
3338 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3339
3340
3341 if (bp->port.pmf)
3342 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3343
3344
3345 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3346 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3347 eq_data.producer = bp->eq_prod;
3348 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3349 eq_data.sb_id = DEF_SB_ID;
3350 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3351 }
3352
3353 static void bnx2x_e1h_disable(struct bnx2x *bp)
3354 {
3355 int port = BP_PORT(bp);
3356
3357 bnx2x_tx_disable(bp);
3358
3359 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3360 }
3361
3362 static void bnx2x_e1h_enable(struct bnx2x *bp)
3363 {
3364 int port = BP_PORT(bp);
3365
3366 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
3367 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
3368
3369
3370 netif_tx_wake_all_queues(bp->dev);
3371
3372
3373
3374
3375
3376 }
3377
3378 #define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3379
3380 static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3381 {
3382 struct eth_stats_info *ether_stat =
3383 &bp->slowpath->drv_info_to_mcp.ether_stat;
3384 struct bnx2x_vlan_mac_obj *mac_obj =
3385 &bp->sp_objs->mac_obj;
3386 int i;
3387
3388 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3389 ETH_STAT_INFO_VERSION_LEN);
3390
3391
3392
3393
3394
3395
3396
3397
3398
3399 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3400 memset(ether_stat->mac_local + i, 0,
3401 sizeof(ether_stat->mac_local[0]));
3402 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3403 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3404 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3405 ETH_ALEN);
3406 ether_stat->mtu_size = bp->dev->mtu;
3407 if (bp->dev->features & NETIF_F_RXCSUM)
3408 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3409 if (bp->dev->features & NETIF_F_TSO)
3410 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3411 ether_stat->feature_flags |= bp->common.boot_mode;
3412
3413 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3414
3415 ether_stat->txq_size = bp->tx_ring_size;
3416 ether_stat->rxq_size = bp->rx_ring_size;
3417
3418 #ifdef CONFIG_BNX2X_SRIOV
3419 ether_stat->vf_cnt = IS_SRIOV(bp) ? bp->vfdb->sriov.nr_virtfn : 0;
3420 #endif
3421 }
3422
3423 static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3424 {
3425 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3426 struct fcoe_stats_info *fcoe_stat =
3427 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3428
3429 if (!CNIC_LOADED(bp))
3430 return;
3431
3432 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3433
3434 fcoe_stat->qos_priority =
3435 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3436
3437
3438 if (!NO_FCOE(bp)) {
3439 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
3440 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3441 tstorm_queue_statistics;
3442
3443 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
3444 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
3445 xstorm_queue_statistics;
3446
3447 struct fcoe_statistics_params *fw_fcoe_stat =
3448 &bp->fw_stats_data->fcoe;
3449
3450 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3451 fcoe_stat->rx_bytes_lo,
3452 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
3453
3454 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3455 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3456 fcoe_stat->rx_bytes_lo,
3457 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
3458
3459 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3460 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3461 fcoe_stat->rx_bytes_lo,
3462 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
3463
3464 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3465 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3466 fcoe_stat->rx_bytes_lo,
3467 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
3468
3469 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3470 fcoe_stat->rx_frames_lo,
3471 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
3472
3473 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3474 fcoe_stat->rx_frames_lo,
3475 fcoe_q_tstorm_stats->rcv_ucast_pkts);
3476
3477 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3478 fcoe_stat->rx_frames_lo,
3479 fcoe_q_tstorm_stats->rcv_bcast_pkts);
3480
3481 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3482 fcoe_stat->rx_frames_lo,
3483 fcoe_q_tstorm_stats->rcv_mcast_pkts);
3484
3485 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3486 fcoe_stat->tx_bytes_lo,
3487 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
3488
3489 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3490 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3491 fcoe_stat->tx_bytes_lo,
3492 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
3493
3494 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3495 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3496 fcoe_stat->tx_bytes_lo,
3497 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
3498
3499 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3500 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3501 fcoe_stat->tx_bytes_lo,
3502 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
3503
3504 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3505 fcoe_stat->tx_frames_lo,
3506 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
3507
3508 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3509 fcoe_stat->tx_frames_lo,
3510 fcoe_q_xstorm_stats->ucast_pkts_sent);
3511
3512 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3513 fcoe_stat->tx_frames_lo,
3514 fcoe_q_xstorm_stats->bcast_pkts_sent);
3515
3516 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3517 fcoe_stat->tx_frames_lo,
3518 fcoe_q_xstorm_stats->mcast_pkts_sent);
3519 }
3520
3521
3522 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
3523 }
3524
3525 static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3526 {
3527 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3528 struct iscsi_stats_info *iscsi_stat =
3529 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3530
3531 if (!CNIC_LOADED(bp))
3532 return;
3533
3534 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3535 ETH_ALEN);
3536
3537 iscsi_stat->qos_priority =
3538 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3539
3540
3541 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
3542 }
3543
3544
3545
3546
3547
3548
3549 static void bnx2x_config_mf_bw(struct bnx2x *bp)
3550 {
3551
3552
3553
3554
3555 if (!IS_MF(bp)) {
3556 DP(BNX2X_MSG_MCP,
3557 "Ignoring MF BW config in single function mode\n");
3558 return;
3559 }
3560
3561 if (bp->link_vars.link_up) {
3562 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3563 bnx2x_link_sync_notify(bp);
3564 }
3565 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3566 }
3567
3568 static void bnx2x_set_mf_bw(struct bnx2x *bp)
3569 {
3570 bnx2x_config_mf_bw(bp);
3571 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3572 }
3573
3574 static void bnx2x_handle_eee_event(struct bnx2x *bp)
3575 {
3576 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3577 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3578 }
3579
3580 #define BNX2X_UPDATE_DRV_INFO_IND_LENGTH (20)
3581 #define BNX2X_UPDATE_DRV_INFO_IND_COUNT (25)
3582
3583 static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3584 {
3585 enum drv_info_opcode op_code;
3586 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3587 bool release = false;
3588 int wait;
3589
3590
3591 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3592 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3593 return;
3594 }
3595
3596 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3597 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3598
3599
3600 mutex_lock(&bp->drv_info_mutex);
3601
3602 memset(&bp->slowpath->drv_info_to_mcp, 0,
3603 sizeof(union drv_info_to_mcp));
3604
3605 switch (op_code) {
3606 case ETH_STATS_OPCODE:
3607 bnx2x_drv_info_ether_stat(bp);
3608 break;
3609 case FCOE_STATS_OPCODE:
3610 bnx2x_drv_info_fcoe_stat(bp);
3611 break;
3612 case ISCSI_STATS_OPCODE:
3613 bnx2x_drv_info_iscsi_stat(bp);
3614 break;
3615 default:
3616
3617 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3618 goto out;
3619 }
3620
3621
3622
3623
3624 SHMEM2_WR(bp, drv_info_host_addr_lo,
3625 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3626 SHMEM2_WR(bp, drv_info_host_addr_hi,
3627 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3628
3629 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3630
3631
3632
3633
3634
3635 if (!SHMEM2_HAS(bp, mfw_drv_indication)) {
3636 DP(BNX2X_MSG_MCP, "Management does not support indication\n");
3637 } else if (!bp->drv_info_mng_owner) {
3638 u32 bit = MFW_DRV_IND_READ_DONE_OFFSET((BP_ABS_FUNC(bp) >> 1));
3639
3640 for (wait = 0; wait < BNX2X_UPDATE_DRV_INFO_IND_COUNT; wait++) {
3641 u32 indication = SHMEM2_RD(bp, mfw_drv_indication);
3642
3643
3644 if (indication & bit) {
3645 SHMEM2_WR(bp, mfw_drv_indication,
3646 indication & ~bit);
3647 release = true;
3648 break;
3649 }
3650
3651 msleep(BNX2X_UPDATE_DRV_INFO_IND_LENGTH);
3652 }
3653 }
3654 if (!release) {
3655 DP(BNX2X_MSG_MCP, "Management did not release indication\n");
3656 bp->drv_info_mng_owner = true;
3657 }
3658
3659 out:
3660 mutex_unlock(&bp->drv_info_mutex);
3661 }
3662
3663 static u32 bnx2x_update_mng_version_utility(u8 *version, bool bnx2x_format)
3664 {
3665 u8 vals[4];
3666 int i = 0;
3667
3668 if (bnx2x_format) {
3669 i = sscanf(version, "1.%c%hhd.%hhd.%hhd",
3670 &vals[0], &vals[1], &vals[2], &vals[3]);
3671 if (i > 0)
3672 vals[0] -= '0';
3673 } else {
3674 i = sscanf(version, "%hhd.%hhd.%hhd.%hhd",
3675 &vals[0], &vals[1], &vals[2], &vals[3]);
3676 }
3677
3678 while (i < 4)
3679 vals[i++] = 0;
3680
3681 return (vals[0] << 24) | (vals[1] << 16) | (vals[2] << 8) | vals[3];
3682 }
3683
3684 void bnx2x_update_mng_version(struct bnx2x *bp)
3685 {
3686 u32 iscsiver = DRV_VER_NOT_LOADED;
3687 u32 fcoever = DRV_VER_NOT_LOADED;
3688 u32 ethver = DRV_VER_NOT_LOADED;
3689 int idx = BP_FW_MB_IDX(bp);
3690 u8 *version;
3691
3692 if (!SHMEM2_HAS(bp, func_os_drv_ver))
3693 return;
3694
3695 mutex_lock(&bp->drv_info_mutex);
3696
3697 if (bp->drv_info_mng_owner)
3698 goto out;
3699
3700 if (bp->state != BNX2X_STATE_OPEN)
3701 goto out;
3702
3703
3704 ethver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3705 if (!CNIC_LOADED(bp))
3706 goto out;
3707
3708
3709 memset(&bp->slowpath->drv_info_to_mcp, 0,
3710 sizeof(union drv_info_to_mcp));
3711 bnx2x_drv_info_iscsi_stat(bp);
3712 version = bp->slowpath->drv_info_to_mcp.iscsi_stat.version;
3713 iscsiver = bnx2x_update_mng_version_utility(version, false);
3714
3715 memset(&bp->slowpath->drv_info_to_mcp, 0,
3716 sizeof(union drv_info_to_mcp));
3717 bnx2x_drv_info_fcoe_stat(bp);
3718 version = bp->slowpath->drv_info_to_mcp.fcoe_stat.version;
3719 fcoever = bnx2x_update_mng_version_utility(version, false);
3720
3721 out:
3722 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ETHERNET], ethver);
3723 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_ISCSI], iscsiver);
3724 SHMEM2_WR(bp, func_os_drv_ver[idx].versions[DRV_PERS_FCOE], fcoever);
3725
3726 mutex_unlock(&bp->drv_info_mutex);
3727
3728 DP(BNX2X_MSG_MCP, "Setting driver version: ETH [%08x] iSCSI [%08x] FCoE [%08x]\n",
3729 ethver, iscsiver, fcoever);
3730 }
3731
3732 void bnx2x_update_mfw_dump(struct bnx2x *bp)
3733 {
3734 u32 drv_ver;
3735 u32 valid_dump;
3736
3737 if (!SHMEM2_HAS(bp, drv_info))
3738 return;
3739
3740
3741 SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
3742
3743 drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
3744 SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
3745
3746 SHMEM2_WR(bp, drv_info.fw_ver, REG_RD(bp, XSEM_REG_PRAM));
3747
3748
3749 valid_dump = SHMEM2_RD(bp, drv_info.valid_dump);
3750
3751 if (valid_dump & FIRST_DUMP_VALID)
3752 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 1st partition\n");
3753
3754 if (valid_dump & SECOND_DUMP_VALID)
3755 DP(NETIF_MSG_IFUP, "A valid On-Chip MFW dump found on 2nd partition\n");
3756 }
3757
3758 static void bnx2x_oem_event(struct bnx2x *bp, u32 event)
3759 {
3760 u32 cmd_ok, cmd_fail;
3761
3762
3763 if (event & DRV_STATUS_DCC_EVENT_MASK &&
3764 event & DRV_STATUS_OEM_EVENT_MASK) {
3765 BNX2X_ERR("Received simultaneous events %08x\n", event);
3766 return;
3767 }
3768
3769 if (event & DRV_STATUS_DCC_EVENT_MASK) {
3770 cmd_fail = DRV_MSG_CODE_DCC_FAILURE;
3771 cmd_ok = DRV_MSG_CODE_DCC_OK;
3772 } else {
3773 cmd_fail = DRV_MSG_CODE_OEM_FAILURE;
3774 cmd_ok = DRV_MSG_CODE_OEM_OK;
3775 }
3776
3777 DP(BNX2X_MSG_MCP, "oem_event 0x%x\n", event);
3778
3779 if (event & (DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3780 DRV_STATUS_OEM_DISABLE_ENABLE_PF)) {
3781
3782
3783
3784
3785 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
3786 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
3787 bp->flags |= MF_FUNC_DIS;
3788
3789 bnx2x_e1h_disable(bp);
3790 } else {
3791 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
3792 bp->flags &= ~MF_FUNC_DIS;
3793
3794 bnx2x_e1h_enable(bp);
3795 }
3796 event &= ~(DRV_STATUS_DCC_DISABLE_ENABLE_PF |
3797 DRV_STATUS_OEM_DISABLE_ENABLE_PF);
3798 }
3799
3800 if (event & (DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3801 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION)) {
3802 bnx2x_config_mf_bw(bp);
3803 event &= ~(DRV_STATUS_DCC_BANDWIDTH_ALLOCATION |
3804 DRV_STATUS_OEM_BANDWIDTH_ALLOCATION);
3805 }
3806
3807
3808 if (event)
3809 bnx2x_fw_command(bp, cmd_fail, 0);
3810 else
3811 bnx2x_fw_command(bp, cmd_ok, 0);
3812 }
3813
3814
3815 static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3816 {
3817 struct eth_spe *next_spe = bp->spq_prod_bd;
3818
3819 if (bp->spq_prod_bd == bp->spq_last_bd) {
3820 bp->spq_prod_bd = bp->spq;
3821 bp->spq_prod_idx = 0;
3822 DP(BNX2X_MSG_SP, "end of spq\n");
3823 } else {
3824 bp->spq_prod_bd++;
3825 bp->spq_prod_idx++;
3826 }
3827 return next_spe;
3828 }
3829
3830
3831 static void bnx2x_sp_prod_update(struct bnx2x *bp)
3832 {
3833 int func = BP_FUNC(bp);
3834
3835
3836
3837
3838
3839
3840 mb();
3841
3842 REG_WR16_RELAXED(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
3843 bp->spq_prod_idx);
3844 }
3845
3846
3847
3848
3849
3850
3851
3852 static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3853 {
3854 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3855 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
3856 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3857 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3858 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3859 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3860 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3861 return true;
3862 else
3863 return false;
3864 }
3865
3866
3867
3868
3869
3870
3871
3872
3873
3874
3875
3876
3877
3878
3879
3880 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3881 u32 data_hi, u32 data_lo, int cmd_type)
3882 {
3883 struct eth_spe *spe;
3884 u16 type;
3885 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
3886
3887 #ifdef BNX2X_STOP_ON_ERROR
3888 if (unlikely(bp->panic)) {
3889 BNX2X_ERR("Can't post SP when there is panic\n");
3890 return -EIO;
3891 }
3892 #endif
3893
3894 spin_lock_bh(&bp->spq_lock);
3895
3896 if (common) {
3897 if (!atomic_read(&bp->eq_spq_left)) {
3898 BNX2X_ERR("BUG! EQ ring full!\n");
3899 spin_unlock_bh(&bp->spq_lock);
3900 bnx2x_panic();
3901 return -EBUSY;
3902 }
3903 } else if (!atomic_read(&bp->cq_spq_left)) {
3904 BNX2X_ERR("BUG! SPQ ring full!\n");
3905 spin_unlock_bh(&bp->spq_lock);
3906 bnx2x_panic();
3907 return -EBUSY;
3908 }
3909
3910 spe = bnx2x_sp_get_next(bp);
3911
3912
3913 spe->hdr.conn_and_cmd_data =
3914 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3915 HW_CID(bp, cid));
3916
3917
3918
3919
3920
3921 if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
3922 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
3923 SPE_HDR_CONN_TYPE;
3924 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3925 SPE_HDR_FUNCTION_ID);
3926 } else {
3927 type = cmd_type;
3928 }
3929
3930 spe->hdr.type = cpu_to_le16(type);
3931
3932 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3933 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3934
3935
3936
3937
3938
3939
3940 if (common)
3941 atomic_dec(&bp->eq_spq_left);
3942 else
3943 atomic_dec(&bp->cq_spq_left);
3944
3945 DP(BNX2X_MSG_SP,
3946 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
3947 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3948 (u32)(U64_LO(bp->spq_mapping) +
3949 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
3950 HW_CID(bp, cid), data_hi, data_lo, type,
3951 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
3952
3953 bnx2x_sp_prod_update(bp);
3954 spin_unlock_bh(&bp->spq_lock);
3955 return 0;
3956 }
3957
3958
3959 static int bnx2x_acquire_alr(struct bnx2x *bp)
3960 {
3961 u32 j, val;
3962 int rc = 0;
3963
3964 might_sleep();
3965 for (j = 0; j < 1000; j++) {
3966 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, MCPR_ACCESS_LOCK_LOCK);
3967 val = REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK);
3968 if (val & MCPR_ACCESS_LOCK_LOCK)
3969 break;
3970
3971 usleep_range(5000, 10000);
3972 }
3973 if (!(val & MCPR_ACCESS_LOCK_LOCK)) {
3974 BNX2X_ERR("Cannot acquire MCP access lock register\n");
3975 rc = -EBUSY;
3976 }
3977
3978 return rc;
3979 }
3980
3981
3982 static void bnx2x_release_alr(struct bnx2x *bp)
3983 {
3984 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
3985 }
3986
3987 #define BNX2X_DEF_SB_ATT_IDX 0x0001
3988 #define BNX2X_DEF_SB_IDX 0x0002
3989
3990 static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3991 {
3992 struct host_sp_status_block *def_sb = bp->def_status_blk;
3993 u16 rc = 0;
3994
3995 barrier();
3996 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3997 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
3998 rc |= BNX2X_DEF_SB_ATT_IDX;
3999 }
4000
4001 if (bp->def_idx != def_sb->sp_sb.running_index) {
4002 bp->def_idx = def_sb->sp_sb.running_index;
4003 rc |= BNX2X_DEF_SB_IDX;
4004 }
4005
4006
4007 barrier();
4008 return rc;
4009 }
4010
4011
4012
4013
4014
4015 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4016 {
4017 int port = BP_PORT(bp);
4018 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4019 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4020 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4021 NIG_REG_MASK_INTERRUPT_PORT0;
4022 u32 aeu_mask;
4023 u32 nig_mask = 0;
4024 u32 reg_addr;
4025
4026 if (bp->attn_state & asserted)
4027 BNX2X_ERR("IGU ERROR\n");
4028
4029 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4030 aeu_mask = REG_RD(bp, aeu_addr);
4031
4032 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4033 aeu_mask, asserted);
4034 aeu_mask &= ~(asserted & 0x3ff);
4035 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
4036
4037 REG_WR(bp, aeu_addr, aeu_mask);
4038 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4039
4040 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4041 bp->attn_state |= asserted;
4042 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4043
4044 if (asserted & ATTN_HARD_WIRED_MASK) {
4045 if (asserted & ATTN_NIG_FOR_FUNC) {
4046
4047 bnx2x_acquire_phy_lock(bp);
4048
4049
4050 nig_mask = REG_RD(bp, nig_int_mask_addr);
4051
4052
4053
4054
4055 if (nig_mask) {
4056 REG_WR(bp, nig_int_mask_addr, 0);
4057
4058 bnx2x_link_attn(bp);
4059 }
4060
4061
4062 }
4063 if (asserted & ATTN_SW_TIMER_4_FUNC)
4064 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4065
4066 if (asserted & GPIO_2_FUNC)
4067 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4068
4069 if (asserted & GPIO_3_FUNC)
4070 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4071
4072 if (asserted & GPIO_4_FUNC)
4073 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4074
4075 if (port == 0) {
4076 if (asserted & ATTN_GENERAL_ATTN_1) {
4077 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4079 }
4080 if (asserted & ATTN_GENERAL_ATTN_2) {
4081 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4083 }
4084 if (asserted & ATTN_GENERAL_ATTN_3) {
4085 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4087 }
4088 } else {
4089 if (asserted & ATTN_GENERAL_ATTN_4) {
4090 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4091 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4092 }
4093 if (asserted & ATTN_GENERAL_ATTN_5) {
4094 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4095 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4096 }
4097 if (asserted & ATTN_GENERAL_ATTN_6) {
4098 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4099 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4100 }
4101 }
4102
4103 }
4104
4105 if (bp->common.int_block == INT_BLOCK_HC)
4106 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4107 COMMAND_REG_ATTN_BITS_SET);
4108 else
4109 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
4110
4111 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
4112 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
4113 REG_WR(bp, reg_addr, asserted);
4114
4115
4116 if (asserted & ATTN_NIG_FOR_FUNC) {
4117
4118
4119
4120 if (bp->common.int_block != INT_BLOCK_HC) {
4121 u32 cnt = 0, igu_acked;
4122 do {
4123 igu_acked = REG_RD(bp,
4124 IGU_REG_ATTENTION_ACK_BITS);
4125 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
4126 (++cnt < MAX_IGU_ATTN_ACK_TO));
4127 if (!igu_acked)
4128 DP(NETIF_MSG_HW,
4129 "Failed to verify IGU ack on time\n");
4130 barrier();
4131 }
4132 REG_WR(bp, nig_int_mask_addr, nig_mask);
4133 bnx2x_release_phy_lock(bp);
4134 }
4135 }
4136
4137 static void bnx2x_fan_failure(struct bnx2x *bp)
4138 {
4139 int port = BP_PORT(bp);
4140 u32 ext_phy_config;
4141
4142 ext_phy_config =
4143 SHMEM_RD(bp,
4144 dev_info.port_hw_config[port].external_phy_config);
4145
4146 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
4147 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
4148 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
4149 ext_phy_config);
4150
4151
4152 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
4153 "Please contact OEM Support for assistance\n");
4154
4155
4156
4157
4158
4159 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
4160 }
4161
4162 static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
4163 {
4164 int port = BP_PORT(bp);
4165 int reg_offset;
4166 u32 val;
4167
4168 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4169 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4170
4171 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
4172
4173 val = REG_RD(bp, reg_offset);
4174 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
4175 REG_WR(bp, reg_offset, val);
4176
4177 BNX2X_ERR("SPIO5 hw attention\n");
4178
4179
4180 bnx2x_hw_reset_phy(&bp->link_params);
4181 bnx2x_fan_failure(bp);
4182 }
4183
4184 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
4185 bnx2x_acquire_phy_lock(bp);
4186 bnx2x_handle_module_detect_int(&bp->link_params);
4187 bnx2x_release_phy_lock(bp);
4188 }
4189
4190 if (attn & HW_INTERRUPT_ASSERT_SET_0) {
4191
4192 val = REG_RD(bp, reg_offset);
4193 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_0);
4194 REG_WR(bp, reg_offset, val);
4195
4196 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
4197 (u32)(attn & HW_INTERRUPT_ASSERT_SET_0));
4198 bnx2x_panic();
4199 }
4200 }
4201
4202 static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
4203 {
4204 u32 val;
4205
4206 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
4207
4208 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4209 BNX2X_ERR("DB hw attention 0x%x\n", val);
4210
4211 if (val & 0x2)
4212 BNX2X_ERR("FATAL error from DORQ\n");
4213 }
4214
4215 if (attn & HW_INTERRUPT_ASSERT_SET_1) {
4216
4217 int port = BP_PORT(bp);
4218 int reg_offset;
4219
4220 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
4221 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
4222
4223 val = REG_RD(bp, reg_offset);
4224 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_1);
4225 REG_WR(bp, reg_offset, val);
4226
4227 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
4228 (u32)(attn & HW_INTERRUPT_ASSERT_SET_1));
4229 bnx2x_panic();
4230 }
4231 }
4232
4233 static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
4234 {
4235 u32 val;
4236
4237 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4238
4239 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4240 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4241
4242 if (val & 0x2)
4243 BNX2X_ERR("FATAL error from CFC\n");
4244 }
4245
4246 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4247 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4248 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
4249
4250 if (val & 0x18000)
4251 BNX2X_ERR("FATAL error from PXP\n");
4252
4253 if (!CHIP_IS_E1x(bp)) {
4254 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
4255 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
4256 }
4257 }
4258
4259 if (attn & HW_INTERRUPT_ASSERT_SET_2) {
4260
4261 int port = BP_PORT(bp);
4262 int reg_offset;
4263
4264 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
4265 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
4266
4267 val = REG_RD(bp, reg_offset);
4268 val &= ~(attn & HW_INTERRUPT_ASSERT_SET_2);
4269 REG_WR(bp, reg_offset, val);
4270
4271 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
4272 (u32)(attn & HW_INTERRUPT_ASSERT_SET_2));
4273 bnx2x_panic();
4274 }
4275 }
4276
4277 static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
4278 {
4279 u32 val;
4280
4281 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
4282
4283 if (attn & BNX2X_PMF_LINK_ASSERT) {
4284 int func = BP_FUNC(bp);
4285
4286 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4287 bnx2x_read_mf_cfg(bp);
4288 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
4289 func_mf_config[BP_ABS_FUNC(bp)].config);
4290 val = SHMEM_RD(bp,
4291 func_mb[BP_FW_MB_IDX(bp)].drv_status);
4292
4293 if (val & (DRV_STATUS_DCC_EVENT_MASK |
4294 DRV_STATUS_OEM_EVENT_MASK))
4295 bnx2x_oem_event(bp,
4296 (val & (DRV_STATUS_DCC_EVENT_MASK |
4297 DRV_STATUS_OEM_EVENT_MASK)));
4298
4299 if (val & DRV_STATUS_SET_MF_BW)
4300 bnx2x_set_mf_bw(bp);
4301
4302 if (val & DRV_STATUS_DRV_INFO_REQ)
4303 bnx2x_handle_drv_info_req(bp);
4304
4305 if (val & DRV_STATUS_VF_DISABLED)
4306 bnx2x_schedule_iov_task(bp,
4307 BNX2X_IOV_HANDLE_FLR);
4308
4309 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
4310 bnx2x_pmf_update(bp);
4311
4312 if (bp->port.pmf &&
4313 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
4314 bp->dcbx_enabled > 0)
4315
4316 bnx2x_dcbx_set_params(bp,
4317 BNX2X_DCBX_STATE_NEG_RECEIVED);
4318 if (val & DRV_STATUS_AFEX_EVENT_MASK)
4319 bnx2x_handle_afex_cmd(bp,
4320 val & DRV_STATUS_AFEX_EVENT_MASK);
4321 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
4322 bnx2x_handle_eee_event(bp);
4323
4324 if (val & DRV_STATUS_OEM_UPDATE_SVID)
4325 bnx2x_schedule_sp_rtnl(bp,
4326 BNX2X_SP_RTNL_UPDATE_SVID, 0);
4327
4328 if (bp->link_vars.periodic_flags &
4329 PERIODIC_FLAGS_LINK_EVENT) {
4330
4331 bnx2x_acquire_phy_lock(bp);
4332 bp->link_vars.periodic_flags &=
4333 ~PERIODIC_FLAGS_LINK_EVENT;
4334 bnx2x_release_phy_lock(bp);
4335 if (IS_MF(bp))
4336 bnx2x_link_sync_notify(bp);
4337 bnx2x_link_report(bp);
4338 }
4339
4340
4341
4342 bnx2x__link_status_update(bp);
4343 } else if (attn & BNX2X_MC_ASSERT_BITS) {
4344
4345 BNX2X_ERR("MC assert!\n");
4346 bnx2x_mc_assert(bp);
4347 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4348 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4349 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4350 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4351 bnx2x_panic();
4352
4353 } else if (attn & BNX2X_MCP_ASSERT) {
4354
4355 BNX2X_ERR("MCP assert!\n");
4356 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
4357 bnx2x_fw_dump(bp);
4358
4359 } else
4360 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4361 }
4362
4363 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4364 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4365 if (attn & BNX2X_GRC_TIMEOUT) {
4366 val = CHIP_IS_E1(bp) ? 0 :
4367 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
4368 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4369 }
4370 if (attn & BNX2X_GRC_RSV) {
4371 val = CHIP_IS_E1(bp) ? 0 :
4372 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
4373 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4374 }
4375 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
4376 }
4377 }
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387
4388
4389
4390
4391
4392
4393 #define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4394
4395 #define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4396 #define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4397 #define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4398 #define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4399 #define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4400 #define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4401 #define BNX2X_GLOBAL_RESET_BIT 0x00040000
4402
4403
4404
4405
4406
4407
4408 void bnx2x_set_reset_global(struct bnx2x *bp)
4409 {
4410 u32 val;
4411 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4412 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4413 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
4414 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4415 }
4416
4417
4418
4419
4420
4421
4422 static void bnx2x_clear_reset_global(struct bnx2x *bp)
4423 {
4424 u32 val;
4425 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4426 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4427 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
4428 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4429 }
4430
4431
4432
4433
4434
4435
4436 static bool bnx2x_reset_is_global(struct bnx2x *bp)
4437 {
4438 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4439
4440 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4441 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4442 }
4443
4444
4445
4446
4447
4448
4449 static void bnx2x_set_reset_done(struct bnx2x *bp)
4450 {
4451 u32 val;
4452 u32 bit = BP_PATH(bp) ?
4453 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4454 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4455 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4456
4457
4458 val &= ~bit;
4459 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4460
4461 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4462 }
4463
4464
4465
4466
4467
4468
4469 void bnx2x_set_reset_in_progress(struct bnx2x *bp)
4470 {
4471 u32 val;
4472 u32 bit = BP_PATH(bp) ?
4473 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4474 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4475 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4476
4477
4478 val |= bit;
4479 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4480 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4481 }
4482
4483
4484
4485
4486
4487 bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
4488 {
4489 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4490 u32 bit = engine ?
4491 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4492
4493
4494 return (val & bit) ? false : true;
4495 }
4496
4497
4498
4499
4500
4501
4502 void bnx2x_set_pf_load(struct bnx2x *bp)
4503 {
4504 u32 val1, val;
4505 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4506 BNX2X_PATH0_LOAD_CNT_MASK;
4507 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4508 BNX2X_PATH0_LOAD_CNT_SHIFT;
4509
4510 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4511 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4512
4513 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
4514
4515
4516 val1 = (val & mask) >> shift;
4517
4518
4519 val1 |= (1 << bp->pf_num);
4520
4521
4522 val &= ~mask;
4523
4524
4525 val |= ((val1 << shift) & mask);
4526
4527 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4528 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4529 }
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540 bool bnx2x_clear_pf_load(struct bnx2x *bp)
4541 {
4542 u32 val1, val;
4543 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4544 BNX2X_PATH0_LOAD_CNT_MASK;
4545 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4546 BNX2X_PATH0_LOAD_CNT_SHIFT;
4547
4548 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4549 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4550 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
4551
4552
4553 val1 = (val & mask) >> shift;
4554
4555
4556 val1 &= ~(1 << bp->pf_num);
4557
4558
4559 val &= ~mask;
4560
4561
4562 val |= ((val1 << shift) & mask);
4563
4564 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
4565 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4566 return val1 != 0;
4567 }
4568
4569
4570
4571
4572
4573
4574 static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
4575 {
4576 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4577 BNX2X_PATH0_LOAD_CNT_MASK);
4578 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4579 BNX2X_PATH0_LOAD_CNT_SHIFT);
4580 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4581
4582 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
4583
4584 val = (val & mask) >> shift;
4585
4586 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4587 engine, val);
4588
4589 return val != 0;
4590 }
4591
4592 static void _print_parity(struct bnx2x *bp, u32 reg)
4593 {
4594 pr_cont(" [0x%08x] ", REG_RD(bp, reg));
4595 }
4596
4597 static void _print_next_block(int idx, const char *blk)
4598 {
4599 pr_cont("%s%s", idx ? ", " : "", blk);
4600 }
4601
4602 static bool bnx2x_check_blocks_with_parity0(struct bnx2x *bp, u32 sig,
4603 int *par_num, bool print)
4604 {
4605 u32 cur_bit;
4606 bool res;
4607 int i;
4608
4609 res = false;
4610
4611 for (i = 0; sig; i++) {
4612 cur_bit = (0x1UL << i);
4613 if (sig & cur_bit) {
4614 res |= true;
4615
4616 if (print) {
4617 switch (cur_bit) {
4618 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
4619 _print_next_block((*par_num)++, "BRB");
4620 _print_parity(bp,
4621 BRB1_REG_BRB1_PRTY_STS);
4622 break;
4623 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
4624 _print_next_block((*par_num)++,
4625 "PARSER");
4626 _print_parity(bp, PRS_REG_PRS_PRTY_STS);
4627 break;
4628 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
4629 _print_next_block((*par_num)++, "TSDM");
4630 _print_parity(bp,
4631 TSDM_REG_TSDM_PRTY_STS);
4632 break;
4633 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
4634 _print_next_block((*par_num)++,
4635 "SEARCHER");
4636 _print_parity(bp, SRC_REG_SRC_PRTY_STS);
4637 break;
4638 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4639 _print_next_block((*par_num)++, "TCM");
4640 _print_parity(bp, TCM_REG_TCM_PRTY_STS);
4641 break;
4642 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
4643 _print_next_block((*par_num)++,
4644 "TSEMI");
4645 _print_parity(bp,
4646 TSEM_REG_TSEM_PRTY_STS_0);
4647 _print_parity(bp,
4648 TSEM_REG_TSEM_PRTY_STS_1);
4649 break;
4650 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4651 _print_next_block((*par_num)++, "XPB");
4652 _print_parity(bp, GRCBASE_XPB +
4653 PB_REG_PB_PRTY_STS);
4654 break;
4655 }
4656 }
4657
4658
4659 sig &= ~cur_bit;
4660 }
4661 }
4662
4663 return res;
4664 }
4665
4666 static bool bnx2x_check_blocks_with_parity1(struct bnx2x *bp, u32 sig,
4667 int *par_num, bool *global,
4668 bool print)
4669 {
4670 u32 cur_bit;
4671 bool res;
4672 int i;
4673
4674 res = false;
4675
4676 for (i = 0; sig; i++) {
4677 cur_bit = (0x1UL << i);
4678 if (sig & cur_bit) {
4679 res |= true;
4680 switch (cur_bit) {
4681 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4682 if (print) {
4683 _print_next_block((*par_num)++, "PBF");
4684 _print_parity(bp, PBF_REG_PBF_PRTY_STS);
4685 }
4686 break;
4687 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
4688 if (print) {
4689 _print_next_block((*par_num)++, "QM");
4690 _print_parity(bp, QM_REG_QM_PRTY_STS);
4691 }
4692 break;
4693 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4694 if (print) {
4695 _print_next_block((*par_num)++, "TM");
4696 _print_parity(bp, TM_REG_TM_PRTY_STS);
4697 }
4698 break;
4699 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
4700 if (print) {
4701 _print_next_block((*par_num)++, "XSDM");
4702 _print_parity(bp,
4703 XSDM_REG_XSDM_PRTY_STS);
4704 }
4705 break;
4706 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4707 if (print) {
4708 _print_next_block((*par_num)++, "XCM");
4709 _print_parity(bp, XCM_REG_XCM_PRTY_STS);
4710 }
4711 break;
4712 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
4713 if (print) {
4714 _print_next_block((*par_num)++,
4715 "XSEMI");
4716 _print_parity(bp,
4717 XSEM_REG_XSEM_PRTY_STS_0);
4718 _print_parity(bp,
4719 XSEM_REG_XSEM_PRTY_STS_1);
4720 }
4721 break;
4722 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
4723 if (print) {
4724 _print_next_block((*par_num)++,
4725 "DOORBELLQ");
4726 _print_parity(bp,
4727 DORQ_REG_DORQ_PRTY_STS);
4728 }
4729 break;
4730 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4731 if (print) {
4732 _print_next_block((*par_num)++, "NIG");
4733 if (CHIP_IS_E1x(bp)) {
4734 _print_parity(bp,
4735 NIG_REG_NIG_PRTY_STS);
4736 } else {
4737 _print_parity(bp,
4738 NIG_REG_NIG_PRTY_STS_0);
4739 _print_parity(bp,
4740 NIG_REG_NIG_PRTY_STS_1);
4741 }
4742 }
4743 break;
4744 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
4745 if (print)
4746 _print_next_block((*par_num)++,
4747 "VAUX PCI CORE");
4748 *global = true;
4749 break;
4750 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
4751 if (print) {
4752 _print_next_block((*par_num)++,
4753 "DEBUG");
4754 _print_parity(bp, DBG_REG_DBG_PRTY_STS);
4755 }
4756 break;
4757 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
4758 if (print) {
4759 _print_next_block((*par_num)++, "USDM");
4760 _print_parity(bp,
4761 USDM_REG_USDM_PRTY_STS);
4762 }
4763 break;
4764 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4765 if (print) {
4766 _print_next_block((*par_num)++, "UCM");
4767 _print_parity(bp, UCM_REG_UCM_PRTY_STS);
4768 }
4769 break;
4770 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
4771 if (print) {
4772 _print_next_block((*par_num)++,
4773 "USEMI");
4774 _print_parity(bp,
4775 USEM_REG_USEM_PRTY_STS_0);
4776 _print_parity(bp,
4777 USEM_REG_USEM_PRTY_STS_1);
4778 }
4779 break;
4780 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
4781 if (print) {
4782 _print_next_block((*par_num)++, "UPB");
4783 _print_parity(bp, GRCBASE_UPB +
4784 PB_REG_PB_PRTY_STS);
4785 }
4786 break;
4787 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
4788 if (print) {
4789 _print_next_block((*par_num)++, "CSDM");
4790 _print_parity(bp,
4791 CSDM_REG_CSDM_PRTY_STS);
4792 }
4793 break;
4794 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4795 if (print) {
4796 _print_next_block((*par_num)++, "CCM");
4797 _print_parity(bp, CCM_REG_CCM_PRTY_STS);
4798 }
4799 break;
4800 }
4801
4802
4803 sig &= ~cur_bit;
4804 }
4805 }
4806
4807 return res;
4808 }
4809
4810 static bool bnx2x_check_blocks_with_parity2(struct bnx2x *bp, u32 sig,
4811 int *par_num, bool print)
4812 {
4813 u32 cur_bit;
4814 bool res;
4815 int i;
4816
4817 res = false;
4818
4819 for (i = 0; sig; i++) {
4820 cur_bit = (0x1UL << i);
4821 if (sig & cur_bit) {
4822 res = true;
4823 if (print) {
4824 switch (cur_bit) {
4825 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
4826 _print_next_block((*par_num)++,
4827 "CSEMI");
4828 _print_parity(bp,
4829 CSEM_REG_CSEM_PRTY_STS_0);
4830 _print_parity(bp,
4831 CSEM_REG_CSEM_PRTY_STS_1);
4832 break;
4833 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
4834 _print_next_block((*par_num)++, "PXP");
4835 _print_parity(bp, PXP_REG_PXP_PRTY_STS);
4836 _print_parity(bp,
4837 PXP2_REG_PXP2_PRTY_STS_0);
4838 _print_parity(bp,
4839 PXP2_REG_PXP2_PRTY_STS_1);
4840 break;
4841 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
4842 _print_next_block((*par_num)++,
4843 "PXPPCICLOCKCLIENT");
4844 break;
4845 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
4846 _print_next_block((*par_num)++, "CFC");
4847 _print_parity(bp,
4848 CFC_REG_CFC_PRTY_STS);
4849 break;
4850 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
4851 _print_next_block((*par_num)++, "CDU");
4852 _print_parity(bp, CDU_REG_CDU_PRTY_STS);
4853 break;
4854 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4855 _print_next_block((*par_num)++, "DMAE");
4856 _print_parity(bp,
4857 DMAE_REG_DMAE_PRTY_STS);
4858 break;
4859 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
4860 _print_next_block((*par_num)++, "IGU");
4861 if (CHIP_IS_E1x(bp))
4862 _print_parity(bp,
4863 HC_REG_HC_PRTY_STS);
4864 else
4865 _print_parity(bp,
4866 IGU_REG_IGU_PRTY_STS);
4867 break;
4868 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
4869 _print_next_block((*par_num)++, "MISC");
4870 _print_parity(bp,
4871 MISC_REG_MISC_PRTY_STS);
4872 break;
4873 }
4874 }
4875
4876
4877 sig &= ~cur_bit;
4878 }
4879 }
4880
4881 return res;
4882 }
4883
4884 static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig,
4885 int *par_num, bool *global,
4886 bool print)
4887 {
4888 bool res = false;
4889 u32 cur_bit;
4890 int i;
4891
4892 for (i = 0; sig; i++) {
4893 cur_bit = (0x1UL << i);
4894 if (sig & cur_bit) {
4895 switch (cur_bit) {
4896 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
4897 if (print)
4898 _print_next_block((*par_num)++,
4899 "MCP ROM");
4900 *global = true;
4901 res = true;
4902 break;
4903 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
4904 if (print)
4905 _print_next_block((*par_num)++,
4906 "MCP UMP RX");
4907 *global = true;
4908 res = true;
4909 break;
4910 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
4911 if (print)
4912 _print_next_block((*par_num)++,
4913 "MCP UMP TX");
4914 *global = true;
4915 res = true;
4916 break;
4917 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
4918 (*par_num)++;
4919
4920 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4921 1UL << 10);
4922 break;
4923 }
4924
4925
4926 sig &= ~cur_bit;
4927 }
4928 }
4929
4930 return res;
4931 }
4932
4933 static bool bnx2x_check_blocks_with_parity4(struct bnx2x *bp, u32 sig,
4934 int *par_num, bool print)
4935 {
4936 u32 cur_bit;
4937 bool res;
4938 int i;
4939
4940 res = false;
4941
4942 for (i = 0; sig; i++) {
4943 cur_bit = (0x1UL << i);
4944 if (sig & cur_bit) {
4945 res = true;
4946 if (print) {
4947 switch (cur_bit) {
4948 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4949 _print_next_block((*par_num)++,
4950 "PGLUE_B");
4951 _print_parity(bp,
4952 PGLUE_B_REG_PGLUE_B_PRTY_STS);
4953 break;
4954 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4955 _print_next_block((*par_num)++, "ATC");
4956 _print_parity(bp,
4957 ATC_REG_ATC_PRTY_STS);
4958 break;
4959 }
4960 }
4961
4962 sig &= ~cur_bit;
4963 }
4964 }
4965
4966 return res;
4967 }
4968
4969 static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4970 u32 *sig)
4971 {
4972 bool res = false;
4973
4974 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4975 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4976 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4977 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4978 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
4979 int par_num = 0;
4980
4981 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4982 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
4983 sig[0] & HW_PRTY_ASSERT_SET_0,
4984 sig[1] & HW_PRTY_ASSERT_SET_1,
4985 sig[2] & HW_PRTY_ASSERT_SET_2,
4986 sig[3] & HW_PRTY_ASSERT_SET_3,
4987 sig[4] & HW_PRTY_ASSERT_SET_4);
4988 if (print) {
4989 if (((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4990 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4991 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4992 (sig[4] & HW_PRTY_ASSERT_SET_4)) ||
4993 (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) {
4994 netdev_err(bp->dev,
4995 "Parity errors detected in blocks: ");
4996 } else {
4997 print = false;
4998 }
4999 }
5000 res |= bnx2x_check_blocks_with_parity0(bp,
5001 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print);
5002 res |= bnx2x_check_blocks_with_parity1(bp,
5003 sig[1] & HW_PRTY_ASSERT_SET_1, &par_num, global, print);
5004 res |= bnx2x_check_blocks_with_parity2(bp,
5005 sig[2] & HW_PRTY_ASSERT_SET_2, &par_num, print);
5006 res |= bnx2x_check_blocks_with_parity3(bp,
5007 sig[3] & HW_PRTY_ASSERT_SET_3, &par_num, global, print);
5008 res |= bnx2x_check_blocks_with_parity4(bp,
5009 sig[4] & HW_PRTY_ASSERT_SET_4, &par_num, print);
5010
5011 if (print)
5012 pr_cont("\n");
5013 }
5014
5015 return res;
5016 }
5017
5018
5019
5020
5021
5022
5023
5024
5025 bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
5026 {
5027 struct attn_route attn = { {0} };
5028 int port = BP_PORT(bp);
5029
5030 attn.sig[0] = REG_RD(bp,
5031 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
5032 port*4);
5033 attn.sig[1] = REG_RD(bp,
5034 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
5035 port*4);
5036 attn.sig[2] = REG_RD(bp,
5037 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
5038 port*4);
5039 attn.sig[3] = REG_RD(bp,
5040 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
5041 port*4);
5042
5043
5044
5045 attn.sig[3] &= ((REG_RD(bp,
5046 !port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0
5047 : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0) &
5048 MISC_AEU_ENABLE_MCP_PRTY_BITS) |
5049 ~MISC_AEU_ENABLE_MCP_PRTY_BITS);
5050
5051 if (!CHIP_IS_E1x(bp))
5052 attn.sig[4] = REG_RD(bp,
5053 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
5054 port*4);
5055
5056 return bnx2x_parity_attn(bp, global, print, attn.sig);
5057 }
5058
5059 static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
5060 {
5061 u32 val;
5062 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
5063
5064 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
5065 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
5066 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
5067 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
5068 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
5069 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
5070 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
5071 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
5072 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
5073 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
5074 if (val &
5075 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
5076 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
5077 if (val &
5078 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
5079 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
5080 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
5081 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
5082 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
5083 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
5084 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
5085 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
5086 }
5087 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
5088 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
5089 BNX2X_ERR("ATC hw attention 0x%x\n", val);
5090 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
5091 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
5092 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
5093 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
5094 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
5095 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
5096 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
5097 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
5098 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
5099 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
5100 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
5101 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
5102 }
5103
5104 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5105 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
5106 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
5107 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
5108 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
5109 }
5110 }
5111
5112 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
5113 {
5114 struct attn_route attn, *group_mask;
5115 int port = BP_PORT(bp);
5116 int index;
5117 u32 reg_addr;
5118 u32 val;
5119 u32 aeu_mask;
5120 bool global = false;
5121
5122
5123
5124 bnx2x_acquire_alr(bp);
5125
5126 if (bnx2x_chk_parity_attn(bp, &global, true)) {
5127 #ifndef BNX2X_STOP_ON_ERROR
5128 bp->recovery_state = BNX2X_RECOVERY_INIT;
5129 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5130
5131 bnx2x_int_disable(bp);
5132
5133
5134
5135 #else
5136 bnx2x_panic();
5137 #endif
5138 bnx2x_release_alr(bp);
5139 return;
5140 }
5141
5142 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
5143 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
5144 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
5145 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
5146 if (!CHIP_IS_E1x(bp))
5147 attn.sig[4] =
5148 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
5149 else
5150 attn.sig[4] = 0;
5151
5152 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
5153 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
5154
5155 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5156 if (deasserted & (1 << index)) {
5157 group_mask = &bp->attn_group[index];
5158
5159 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
5160 index,
5161 group_mask->sig[0], group_mask->sig[1],
5162 group_mask->sig[2], group_mask->sig[3],
5163 group_mask->sig[4]);
5164
5165 bnx2x_attn_int_deasserted4(bp,
5166 attn.sig[4] & group_mask->sig[4]);
5167 bnx2x_attn_int_deasserted3(bp,
5168 attn.sig[3] & group_mask->sig[3]);
5169 bnx2x_attn_int_deasserted1(bp,
5170 attn.sig[1] & group_mask->sig[1]);
5171 bnx2x_attn_int_deasserted2(bp,
5172 attn.sig[2] & group_mask->sig[2]);
5173 bnx2x_attn_int_deasserted0(bp,
5174 attn.sig[0] & group_mask->sig[0]);
5175 }
5176 }
5177
5178 bnx2x_release_alr(bp);
5179
5180 if (bp->common.int_block == INT_BLOCK_HC)
5181 reg_addr = (HC_REG_COMMAND_REG + port*32 +
5182 COMMAND_REG_ATTN_BITS_CLR);
5183 else
5184 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
5185
5186 val = ~deasserted;
5187 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
5188 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5189 REG_WR(bp, reg_addr, val);
5190
5191 if (~bp->attn_state & deasserted)
5192 BNX2X_ERR("IGU ERROR\n");
5193
5194 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5195 MISC_REG_AEU_MASK_ATTN_FUNC_0;
5196
5197 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5198 aeu_mask = REG_RD(bp, reg_addr);
5199
5200 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
5201 aeu_mask, deasserted);
5202 aeu_mask |= (deasserted & 0x3ff);
5203 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
5204
5205 REG_WR(bp, reg_addr, aeu_mask);
5206 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
5207
5208 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
5209 bp->attn_state &= ~deasserted;
5210 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
5211 }
5212
5213 static void bnx2x_attn_int(struct bnx2x *bp)
5214 {
5215
5216 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
5217 attn_bits);
5218 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
5219 attn_bits_ack);
5220 u32 attn_state = bp->attn_state;
5221
5222
5223 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
5224 u32 deasserted = ~attn_bits & attn_ack & attn_state;
5225
5226 DP(NETIF_MSG_HW,
5227 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
5228 attn_bits, attn_ack, asserted, deasserted);
5229
5230 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
5231 BNX2X_ERR("BAD attention state\n");
5232
5233
5234 if (asserted)
5235 bnx2x_attn_int_asserted(bp, asserted);
5236
5237 if (deasserted)
5238 bnx2x_attn_int_deasserted(bp, deasserted);
5239 }
5240
5241 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
5242 u16 index, u8 op, u8 update)
5243 {
5244 u32 igu_addr = bp->igu_base_addr;
5245 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
5246 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
5247 igu_addr);
5248 }
5249
5250 static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
5251 {
5252
5253 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
5254 }
5255
5256 static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
5257 union event_ring_elem *elem)
5258 {
5259 u8 err = elem->message.error;
5260
5261 if (!bp->cnic_eth_dev.starting_cid ||
5262 (cid < bp->cnic_eth_dev.starting_cid &&
5263 cid != bp->cnic_eth_dev.iscsi_l2_cid))
5264 return 1;
5265
5266 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
5267
5268 if (unlikely(err)) {
5269
5270 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
5271 cid);
5272 bnx2x_panic_dump(bp, false);
5273 }
5274 bnx2x_cnic_cfc_comp(bp, cid, err);
5275 return 0;
5276 }
5277
5278 static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
5279 {
5280 struct bnx2x_mcast_ramrod_params rparam;
5281 int rc;
5282
5283 memset(&rparam, 0, sizeof(rparam));
5284
5285 rparam.mcast_obj = &bp->mcast_obj;
5286
5287 netif_addr_lock_bh(bp->dev);
5288
5289
5290 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
5291
5292
5293 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
5294 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
5295 if (rc < 0)
5296 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
5297 rc);
5298 }
5299
5300 netif_addr_unlock_bh(bp->dev);
5301 }
5302
5303 static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5304 union event_ring_elem *elem)
5305 {
5306 unsigned long ramrod_flags = 0;
5307 int rc = 0;
5308 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5309 u32 cid = echo & BNX2X_SWCID_MASK;
5310 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5311
5312
5313 __set_bit(RAMROD_CONT, &ramrod_flags);
5314
5315 switch (echo >> BNX2X_SWCID_SHIFT) {
5316 case BNX2X_FILTER_MAC_PENDING:
5317 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5318 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
5319 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
5320 else
5321 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
5322
5323 break;
5324 case BNX2X_FILTER_VLAN_PENDING:
5325 DP(BNX2X_MSG_SP, "Got SETUP_VLAN completions\n");
5326 vlan_mac_obj = &bp->sp_objs[cid].vlan_obj;
5327 break;
5328 case BNX2X_FILTER_MCAST_PENDING:
5329 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
5330
5331
5332
5333 bnx2x_handle_mcast_eqe(bp);
5334 return;
5335 default:
5336 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5337 return;
5338 }
5339
5340 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
5341
5342 if (rc < 0)
5343 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
5344 else if (rc > 0)
5345 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
5346 }
5347
5348 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
5349
5350 static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
5351 {
5352 netif_addr_lock_bh(bp->dev);
5353
5354 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5355
5356
5357 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
5358 bnx2x_set_storm_rx_mode(bp);
5359 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
5360 &bp->sp_state))
5361 bnx2x_set_iscsi_eth_rx_mode(bp, true);
5362 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
5363 &bp->sp_state))
5364 bnx2x_set_iscsi_eth_rx_mode(bp, false);
5365
5366 netif_addr_unlock_bh(bp->dev);
5367 }
5368
5369 static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
5370 union event_ring_elem *elem)
5371 {
5372 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
5373 DP(BNX2X_MSG_SP,
5374 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
5375 elem->message.data.vif_list_event.func_bit_map);
5376 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
5377 elem->message.data.vif_list_event.func_bit_map);
5378 } else if (elem->message.data.vif_list_event.echo ==
5379 VIF_LIST_RULE_SET) {
5380 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
5381 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
5382 }
5383 }
5384
5385
5386 static void bnx2x_after_function_update(struct bnx2x *bp)
5387 {
5388 int q, rc;
5389 struct bnx2x_fastpath *fp;
5390 struct bnx2x_queue_state_params queue_params = {NULL};
5391 struct bnx2x_queue_update_params *q_update_params =
5392 &queue_params.params.update;
5393
5394
5395 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
5396
5397
5398 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
5399 &q_update_params->update_flags);
5400 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
5401 &q_update_params->update_flags);
5402 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5403
5404
5405 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
5406 q_update_params->silent_removal_value = 0;
5407 q_update_params->silent_removal_mask = 0;
5408 } else {
5409 q_update_params->silent_removal_value =
5410 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
5411 q_update_params->silent_removal_mask = VLAN_VID_MASK;
5412 }
5413
5414 for_each_eth_queue(bp, q) {
5415
5416 fp = &bp->fp[q];
5417 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5418
5419
5420 rc = bnx2x_queue_state_change(bp, &queue_params);
5421 if (rc < 0)
5422 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5423 q);
5424 }
5425
5426 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
5427 fp = &bp->fp[FCOE_IDX(bp)];
5428 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
5429
5430
5431 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
5432
5433
5434 smp_mb__before_atomic();
5435 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
5436 smp_mb__after_atomic();
5437
5438
5439 rc = bnx2x_queue_state_change(bp, &queue_params);
5440 if (rc < 0)
5441 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
5442 q);
5443 } else {
5444
5445 bnx2x_link_report(bp);
5446 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5447 }
5448 }
5449
5450 static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
5451 struct bnx2x *bp, u32 cid)
5452 {
5453 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
5454
5455 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
5456 return &bnx2x_fcoe_sp_obj(bp, q_obj);
5457 else
5458 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
5459 }
5460
5461 static void bnx2x_eq_int(struct bnx2x *bp)
5462 {
5463 u16 hw_cons, sw_cons, sw_prod;
5464 union event_ring_elem *elem;
5465 u8 echo;
5466 u32 cid;
5467 u8 opcode;
5468 int rc, spqe_cnt = 0;
5469 struct bnx2x_queue_sp_obj *q_obj;
5470 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5471 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
5472
5473 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5474
5475
5476
5477
5478
5479
5480 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5481 hw_cons++;
5482
5483
5484
5485
5486
5487 sw_cons = bp->eq_cons;
5488 sw_prod = bp->eq_prod;
5489
5490 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
5491 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
5492
5493 for (; sw_cons != hw_cons;
5494 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5495
5496 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5497
5498 rc = bnx2x_iov_eq_sp_event(bp, elem);
5499 if (!rc) {
5500 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5501 rc);
5502 goto next_spqe;
5503 }
5504
5505 opcode = elem->message.opcode;
5506
5507
5508 switch (opcode) {
5509 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5510 bnx2x_vf_mbx_schedule(bp,
5511 &elem->message.data.vf_pf_event);
5512 continue;
5513
5514 case EVENT_RING_OPCODE_STAT_QUERY:
5515 DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
5516 "got statistics comp event %d\n",
5517 bp->stats_comp++);
5518
5519 goto next_spqe;
5520
5521 case EVENT_RING_OPCODE_CFC_DEL:
5522
5523
5524
5525
5526
5527
5528
5529 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5530
5531 DP(BNX2X_MSG_SP,
5532 "got delete ramrod for MULTI[%d]\n", cid);
5533
5534 if (CNIC_LOADED(bp) &&
5535 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
5536 goto next_spqe;
5537
5538 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5539
5540 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5541 break;
5542
5543 goto next_spqe;
5544
5545 case EVENT_RING_OPCODE_STOP_TRAFFIC:
5546 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
5547 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5548 if (f_obj->complete_cmd(bp, f_obj,
5549 BNX2X_F_CMD_TX_STOP))
5550 break;
5551 goto next_spqe;
5552
5553 case EVENT_RING_OPCODE_START_TRAFFIC:
5554 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
5555 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5556 if (f_obj->complete_cmd(bp, f_obj,
5557 BNX2X_F_CMD_TX_START))
5558 break;
5559 goto next_spqe;
5560
5561 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
5562 echo = elem->message.data.function_update_event.echo;
5563 if (echo == SWITCH_UPDATE) {
5564 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5565 "got FUNC_SWITCH_UPDATE ramrod\n");
5566 if (f_obj->complete_cmd(
5567 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5568 break;
5569
5570 } else {
5571 int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
5572
5573 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5574 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5575 f_obj->complete_cmd(bp, f_obj,
5576 BNX2X_F_CMD_AFEX_UPDATE);
5577
5578
5579
5580
5581
5582 bnx2x_schedule_sp_rtnl(bp, cmd, 0);
5583 }
5584
5585 goto next_spqe;
5586
5587 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5588 f_obj->complete_cmd(bp, f_obj,
5589 BNX2X_F_CMD_AFEX_VIFLISTS);
5590 bnx2x_after_afex_vif_lists(bp, elem);
5591 goto next_spqe;
5592 case EVENT_RING_OPCODE_FUNCTION_START:
5593 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5594 "got FUNC_START ramrod\n");
5595 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5596 break;
5597
5598 goto next_spqe;
5599
5600 case EVENT_RING_OPCODE_FUNCTION_STOP:
5601 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5602 "got FUNC_STOP ramrod\n");
5603 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5604 break;
5605
5606 goto next_spqe;
5607
5608 case EVENT_RING_OPCODE_SET_TIMESYNC:
5609 DP(BNX2X_MSG_SP | BNX2X_MSG_PTP,
5610 "got set_timesync ramrod completion\n");
5611 if (f_obj->complete_cmd(bp, f_obj,
5612 BNX2X_F_CMD_SET_TIMESYNC))
5613 break;
5614 goto next_spqe;
5615 }
5616
5617 switch (opcode | bp->state) {
5618 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5619 BNX2X_STATE_OPEN):
5620 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5621 BNX2X_STATE_OPENING_WAIT4_PORT):
5622 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5623 BNX2X_STATE_CLOSING_WAIT4_HALT):
5624 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5625 SW_CID(elem->message.data.eth_event.echo));
5626 rss_raw->clear_pending(rss_raw);
5627 break;
5628
5629 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5630 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5631 case (EVENT_RING_OPCODE_SET_MAC |
5632 BNX2X_STATE_CLOSING_WAIT4_HALT):
5633 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5634 BNX2X_STATE_OPEN):
5635 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5636 BNX2X_STATE_DIAG):
5637 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5638 BNX2X_STATE_CLOSING_WAIT4_HALT):
5639 DP(BNX2X_MSG_SP, "got (un)set vlan/mac ramrod\n");
5640 bnx2x_handle_classification_eqe(bp, elem);
5641 break;
5642
5643 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5644 BNX2X_STATE_OPEN):
5645 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5646 BNX2X_STATE_DIAG):
5647 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5648 BNX2X_STATE_CLOSING_WAIT4_HALT):
5649 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
5650 bnx2x_handle_mcast_eqe(bp);
5651 break;
5652
5653 case (EVENT_RING_OPCODE_FILTERS_RULES |
5654 BNX2X_STATE_OPEN):
5655 case (EVENT_RING_OPCODE_FILTERS_RULES |
5656 BNX2X_STATE_DIAG):
5657 case (EVENT_RING_OPCODE_FILTERS_RULES |
5658 BNX2X_STATE_CLOSING_WAIT4_HALT):
5659 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
5660 bnx2x_handle_rx_mode_eqe(bp);
5661 break;
5662 default:
5663
5664 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5665 elem->message.opcode, bp->state);
5666 }
5667 next_spqe:
5668 spqe_cnt++;
5669 }
5670
5671 smp_mb__before_atomic();
5672 atomic_add(spqe_cnt, &bp->eq_spq_left);
5673
5674 bp->eq_cons = sw_cons;
5675 bp->eq_prod = sw_prod;
5676
5677 smp_wmb();
5678
5679
5680 bnx2x_update_eq_prod(bp, bp->eq_prod);
5681 }
5682
5683 static void bnx2x_sp_task(struct work_struct *work)
5684 {
5685 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
5686
5687 DP(BNX2X_MSG_SP, "sp task invoked\n");
5688
5689
5690 smp_rmb();
5691 if (atomic_read(&bp->interrupt_occurred)) {
5692
5693
5694 u16 status = bnx2x_update_dsb_idx(bp);
5695
5696 DP(BNX2X_MSG_SP, "status %x\n", status);
5697 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5698 atomic_set(&bp->interrupt_occurred, 0);
5699
5700
5701 if (status & BNX2X_DEF_SB_ATT_IDX) {
5702 bnx2x_attn_int(bp);
5703 status &= ~BNX2X_DEF_SB_ATT_IDX;
5704 }
5705
5706
5707 if (status & BNX2X_DEF_SB_IDX) {
5708 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5709
5710 if (FCOE_INIT(bp) &&
5711 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5712
5713
5714
5715 local_bh_disable();
5716 napi_schedule(&bnx2x_fcoe(bp, napi));
5717 local_bh_enable();
5718 }
5719
5720
5721 bnx2x_eq_int(bp);
5722 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5723 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5724
5725 status &= ~BNX2X_DEF_SB_IDX;
5726 }
5727
5728
5729 if (unlikely(status))
5730 DP(BNX2X_MSG_SP,
5731 "got an unknown interrupt! (status 0x%x)\n", status);
5732
5733
5734 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5735 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5736 }
5737
5738
5739 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5740 &bp->sp_state)) {
5741 bnx2x_link_report(bp);
5742 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5743 }
5744 }
5745
5746 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
5747 {
5748 struct net_device *dev = dev_instance;
5749 struct bnx2x *bp = netdev_priv(dev);
5750
5751 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5752 IGU_INT_DISABLE, 0);
5753
5754 #ifdef BNX2X_STOP_ON_ERROR
5755 if (unlikely(bp->panic))
5756 return IRQ_HANDLED;
5757 #endif
5758
5759 if (CNIC_LOADED(bp)) {
5760 struct cnic_ops *c_ops;
5761
5762 rcu_read_lock();
5763 c_ops = rcu_dereference(bp->cnic_ops);
5764 if (c_ops)
5765 c_ops->cnic_handler(bp->cnic_data, NULL);
5766 rcu_read_unlock();
5767 }
5768
5769
5770
5771
5772 bnx2x_schedule_sp_task(bp);
5773
5774 return IRQ_HANDLED;
5775 }
5776
5777
5778
5779 void bnx2x_drv_pulse(struct bnx2x *bp)
5780 {
5781 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5782 bp->fw_drv_pulse_wr_seq);
5783 }
5784
5785 static void bnx2x_timer(struct timer_list *t)
5786 {
5787 struct bnx2x *bp = from_timer(bp, t, timer);
5788
5789 if (!netif_running(bp->dev))
5790 return;
5791
5792 if (IS_PF(bp) &&
5793 !BP_NOMCP(bp)) {
5794 int mb_idx = BP_FW_MB_IDX(bp);
5795 u16 drv_pulse;
5796 u16 mcp_pulse;
5797
5798 ++bp->fw_drv_pulse_wr_seq;
5799 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5800 drv_pulse = bp->fw_drv_pulse_wr_seq;
5801 bnx2x_drv_pulse(bp);
5802
5803 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
5804 MCP_PULSE_SEQ_MASK);
5805
5806
5807
5808
5809
5810 if (((drv_pulse - mcp_pulse) & MCP_PULSE_SEQ_MASK) > 5)
5811 BNX2X_ERR("MFW seems hanged: drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5812 drv_pulse, mcp_pulse);
5813 }
5814
5815 if (bp->state == BNX2X_STATE_OPEN)
5816 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5817
5818
5819 if (IS_VF(bp))
5820 bnx2x_timer_sriov(bp);
5821
5822 mod_timer(&bp->timer, jiffies + bp->current_interval);
5823 }
5824
5825
5826
5827
5828
5829
5830
5831
5832
5833 static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
5834 {
5835 u32 i;
5836 if (!(len%4) && !(addr%4))
5837 for (i = 0; i < len; i += 4)
5838 REG_WR(bp, addr + i, fill);
5839 else
5840 for (i = 0; i < len; i++)
5841 REG_WR8(bp, addr + i, fill);
5842 }
5843
5844
5845 static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5846 int fw_sb_id,
5847 u32 *sb_data_p,
5848 u32 data_size)
5849 {
5850 int index;
5851 for (index = 0; index < data_size; index++)
5852 REG_WR(bp, BAR_CSTRORM_INTMEM +
5853 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5854 sizeof(u32)*index,
5855 *(sb_data_p + index));
5856 }
5857
5858 static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
5859 {
5860 u32 *sb_data_p;
5861 u32 data_size = 0;
5862 struct hc_status_block_data_e2 sb_data_e2;
5863 struct hc_status_block_data_e1x sb_data_e1x;
5864
5865
5866 if (!CHIP_IS_E1x(bp)) {
5867 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5868 sb_data_e2.common.state = SB_DISABLED;
5869 sb_data_e2.common.p_func.vf_valid = false;
5870 sb_data_p = (u32 *)&sb_data_e2;
5871 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5872 } else {
5873 memset(&sb_data_e1x, 0,
5874 sizeof(struct hc_status_block_data_e1x));
5875 sb_data_e1x.common.state = SB_DISABLED;
5876 sb_data_e1x.common.p_func.vf_valid = false;
5877 sb_data_p = (u32 *)&sb_data_e1x;
5878 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5879 }
5880 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5881
5882 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5883 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5884 CSTORM_STATUS_BLOCK_SIZE);
5885 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5886 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5887 CSTORM_SYNC_BLOCK_SIZE);
5888 }
5889
5890
5891 static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
5892 struct hc_sp_status_block_data *sp_sb_data)
5893 {
5894 int func = BP_FUNC(bp);
5895 int i;
5896 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5897 REG_WR(bp, BAR_CSTRORM_INTMEM +
5898 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5899 i*sizeof(u32),
5900 *((u32 *)sp_sb_data + i));
5901 }
5902
5903 static void bnx2x_zero_sp_sb(struct bnx2x *bp)
5904 {
5905 int func = BP_FUNC(bp);
5906 struct hc_sp_status_block_data sp_sb_data;
5907 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5908
5909 sp_sb_data.state = SB_DISABLED;
5910 sp_sb_data.p_func.vf_valid = false;
5911
5912 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5913
5914 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5915 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5916 CSTORM_SP_STATUS_BLOCK_SIZE);
5917 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5918 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5919 CSTORM_SP_SYNC_BLOCK_SIZE);
5920 }
5921
5922 static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
5923 int igu_sb_id, int igu_seg_id)
5924 {
5925 hc_sm->igu_sb_id = igu_sb_id;
5926 hc_sm->igu_seg_id = igu_seg_id;
5927 hc_sm->timer_value = 0xFF;
5928 hc_sm->time_to_expire = 0xFFFFFFFF;
5929 }
5930
5931
5932 static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
5933 {
5934
5935
5936 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5937
5938
5939 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5940 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5941 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5942 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5943
5944
5945
5946 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5947 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5948
5949
5950 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5951 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5952 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5953 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5954 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5955 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5956 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5957 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5958 }
5959
5960 void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
5961 u8 vf_valid, int fw_sb_id, int igu_sb_id)
5962 {
5963 int igu_seg_id;
5964
5965 struct hc_status_block_data_e2 sb_data_e2;
5966 struct hc_status_block_data_e1x sb_data_e1x;
5967 struct hc_status_block_sm *hc_sm_p;
5968 int data_size;
5969 u32 *sb_data_p;
5970
5971 if (CHIP_INT_MODE_IS_BC(bp))
5972 igu_seg_id = HC_SEG_ACCESS_NORM;
5973 else
5974 igu_seg_id = IGU_SEG_ACCESS_NORM;
5975
5976 bnx2x_zero_fp_sb(bp, fw_sb_id);
5977
5978 if (!CHIP_IS_E1x(bp)) {
5979 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
5980 sb_data_e2.common.state = SB_ENABLED;
5981 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5982 sb_data_e2.common.p_func.vf_id = vfid;
5983 sb_data_e2.common.p_func.vf_valid = vf_valid;
5984 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5985 sb_data_e2.common.same_igu_sb_1b = true;
5986 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5987 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5988 hc_sm_p = sb_data_e2.common.state_machine;
5989 sb_data_p = (u32 *)&sb_data_e2;
5990 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5991 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
5992 } else {
5993 memset(&sb_data_e1x, 0,
5994 sizeof(struct hc_status_block_data_e1x));
5995 sb_data_e1x.common.state = SB_ENABLED;
5996 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5997 sb_data_e1x.common.p_func.vf_id = 0xff;
5998 sb_data_e1x.common.p_func.vf_valid = false;
5999 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
6000 sb_data_e1x.common.same_igu_sb_1b = true;
6001 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
6002 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
6003 hc_sm_p = sb_data_e1x.common.state_machine;
6004 sb_data_p = (u32 *)&sb_data_e1x;
6005 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
6006 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
6007 }
6008
6009 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
6010 igu_sb_id, igu_seg_id);
6011 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
6012 igu_sb_id, igu_seg_id);
6013
6014 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
6015
6016
6017 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
6018 }
6019
6020 static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
6021 u16 tx_usec, u16 rx_usec)
6022 {
6023 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
6024 false, rx_usec);
6025 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6026 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
6027 tx_usec);
6028 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6029 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
6030 tx_usec);
6031 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
6032 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
6033 tx_usec);
6034 }
6035
6036 static void bnx2x_init_def_sb(struct bnx2x *bp)
6037 {
6038 struct host_sp_status_block *def_sb = bp->def_status_blk;
6039 dma_addr_t mapping = bp->def_status_blk_mapping;
6040 int igu_sp_sb_index;
6041 int igu_seg_id;
6042 int port = BP_PORT(bp);
6043 int func = BP_FUNC(bp);
6044 int reg_offset, reg_offset_en5;
6045 u64 section;
6046 int index;
6047 struct hc_sp_status_block_data sp_sb_data;
6048 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
6049
6050 if (CHIP_INT_MODE_IS_BC(bp)) {
6051 igu_sp_sb_index = DEF_SB_IGU_ID;
6052 igu_seg_id = HC_SEG_ACCESS_DEF;
6053 } else {
6054 igu_sp_sb_index = bp->igu_dsb_id;
6055 igu_seg_id = IGU_SEG_ACCESS_DEF;
6056 }
6057
6058
6059 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6060 atten_status_block);
6061 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
6062
6063 bp->attn_state = 0;
6064
6065 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6066 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6067 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
6068 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
6069 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
6070 int sindex;
6071
6072 for (sindex = 0; sindex < 4; sindex++)
6073 bp->attn_group[index].sig[sindex] =
6074 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
6075
6076 if (!CHIP_IS_E1x(bp))
6077
6078
6079
6080
6081
6082 bp->attn_group[index].sig[4] = REG_RD(bp,
6083 reg_offset_en5 + 0x4*index);
6084 else
6085 bp->attn_group[index].sig[4] = 0;
6086 }
6087
6088 if (bp->common.int_block == INT_BLOCK_HC) {
6089 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
6090 HC_REG_ATTN_MSG0_ADDR_L);
6091
6092 REG_WR(bp, reg_offset, U64_LO(section));
6093 REG_WR(bp, reg_offset + 4, U64_HI(section));
6094 } else if (!CHIP_IS_E1x(bp)) {
6095 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
6096 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
6097 }
6098
6099 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
6100 sp_sb);
6101
6102 bnx2x_zero_sp_sb(bp);
6103
6104
6105 sp_sb_data.state = SB_ENABLED;
6106 sp_sb_data.host_sb_addr.lo = U64_LO(section);
6107 sp_sb_data.host_sb_addr.hi = U64_HI(section);
6108 sp_sb_data.igu_sb_id = igu_sp_sb_index;
6109 sp_sb_data.igu_seg_id = igu_seg_id;
6110 sp_sb_data.p_func.pf_id = func;
6111 sp_sb_data.p_func.vnic_id = BP_VN(bp);
6112 sp_sb_data.p_func.vf_id = 0xff;
6113
6114 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
6115
6116 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
6117 }
6118
6119 void bnx2x_update_coalesce(struct bnx2x *bp)
6120 {
6121 int i;
6122
6123 for_each_eth_queue(bp, i)
6124 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
6125 bp->tx_ticks, bp->rx_ticks);
6126 }
6127
6128 static void bnx2x_init_sp_ring(struct bnx2x *bp)
6129 {
6130 spin_lock_init(&bp->spq_lock);
6131 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
6132
6133 bp->spq_prod_idx = 0;
6134 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
6135 bp->spq_prod_bd = bp->spq;
6136 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
6137 }
6138
6139 static void bnx2x_init_eq_ring(struct bnx2x *bp)
6140 {
6141 int i;
6142 for (i = 1; i <= NUM_EQ_PAGES; i++) {
6143 union event_ring_elem *elem =
6144 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
6145
6146 elem->next_page.addr.hi =
6147 cpu_to_le32(U64_HI(bp->eq_mapping +
6148 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
6149 elem->next_page.addr.lo =
6150 cpu_to_le32(U64_LO(bp->eq_mapping +
6151 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
6152 }
6153 bp->eq_cons = 0;
6154 bp->eq_prod = NUM_EQ_DESC;
6155 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6156
6157 atomic_set(&bp->eq_spq_left,
6158 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
6159 }
6160
6161
6162 static int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
6163 unsigned long rx_mode_flags,
6164 unsigned long rx_accept_flags,
6165 unsigned long tx_accept_flags,
6166 unsigned long ramrod_flags)
6167 {
6168 struct bnx2x_rx_mode_ramrod_params ramrod_param;
6169 int rc;
6170
6171 memset(&ramrod_param, 0, sizeof(ramrod_param));
6172
6173
6174 ramrod_param.cid = 0;
6175 ramrod_param.cl_id = cl_id;
6176 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
6177 ramrod_param.func_id = BP_FUNC(bp);
6178
6179 ramrod_param.pstate = &bp->sp_state;
6180 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
6181
6182 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
6183 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
6184
6185 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
6186
6187 ramrod_param.ramrod_flags = ramrod_flags;
6188 ramrod_param.rx_mode_flags = rx_mode_flags;
6189
6190 ramrod_param.rx_accept_flags = rx_accept_flags;
6191 ramrod_param.tx_accept_flags = tx_accept_flags;
6192
6193 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
6194 if (rc < 0) {
6195 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
6196 return rc;
6197 }
6198
6199 return 0;
6200 }
6201
6202 static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
6203 unsigned long *rx_accept_flags,
6204 unsigned long *tx_accept_flags)
6205 {
6206
6207 *rx_accept_flags = 0;
6208 *tx_accept_flags = 0;
6209
6210 switch (rx_mode) {
6211 case BNX2X_RX_MODE_NONE:
6212
6213
6214
6215
6216 break;
6217 case BNX2X_RX_MODE_NORMAL:
6218 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6219 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
6220 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6221
6222
6223 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6224 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
6225 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6226
6227 if (bp->accept_any_vlan) {
6228 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6229 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6230 }
6231
6232 break;
6233 case BNX2X_RX_MODE_ALLMULTI:
6234 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6235 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6236 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6237
6238
6239 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6240 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6241 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6242
6243 if (bp->accept_any_vlan) {
6244 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6245 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6246 }
6247
6248 break;
6249 case BNX2X_RX_MODE_PROMISC:
6250
6251
6252
6253
6254 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
6255 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
6256 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
6257 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
6258
6259
6260 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
6261 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
6262
6263 if (IS_MF_SI(bp))
6264 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
6265 else
6266 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
6267
6268 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
6269 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
6270
6271 break;
6272 default:
6273 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
6274 return -EINVAL;
6275 }
6276
6277 return 0;
6278 }
6279
6280
6281 static int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
6282 {
6283 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
6284 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
6285 int rc;
6286
6287 if (!NO_FCOE(bp))
6288
6289 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
6290
6291 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
6292 &tx_accept_flags);
6293 if (rc)
6294 return rc;
6295
6296 __set_bit(RAMROD_RX, &ramrod_flags);
6297 __set_bit(RAMROD_TX, &ramrod_flags);
6298
6299 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
6300 rx_accept_flags, tx_accept_flags,
6301 ramrod_flags);
6302 }
6303
6304 static void bnx2x_init_internal_common(struct bnx2x *bp)
6305 {
6306 int i;
6307
6308
6309
6310 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
6311 REG_WR(bp, BAR_USTRORM_INTMEM +
6312 USTORM_AGG_DATA_OFFSET + i * 4, 0);
6313 if (!CHIP_IS_E1x(bp)) {
6314 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
6315 CHIP_INT_MODE_IS_BC(bp) ?
6316 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
6317 }
6318 }
6319
6320 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
6321 {
6322 switch (load_code) {
6323 case FW_MSG_CODE_DRV_LOAD_COMMON:
6324 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
6325 bnx2x_init_internal_common(bp);
6326 fallthrough;
6327
6328 case FW_MSG_CODE_DRV_LOAD_PORT:
6329
6330 fallthrough;
6331
6332 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6333
6334
6335 break;
6336
6337 default:
6338 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6339 break;
6340 }
6341 }
6342
6343 static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
6344 {
6345 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
6346 }
6347
6348 static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
6349 {
6350 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
6351 }
6352
6353 static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
6354 {
6355 if (CHIP_IS_E1x(fp->bp))
6356 return BP_L_ID(fp->bp) + fp->index;
6357 else
6358 return bnx2x_fp_igu_sb_id(fp);
6359 }
6360
6361 static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
6362 {
6363 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6364 u8 cos;
6365 unsigned long q_type = 0;
6366 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
6367 fp->rx_queue = fp_idx;
6368 fp->cid = fp_idx;
6369 fp->cl_id = bnx2x_fp_cl_id(fp);
6370 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
6371 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
6372
6373 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
6374
6375
6376 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
6377
6378
6379 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
6380
6381
6382 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6383 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6384
6385 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
6386
6387
6388 for_each_cos_in_tx_queue(fp, cos) {
6389 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
6390 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
6391 FP_COS_TO_TXQ(fp, cos, bp),
6392 BNX2X_TX_SB_INDEX_BASE + cos, fp);
6393 cids[cos] = fp->txdata_ptr[cos]->cid;
6394 }
6395
6396
6397 if (IS_VF(bp))
6398 return;
6399
6400 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
6401 fp->fw_sb_id, fp->igu_sb_id);
6402 bnx2x_update_fpsb_idx(fp);
6403 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
6404 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6405 bnx2x_sp_mapping(bp, q_rdata), q_type);
6406
6407
6408
6409
6410 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
6411
6412 DP(NETIF_MSG_IFUP,
6413 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6414 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6415 fp->igu_sb_id);
6416 }
6417
6418 static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
6419 {
6420 int i;
6421
6422 for (i = 1; i <= NUM_TX_RINGS; i++) {
6423 struct eth_tx_next_bd *tx_next_bd =
6424 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
6425
6426 tx_next_bd->addr_hi =
6427 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
6428 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6429 tx_next_bd->addr_lo =
6430 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
6431 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
6432 }
6433
6434 *txdata->tx_cons_sb = cpu_to_le16(0);
6435
6436 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
6437 txdata->tx_db.data.zero_fill1 = 0;
6438 txdata->tx_db.data.prod = 0;
6439
6440 txdata->tx_pkt_prod = 0;
6441 txdata->tx_pkt_cons = 0;
6442 txdata->tx_bd_prod = 0;
6443 txdata->tx_bd_cons = 0;
6444 txdata->tx_pkt = 0;
6445 }
6446
6447 static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
6448 {
6449 int i;
6450
6451 for_each_tx_queue_cnic(bp, i)
6452 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6453 }
6454
6455 static void bnx2x_init_tx_rings(struct bnx2x *bp)
6456 {
6457 int i;
6458 u8 cos;
6459
6460 for_each_eth_queue(bp, i)
6461 for_each_cos_in_tx_queue(&bp->fp[i], cos)
6462 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
6463 }
6464
6465 static void bnx2x_init_fcoe_fp(struct bnx2x *bp)
6466 {
6467 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
6468 unsigned long q_type = 0;
6469
6470 bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
6471 bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
6472 BNX2X_FCOE_ETH_CL_ID_IDX);
6473 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp);
6474 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
6475 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
6476 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
6477 bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]),
6478 fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX,
6479 fp);
6480
6481 DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
6482
6483
6484 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
6485
6486 bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
6487 bnx2x_rx_ustorm_prods_offset(fp);
6488
6489
6490 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
6491 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6492
6493
6494 BUG_ON(fp->max_cos != 1);
6495
6496 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id,
6497 &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6498 bnx2x_sp_mapping(bp, q_rdata), q_type);
6499
6500 DP(NETIF_MSG_IFUP,
6501 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
6502 fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
6503 fp->igu_sb_id);
6504 }
6505
6506 void bnx2x_nic_init_cnic(struct bnx2x *bp)
6507 {
6508 if (!NO_FCOE(bp))
6509 bnx2x_init_fcoe_fp(bp);
6510
6511 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6512 BNX2X_VF_ID_INVALID, false,
6513 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
6514
6515
6516 rmb();
6517 bnx2x_init_rx_rings_cnic(bp);
6518 bnx2x_init_tx_rings_cnic(bp);
6519
6520
6521 mb();
6522 }
6523
6524 void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
6525 {
6526 int i;
6527
6528
6529 for_each_eth_queue(bp, i)
6530 bnx2x_init_eth_fp(bp, i);
6531
6532
6533 rmb();
6534 bnx2x_init_rx_rings(bp);
6535 bnx2x_init_tx_rings(bp);
6536
6537 if (IS_PF(bp)) {
6538
6539 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6540 bp->common.shmem_base,
6541 bp->common.shmem2_base, BP_PORT(bp));
6542
6543
6544 bnx2x_init_def_sb(bp);
6545 bnx2x_update_dsb_idx(bp);
6546 bnx2x_init_sp_ring(bp);
6547 } else {
6548 bnx2x_memset_stats(bp);
6549 }
6550 }
6551
6552 void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6553 {
6554 bnx2x_init_eq_ring(bp);
6555 bnx2x_init_internal(bp, load_code);
6556 bnx2x_pf_init(bp);
6557 bnx2x_stats_init(bp);
6558
6559
6560 mb();
6561
6562 bnx2x_int_enable(bp);
6563
6564
6565 bnx2x_attn_int_deasserted0(bp,
6566 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6567 AEU_INPUTS_ATTN_BITS_SPIO5);
6568 }
6569
6570
6571 static int bnx2x_gunzip_init(struct bnx2x *bp)
6572 {
6573 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6574 &bp->gunzip_mapping, GFP_KERNEL);
6575 if (bp->gunzip_buf == NULL)
6576 goto gunzip_nomem1;
6577
6578 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6579 if (bp->strm == NULL)
6580 goto gunzip_nomem2;
6581
6582 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
6583 if (bp->strm->workspace == NULL)
6584 goto gunzip_nomem3;
6585
6586 return 0;
6587
6588 gunzip_nomem3:
6589 kfree(bp->strm);
6590 bp->strm = NULL;
6591
6592 gunzip_nomem2:
6593 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6594 bp->gunzip_mapping);
6595 bp->gunzip_buf = NULL;
6596
6597 gunzip_nomem1:
6598 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
6599 return -ENOMEM;
6600 }
6601
6602 static void bnx2x_gunzip_end(struct bnx2x *bp)
6603 {
6604 if (bp->strm) {
6605 vfree(bp->strm->workspace);
6606 kfree(bp->strm);
6607 bp->strm = NULL;
6608 }
6609
6610 if (bp->gunzip_buf) {
6611 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6612 bp->gunzip_mapping);
6613 bp->gunzip_buf = NULL;
6614 }
6615 }
6616
6617 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6618 {
6619 int n, rc;
6620
6621
6622 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6623 BNX2X_ERR("Bad gzip header\n");
6624 return -EINVAL;
6625 }
6626
6627 n = 10;
6628
6629 #define FNAME 0x8
6630
6631 if (zbuf[3] & FNAME)
6632 while ((zbuf[n++] != 0) && (n < len));
6633
6634 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6635 bp->strm->avail_in = len - n;
6636 bp->strm->next_out = bp->gunzip_buf;
6637 bp->strm->avail_out = FW_BUF_SIZE;
6638
6639 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6640 if (rc != Z_OK)
6641 return rc;
6642
6643 rc = zlib_inflate(bp->strm, Z_FINISH);
6644 if ((rc != Z_OK) && (rc != Z_STREAM_END))
6645 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6646 bp->strm->msg);
6647
6648 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6649 if (bp->gunzip_outlen & 0x3)
6650 netdev_err(bp->dev,
6651 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
6652 bp->gunzip_outlen);
6653 bp->gunzip_outlen >>= 2;
6654
6655 zlib_inflateEnd(bp->strm);
6656
6657 if (rc == Z_STREAM_END)
6658 return 0;
6659
6660 return rc;
6661 }
6662
6663
6664
6665
6666
6667
6668
6669
6670 static void bnx2x_lb_pckt(struct bnx2x *bp)
6671 {
6672 u32 wb_write[3];
6673
6674
6675 wb_write[0] = 0x55555555;
6676 wb_write[1] = 0x55555555;
6677 wb_write[2] = 0x20;
6678 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6679
6680
6681 wb_write[0] = 0x09000000;
6682 wb_write[1] = 0x55555555;
6683 wb_write[2] = 0x10;
6684 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6685 }
6686
6687
6688
6689
6690
6691 static int bnx2x_int_mem_test(struct bnx2x *bp)
6692 {
6693 int factor;
6694 int count, i;
6695 u32 val = 0;
6696
6697 if (CHIP_REV_IS_FPGA(bp))
6698 factor = 120;
6699 else if (CHIP_REV_IS_EMUL(bp))
6700 factor = 200;
6701 else
6702 factor = 1;
6703
6704
6705 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6706 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6707 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6708 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6709
6710
6711 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6712
6713
6714 bnx2x_lb_pckt(bp);
6715
6716
6717
6718 count = 1000 * factor;
6719 while (count) {
6720
6721 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6722 val = *bnx2x_sp(bp, wb_data[0]);
6723 if (val == 0x10)
6724 break;
6725
6726 usleep_range(10000, 20000);
6727 count--;
6728 }
6729 if (val != 0x10) {
6730 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6731 return -1;
6732 }
6733
6734
6735 count = 1000 * factor;
6736 while (count) {
6737 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6738 if (val == 1)
6739 break;
6740
6741 usleep_range(10000, 20000);
6742 count--;
6743 }
6744 if (val != 0x1) {
6745 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6746 return -2;
6747 }
6748
6749
6750 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6751 msleep(50);
6752 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6753 msleep(50);
6754 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6755 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6756
6757 DP(NETIF_MSG_HW, "part2\n");
6758
6759
6760 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6761 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6762 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6763 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6764
6765
6766 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6767
6768
6769 for (i = 0; i < 10; i++)
6770 bnx2x_lb_pckt(bp);
6771
6772
6773
6774 count = 1000 * factor;
6775 while (count) {
6776
6777 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6778 val = *bnx2x_sp(bp, wb_data[0]);
6779 if (val == 0xb0)
6780 break;
6781
6782 usleep_range(10000, 20000);
6783 count--;
6784 }
6785 if (val != 0xb0) {
6786 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6787 return -3;
6788 }
6789
6790
6791 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6792 if (val != 2)
6793 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6794
6795
6796 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6797
6798
6799 msleep(10 * factor);
6800
6801 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6802 if (val != 3)
6803 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6804
6805
6806 for (i = 0; i < 11; i++)
6807 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6808 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6809 if (val != 1) {
6810 BNX2X_ERR("clear of NIG failed\n");
6811 return -4;
6812 }
6813
6814
6815 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6816 msleep(50);
6817 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6818 msleep(50);
6819 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6820 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
6821 if (!CNIC_SUPPORT(bp))
6822
6823 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6824
6825
6826 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6827 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6828 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6829 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6830
6831 DP(NETIF_MSG_HW, "done\n");
6832
6833 return 0;
6834 }
6835
6836 static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
6837 {
6838 u32 val;
6839
6840 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6841 if (!CHIP_IS_E1x(bp))
6842 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6843 else
6844 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6845 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6846 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6847
6848
6849
6850
6851
6852
6853 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
6854 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6855 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6856 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6857 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6858 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6859
6860
6861 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6862 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6863 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6864
6865
6866 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6867 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6868 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6869 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6870
6871
6872
6873 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6874 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6875 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6876 if (!CHIP_IS_E1x(bp))
6877 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6878 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6879 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6880
6881 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6882 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6883 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6884
6885
6886 if (!CHIP_IS_E1x(bp))
6887
6888 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6889
6890 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6891 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6892
6893 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);
6894 }
6895
6896 static void bnx2x_reset_common(struct bnx2x *bp)
6897 {
6898 u32 val = 0x1400;
6899
6900
6901 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6902 0xd3ffff7f);
6903
6904 if (CHIP_IS_E3(bp)) {
6905 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6906 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6907 }
6908
6909 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6910 }
6911
6912 static void bnx2x_setup_dmae(struct bnx2x *bp)
6913 {
6914 bp->dmae_ready = 0;
6915 spin_lock_init(&bp->dmae_lock);
6916 }
6917
6918 static void bnx2x_init_pxp(struct bnx2x *bp)
6919 {
6920 u16 devctl;
6921 int r_order, w_order;
6922
6923 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
6924 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6925 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6926 if (bp->mrrs == -1)
6927 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6928 else {
6929 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6930 r_order = bp->mrrs;
6931 }
6932
6933 bnx2x_init_pxp_arb(bp, r_order, w_order);
6934 }
6935
6936 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6937 {
6938 int is_required;
6939 u32 val;
6940 int port;
6941
6942 if (BP_NOMCP(bp))
6943 return;
6944
6945 is_required = 0;
6946 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6947 SHARED_HW_CFG_FAN_FAILURE_MASK;
6948
6949 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6950 is_required = 1;
6951
6952
6953
6954
6955
6956
6957 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6958 for (port = PORT_0; port < PORT_MAX; port++) {
6959 is_required |=
6960 bnx2x_fan_failure_det_req(
6961 bp,
6962 bp->common.shmem_base,
6963 bp->common.shmem2_base,
6964 port);
6965 }
6966
6967 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6968
6969 if (is_required == 0)
6970 return;
6971
6972
6973 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
6974
6975
6976 val = REG_RD(bp, MISC_REG_SPIO_INT);
6977 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
6978 REG_WR(bp, MISC_REG_SPIO_INT, val);
6979
6980
6981 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6982 val |= MISC_SPIO_SPIO5;
6983 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6984 }
6985
6986 void bnx2x_pf_disable(struct bnx2x *bp)
6987 {
6988 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6989 val &= ~IGU_PF_CONF_FUNC_EN;
6990
6991 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6992 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6993 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6994 }
6995
6996 static void bnx2x__common_init_phy(struct bnx2x *bp)
6997 {
6998 u32 shmem_base[2], shmem2_base[2];
6999
7000 if (SHMEM2_RD(bp, size) >
7001 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
7002 return;
7003 shmem_base[0] = bp->common.shmem_base;
7004 shmem2_base[0] = bp->common.shmem2_base;
7005 if (!CHIP_IS_E1x(bp)) {
7006 shmem_base[1] =
7007 SHMEM2_RD(bp, other_shmem_base_addr);
7008 shmem2_base[1] =
7009 SHMEM2_RD(bp, other_shmem2_base_addr);
7010 }
7011 bnx2x_acquire_phy_lock(bp);
7012 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
7013 bp->common.chip_id);
7014 bnx2x_release_phy_lock(bp);
7015 }
7016
7017 static void bnx2x_config_endianity(struct bnx2x *bp, u32 val)
7018 {
7019 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val);
7020 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val);
7021 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val);
7022 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val);
7023 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val);
7024
7025
7026 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
7027
7028 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val);
7029 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val);
7030 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val);
7031 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val);
7032 }
7033
7034 static void bnx2x_set_endianity(struct bnx2x *bp)
7035 {
7036 #ifdef __BIG_ENDIAN
7037 bnx2x_config_endianity(bp, 1);
7038 #else
7039 bnx2x_config_endianity(bp, 0);
7040 #endif
7041 }
7042
7043 static void bnx2x_reset_endianity(struct bnx2x *bp)
7044 {
7045 bnx2x_config_endianity(bp, 0);
7046 }
7047
7048
7049
7050
7051
7052
7053 static int bnx2x_init_hw_common(struct bnx2x *bp)
7054 {
7055 u32 val;
7056
7057 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
7058
7059
7060
7061
7062
7063 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7064
7065 bnx2x_reset_common(bp);
7066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
7067
7068 val = 0xfffc;
7069 if (CHIP_IS_E3(bp)) {
7070 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
7071 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
7072 }
7073 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
7074
7075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
7076
7077 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
7078
7079 if (!CHIP_IS_E1x(bp)) {
7080 u8 abs_func_id;
7081
7082
7083
7084
7085
7086
7087
7088
7089 for (abs_func_id = BP_PATH(bp);
7090 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
7091 if (abs_func_id == BP_ABS_FUNC(bp)) {
7092 REG_WR(bp,
7093 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
7094 1);
7095 continue;
7096 }
7097
7098 bnx2x_pretend_func(bp, abs_func_id);
7099
7100 bnx2x_pf_disable(bp);
7101 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7102 }
7103 }
7104
7105 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
7106 if (CHIP_IS_E1(bp)) {
7107
7108
7109 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
7110 }
7111
7112 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
7113 bnx2x_init_pxp(bp);
7114 bnx2x_set_endianity(bp);
7115 bnx2x_ilt_init_page_size(bp, INITOP_SET);
7116
7117 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
7118 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
7119
7120
7121 msleep(100);
7122
7123 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
7124 if (val != 1) {
7125 BNX2X_ERR("PXP2 CFG failed\n");
7126 return -EBUSY;
7127 }
7128 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
7129 if (val != 1) {
7130 BNX2X_ERR("PXP2 RD_INIT failed\n");
7131 return -EBUSY;
7132 }
7133
7134
7135
7136
7137
7138
7139 if (!CHIP_IS_E1x(bp)) {
7140
7141
7142
7143
7144
7145
7146
7147
7148
7149
7150
7151
7152
7153
7154
7155
7156
7157
7158
7159
7160
7161
7162
7163
7164
7165
7166
7167
7168
7169
7170
7171
7172
7173
7174
7175
7176
7177
7178
7179
7180
7181
7182
7183
7184
7185
7186
7187
7188
7189
7190
7191
7192
7193
7194
7195
7196
7197
7198
7199
7200
7201
7202 struct ilt_client_info ilt_cli;
7203 struct bnx2x_ilt ilt;
7204 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7205 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
7206
7207
7208 ilt_cli.start = 0;
7209 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7210 ilt_cli.client_num = ILT_CLIENT_TM;
7211
7212
7213
7214
7215
7216
7217
7218
7219
7220
7221
7222
7223 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
7224 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
7225 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
7226
7227 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
7228 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
7229 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
7230 }
7231
7232 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
7233 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
7234
7235 if (!CHIP_IS_E1x(bp)) {
7236 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
7237 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
7238 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
7239
7240 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
7241
7242
7243 do {
7244 msleep(200);
7245 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
7246 } while (factor-- && (val != 1));
7247
7248 if (val != 1) {
7249 BNX2X_ERR("ATC_INIT failed\n");
7250 return -EBUSY;
7251 }
7252 }
7253
7254 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
7255
7256 bnx2x_iov_init_dmae(bp);
7257
7258
7259 bp->dmae_ready = 1;
7260 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
7261
7262 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
7263
7264 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
7265
7266 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
7267
7268 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
7269
7270 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
7271 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
7272 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
7273 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
7274
7275 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
7276
7277
7278 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
7279
7280
7281 REG_WR(bp, QM_REG_SOFT_RESET, 1);
7282 REG_WR(bp, QM_REG_SOFT_RESET, 0);
7283
7284 if (CNIC_SUPPORT(bp))
7285 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
7286
7287 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
7288
7289 if (!CHIP_REV_IS_SLOW(bp))
7290
7291 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
7292
7293 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
7294
7295 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
7296 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
7297
7298 if (!CHIP_IS_E1(bp))
7299 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
7300
7301 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
7302 if (IS_MF_AFEX(bp)) {
7303
7304
7305
7306 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
7307 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
7308 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
7309 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
7310 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
7311 } else {
7312
7313
7314
7315 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
7316 bp->path_has_ovlan ? 7 : 6);
7317 }
7318 }
7319
7320 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
7321 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
7322 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
7323 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
7324
7325 if (!CHIP_IS_E1x(bp)) {
7326
7327 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7328 VFC_MEMORIES_RST_REG_CAM_RST |
7329 VFC_MEMORIES_RST_REG_RAM_RST);
7330 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
7331 VFC_MEMORIES_RST_REG_CAM_RST |
7332 VFC_MEMORIES_RST_REG_RAM_RST);
7333
7334 msleep(20);
7335 }
7336
7337 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
7338 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
7339 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
7340 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
7341
7342
7343 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7344 0x80000000);
7345 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7346 0x80000000);
7347
7348 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
7349 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
7350 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
7351
7352 if (!CHIP_IS_E1x(bp)) {
7353 if (IS_MF_AFEX(bp)) {
7354
7355
7356
7357 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
7358 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
7359 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
7360 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
7361 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
7362 } else {
7363 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
7364 bp->path_has_ovlan ? 7 : 6);
7365 }
7366 }
7367
7368 REG_WR(bp, SRC_REG_SOFT_RST, 1);
7369
7370 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
7371
7372 if (CNIC_SUPPORT(bp)) {
7373 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
7374 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
7375 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
7376 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
7377 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
7378 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
7379 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
7380 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
7381 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
7382 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
7383 }
7384 REG_WR(bp, SRC_REG_SOFT_RST, 0);
7385
7386 if (sizeof(union cdu_context) != 1024)
7387
7388 dev_alert(&bp->pdev->dev,
7389 "please adjust the size of cdu_context(%ld)\n",
7390 (long)sizeof(union cdu_context));
7391
7392 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
7393 val = (4 << 24) + (0 << 12) + 1024;
7394 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
7395
7396 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
7397 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
7398
7399 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
7400
7401
7402 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
7403
7404 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
7405
7406 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
7407 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
7408
7409 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
7410 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
7411
7412
7413 REG_WR(bp, 0x2814, 0xffffffff);
7414 REG_WR(bp, 0x3820, 0xffffffff);
7415
7416 if (!CHIP_IS_E1x(bp)) {
7417 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
7418 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
7419 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
7420 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
7421 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
7422 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
7423 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
7424 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
7425 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
7426 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
7427 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
7428 }
7429
7430 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
7431 if (!CHIP_IS_E1(bp)) {
7432
7433 if (!CHIP_IS_E3(bp))
7434 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
7435 }
7436 if (CHIP_IS_E1H(bp))
7437
7438 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
7439
7440 if (CHIP_REV_IS_SLOW(bp))
7441 msleep(200);
7442
7443
7444 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
7445 if (val != 1) {
7446 BNX2X_ERR("CFC LL_INIT failed\n");
7447 return -EBUSY;
7448 }
7449 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
7450 if (val != 1) {
7451 BNX2X_ERR("CFC AC_INIT failed\n");
7452 return -EBUSY;
7453 }
7454 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
7455 if (val != 1) {
7456 BNX2X_ERR("CFC CAM_INIT failed\n");
7457 return -EBUSY;
7458 }
7459 REG_WR(bp, CFC_REG_DEBUG0, 0);
7460
7461 if (CHIP_IS_E1(bp)) {
7462
7463
7464 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
7465 val = *bnx2x_sp(bp, wb_data[0]);
7466
7467
7468 if ((val == 0) && bnx2x_int_mem_test(bp)) {
7469 BNX2X_ERR("internal mem self test failed\n");
7470 return -EBUSY;
7471 }
7472 }
7473
7474 bnx2x_setup_fan_failure_detection(bp);
7475
7476
7477 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
7478
7479 bnx2x_enable_blocks_attention(bp);
7480 bnx2x_enable_blocks_parity(bp);
7481
7482 if (!BP_NOMCP(bp)) {
7483 if (CHIP_IS_E1x(bp))
7484 bnx2x__common_init_phy(bp);
7485 } else
7486 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
7487
7488 if (SHMEM2_HAS(bp, netproc_fw_ver))
7489 SHMEM2_WR(bp, netproc_fw_ver, REG_RD(bp, XSEM_REG_PRAM));
7490
7491 return 0;
7492 }
7493
7494
7495
7496
7497
7498
7499 static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
7500 {
7501 int rc = bnx2x_init_hw_common(bp);
7502
7503 if (rc)
7504 return rc;
7505
7506
7507 if (!BP_NOMCP(bp))
7508 bnx2x__common_init_phy(bp);
7509
7510 return 0;
7511 }
7512
7513 static int bnx2x_init_hw_port(struct bnx2x *bp)
7514 {
7515 int port = BP_PORT(bp);
7516 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
7517 u32 low, high;
7518 u32 val, reg;
7519
7520 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
7521
7522 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7523
7524 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7525 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7526 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7527
7528
7529
7530
7531
7532
7533 if (!CHIP_IS_E1x(bp))
7534 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7535
7536 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7537 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7538 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7539 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7540
7541 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7542 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7543 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7544 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7545
7546
7547 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
7548
7549 if (CNIC_SUPPORT(bp)) {
7550 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7551 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7552 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7553 }
7554
7555 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
7556
7557 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7558
7559 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
7560
7561 if (IS_MF(bp))
7562 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7563 else if (bp->dev->mtu > 4096) {
7564 if (bp->flags & ONE_PORT_FLAG)
7565 low = 160;
7566 else {
7567 val = bp->dev->mtu;
7568
7569 low = 96 + (val/64) +
7570 ((val % 64) ? 1 : 0);
7571 }
7572 } else
7573 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7574 high = low + 56;
7575 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7576 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
7577 }
7578
7579 if (CHIP_MODE_IS_4_PORT(bp))
7580 REG_WR(bp, (BP_PORT(bp) ?
7581 BRB1_REG_MAC_GUARANTIED_1 :
7582 BRB1_REG_MAC_GUARANTIED_0), 40);
7583
7584 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7585 if (CHIP_IS_E3B0(bp)) {
7586 if (IS_MF_AFEX(bp)) {
7587
7588 REG_WR(bp, BP_PORT(bp) ?
7589 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7590 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7591 REG_WR(bp, BP_PORT(bp) ?
7592 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7593 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7594 REG_WR(bp, BP_PORT(bp) ?
7595 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7596 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7597 } else {
7598
7599
7600
7601
7602 REG_WR(bp, BP_PORT(bp) ?
7603 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7604 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7605 (bp->path_has_ovlan ? 7 : 6));
7606 }
7607 }
7608
7609 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7610 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7611 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7612 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7613
7614 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7615 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7616 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7617 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7618
7619 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7620 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7621
7622 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7623
7624 if (CHIP_IS_E1x(bp)) {
7625
7626 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
7627
7628
7629 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7630
7631 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
7632
7633
7634 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7635 udelay(50);
7636 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7637 }
7638
7639 if (CNIC_SUPPORT(bp))
7640 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7641
7642 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7643 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
7644
7645 if (CHIP_IS_E1(bp)) {
7646 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7647 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7648 }
7649 bnx2x_init_block(bp, BLOCK_HC, init_phase);
7650
7651 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
7652
7653 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
7654
7655
7656
7657
7658 val = IS_MF(bp) ? 0xF7 : 0x7;
7659
7660 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7661 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
7662
7663
7664 reg = port ? MISC_REG_AEU_ENABLE4_NIG_1 : MISC_REG_AEU_ENABLE4_NIG_0;
7665 REG_WR(bp, reg,
7666 REG_RD(bp, reg) &
7667 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7668
7669 reg = port ? MISC_REG_AEU_ENABLE4_PXP_1 : MISC_REG_AEU_ENABLE4_PXP_0;
7670 REG_WR(bp, reg,
7671 REG_RD(bp, reg) &
7672 ~AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY);
7673
7674 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7675
7676 if (!CHIP_IS_E1x(bp)) {
7677
7678
7679
7680 if (IS_MF_AFEX(bp))
7681 REG_WR(bp, BP_PORT(bp) ?
7682 NIG_REG_P1_HDRS_AFTER_BASIC :
7683 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7684 else
7685 REG_WR(bp, BP_PORT(bp) ?
7686 NIG_REG_P1_HDRS_AFTER_BASIC :
7687 NIG_REG_P0_HDRS_AFTER_BASIC,
7688 IS_MF_SD(bp) ? 7 : 6);
7689
7690 if (CHIP_IS_E3(bp))
7691 REG_WR(bp, BP_PORT(bp) ?
7692 NIG_REG_LLH1_MF_MODE :
7693 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7694 }
7695 if (!CHIP_IS_E3(bp))
7696 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
7697
7698 if (!CHIP_IS_E1(bp)) {
7699
7700 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
7701 (IS_MF_SD(bp) ? 0x1 : 0x2));
7702
7703 if (!CHIP_IS_E1x(bp)) {
7704 val = 0;
7705 switch (bp->mf_mode) {
7706 case MULTI_FUNCTION_SD:
7707 val = 1;
7708 break;
7709 case MULTI_FUNCTION_SI:
7710 case MULTI_FUNCTION_AFEX:
7711 val = 2;
7712 break;
7713 }
7714
7715 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7716 NIG_REG_LLH0_CLS_TYPE), val);
7717 }
7718 {
7719 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7720 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7721 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7722 }
7723 }
7724
7725
7726 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
7727 if (val & MISC_SPIO_SPIO5) {
7728 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7729 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7730 val = REG_RD(bp, reg_addr);
7731 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
7732 REG_WR(bp, reg_addr, val);
7733 }
7734
7735 if (CHIP_IS_E3B0(bp))
7736 bp->flags |= PTP_SUPPORTED;
7737
7738 return 0;
7739 }
7740
7741 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7742 {
7743 int reg;
7744 u32 wb_write[2];
7745
7746 if (CHIP_IS_E1(bp))
7747 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
7748 else
7749 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
7750
7751 wb_write[0] = ONCHIP_ADDR1(addr);
7752 wb_write[1] = ONCHIP_ADDR2(addr);
7753 REG_WR_DMAE(bp, reg, wb_write, 2);
7754 }
7755
7756 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
7757 {
7758 u32 data, ctl, cnt = 100;
7759 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7760 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7761 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7762 u32 sb_bit = 1 << (idu_sb_id%32);
7763 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
7764 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7765
7766
7767 if (CHIP_INT_MODE_IS_BC(bp))
7768 return;
7769
7770 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7771 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7772 IGU_REGULAR_CLEANUP_SET |
7773 IGU_REGULAR_BCLEANUP;
7774
7775 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7776 func_encode << IGU_CTRL_REG_FID_SHIFT |
7777 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7778
7779 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7780 data, igu_addr_data);
7781 REG_WR(bp, igu_addr_data, data);
7782 barrier();
7783 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7784 ctl, igu_addr_ctl);
7785 REG_WR(bp, igu_addr_ctl, ctl);
7786 barrier();
7787
7788
7789 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7790 msleep(20);
7791
7792 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7793 DP(NETIF_MSG_HW,
7794 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7795 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7796 }
7797 }
7798
7799 static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
7800 {
7801 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true );
7802 }
7803
7804 static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
7805 {
7806 u32 i, base = FUNC_ILT_BASE(func);
7807 for (i = base; i < base + ILT_PER_FUNC; i++)
7808 bnx2x_ilt_wr(bp, i, 0);
7809 }
7810
7811 static void bnx2x_init_searcher(struct bnx2x *bp)
7812 {
7813 int port = BP_PORT(bp);
7814 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7815
7816 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7817 }
7818
7819 static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7820 {
7821 int rc;
7822 struct bnx2x_func_state_params func_params = {NULL};
7823 struct bnx2x_func_switch_update_params *switch_update_params =
7824 &func_params.params.switch_update;
7825
7826
7827 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7828 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7829
7830 func_params.f_obj = &bp->func_obj;
7831 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7832
7833
7834 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND_CHNG,
7835 &switch_update_params->changes);
7836 if (suspend)
7837 __set_bit(BNX2X_F_UPDATE_TX_SWITCH_SUSPEND,
7838 &switch_update_params->changes);
7839
7840 rc = bnx2x_func_state_change(bp, &func_params);
7841
7842 return rc;
7843 }
7844
7845 static int bnx2x_reset_nic_mode(struct bnx2x *bp)
7846 {
7847 int rc, i, port = BP_PORT(bp);
7848 int vlan_en = 0, mac_en[NUM_MACS];
7849
7850
7851 if (bp->mf_mode == SINGLE_FUNCTION) {
7852 bnx2x_set_rx_filter(&bp->link_params, 0);
7853 } else {
7854 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7855 NIG_REG_LLH0_FUNC_EN);
7856 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7857 NIG_REG_LLH0_FUNC_EN, 0);
7858 for (i = 0; i < NUM_MACS; i++) {
7859 mac_en[i] = REG_RD(bp, port ?
7860 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7861 4 * i) :
7862 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7863 4 * i));
7864 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7865 4 * i) :
7866 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7867 }
7868 }
7869
7870
7871 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7872 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7873
7874
7875
7876
7877
7878
7879 rc = bnx2x_func_switch_update(bp, 1);
7880 if (rc) {
7881 BNX2X_ERR("Can't suspend tx-switching!\n");
7882 return rc;
7883 }
7884
7885
7886 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7887
7888
7889 if (bp->mf_mode == SINGLE_FUNCTION) {
7890 bnx2x_set_rx_filter(&bp->link_params, 1);
7891 } else {
7892 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7893 NIG_REG_LLH0_FUNC_EN, vlan_en);
7894 for (i = 0; i < NUM_MACS; i++) {
7895 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7896 4 * i) :
7897 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7898 mac_en[i]);
7899 }
7900 }
7901
7902
7903 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7904 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7905
7906
7907 rc = bnx2x_func_switch_update(bp, 0);
7908 if (rc) {
7909 BNX2X_ERR("Can't resume tx-switching!\n");
7910 return rc;
7911 }
7912
7913 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7914 return 0;
7915 }
7916
7917 int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7918 {
7919 int rc;
7920
7921 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7922
7923 if (CONFIGURE_NIC_MODE(bp)) {
7924
7925 bnx2x_init_searcher(bp);
7926
7927
7928 rc = bnx2x_reset_nic_mode(bp);
7929 if (rc)
7930 BNX2X_ERR("Can't change NIC mode!\n");
7931 return rc;
7932 }
7933
7934 return 0;
7935 }
7936
7937
7938
7939
7940
7941
7942
7943
7944 static void bnx2x_clean_pglue_errors(struct bnx2x *bp)
7945 {
7946 if (!CHIP_IS_E1x(bp))
7947 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
7948 1 << BP_ABS_FUNC(bp));
7949 }
7950
7951 static int bnx2x_init_hw_func(struct bnx2x *bp)
7952 {
7953 int port = BP_PORT(bp);
7954 int func = BP_FUNC(bp);
7955 int init_phase = PHASE_PF0 + func;
7956 struct bnx2x_ilt *ilt = BP_ILT(bp);
7957 u16 cdu_ilt_start;
7958 u32 addr, val;
7959 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
7960 int i, main_mem_width, rc;
7961
7962 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
7963
7964
7965 if (!CHIP_IS_E1x(bp)) {
7966 rc = bnx2x_pf_flr_clnup(bp);
7967 if (rc) {
7968 bnx2x_fw_dump(bp);
7969 return rc;
7970 }
7971 }
7972
7973
7974 if (bp->common.int_block == INT_BLOCK_HC) {
7975 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7976 val = REG_RD(bp, addr);
7977 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7978 REG_WR(bp, addr, val);
7979 }
7980
7981 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7982 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7983
7984 ilt = BP_ILT(bp);
7985 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7986
7987 if (IS_SRIOV(bp))
7988 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7989 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7990
7991
7992
7993
7994 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
7995 for (i = 0; i < L2_ILT_LINES(bp); i++) {
7996 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
7997 ilt->lines[cdu_ilt_start + i].page_mapping =
7998 bp->context[i].cxt_mapping;
7999 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
8000 }
8001
8002 bnx2x_ilt_init_op(bp, INITOP_SET);
8003
8004 if (!CONFIGURE_NIC_MODE(bp)) {
8005 bnx2x_init_searcher(bp);
8006 REG_WR(bp, PRS_REG_NIC_MODE, 0);
8007 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
8008 } else {
8009
8010 REG_WR(bp, PRS_REG_NIC_MODE, 1);
8011 DP(NETIF_MSG_IFUP, "NIC MODE configured\n");
8012 }
8013
8014 if (!CHIP_IS_E1x(bp)) {
8015 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
8016
8017
8018
8019
8020 if (!(bp->flags & USING_MSIX_FLAG))
8021 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
8022
8023
8024
8025
8026
8027
8028 msleep(20);
8029
8030
8031
8032
8033
8034 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
8035
8036 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
8037 }
8038
8039 bp->dmae_ready = 1;
8040
8041 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
8042
8043 bnx2x_clean_pglue_errors(bp);
8044
8045 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
8046 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
8047 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
8048 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
8049 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
8050 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
8051 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
8052 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
8053 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
8054 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
8055 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
8056 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
8057 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
8058
8059 if (!CHIP_IS_E1x(bp))
8060 REG_WR(bp, QM_REG_PF_EN, 1);
8061
8062 if (!CHIP_IS_E1x(bp)) {
8063 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8064 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8065 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8066 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
8067 }
8068 bnx2x_init_block(bp, BLOCK_QM, init_phase);
8069
8070 bnx2x_init_block(bp, BLOCK_TM, init_phase);
8071 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
8072 REG_WR(bp, DORQ_REG_MODE_ACT, 1);
8073
8074 bnx2x_iov_init_dq(bp);
8075
8076 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
8077 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
8078 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
8079 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
8080 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
8081 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
8082 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
8083 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
8084 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
8085 if (!CHIP_IS_E1x(bp))
8086 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
8087
8088 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
8089
8090 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
8091
8092 if (!CHIP_IS_E1x(bp))
8093 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
8094
8095 if (IS_MF(bp)) {
8096 if (!(IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) {
8097 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port * 8, 1);
8098 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port * 8,
8099 bp->mf_ov);
8100 }
8101 }
8102
8103 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
8104
8105
8106 if (bp->common.int_block == INT_BLOCK_HC) {
8107 if (CHIP_IS_E1H(bp)) {
8108 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8109
8110 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8111 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8112 }
8113 bnx2x_init_block(bp, BLOCK_HC, init_phase);
8114
8115 } else {
8116 int num_segs, sb_idx, prod_offset;
8117
8118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
8119
8120 if (!CHIP_IS_E1x(bp)) {
8121 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8122 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8123 }
8124
8125 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
8126
8127 if (!CHIP_IS_E1x(bp)) {
8128 int dsb_idx = 0;
8129
8130
8131
8132
8133
8134
8135
8136
8137
8138
8139
8140
8141
8142
8143
8144
8145
8146
8147
8148
8149
8150 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8151 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
8152 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
8153 prod_offset = (bp->igu_base_sb + sb_idx) *
8154 num_segs;
8155
8156 for (i = 0; i < num_segs; i++) {
8157 addr = IGU_REG_PROD_CONS_MEMORY +
8158 (prod_offset + i) * 4;
8159 REG_WR(bp, addr, 0);
8160 }
8161
8162 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
8163 USTORM_ID, 0, IGU_INT_NOP, 1);
8164 bnx2x_igu_clear_sb(bp,
8165 bp->igu_base_sb + sb_idx);
8166 }
8167
8168
8169 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
8170 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
8171
8172 if (CHIP_MODE_IS_4_PORT(bp))
8173 dsb_idx = BP_FUNC(bp);
8174 else
8175 dsb_idx = BP_VN(bp);
8176
8177 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
8178 IGU_BC_BASE_DSB_PROD + dsb_idx :
8179 IGU_NORM_BASE_DSB_PROD + dsb_idx);
8180
8181
8182
8183
8184
8185 for (i = 0; i < (num_segs * E1HVN_MAX);
8186 i += E1HVN_MAX) {
8187 addr = IGU_REG_PROD_CONS_MEMORY +
8188 (prod_offset + i)*4;
8189 REG_WR(bp, addr, 0);
8190 }
8191
8192 if (CHIP_INT_MODE_IS_BC(bp)) {
8193 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8194 USTORM_ID, 0, IGU_INT_NOP, 1);
8195 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8196 CSTORM_ID, 0, IGU_INT_NOP, 1);
8197 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8198 XSTORM_ID, 0, IGU_INT_NOP, 1);
8199 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8200 TSTORM_ID, 0, IGU_INT_NOP, 1);
8201 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8202 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8203 } else {
8204 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8205 USTORM_ID, 0, IGU_INT_NOP, 1);
8206 bnx2x_ack_sb(bp, bp->igu_dsb_id,
8207 ATTENTION_ID, 0, IGU_INT_NOP, 1);
8208 }
8209 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
8210
8211
8212
8213 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
8214 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
8215 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
8216 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
8217 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
8218 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
8219 }
8220 }
8221
8222
8223 REG_WR(bp, 0x2114, 0xffffffff);
8224 REG_WR(bp, 0x2120, 0xffffffff);
8225
8226 if (CHIP_IS_E1x(bp)) {
8227 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2;
8228 main_mem_base = HC_REG_MAIN_MEMORY +
8229 BP_PORT(bp) * (main_mem_size * 4);
8230 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
8231 main_mem_width = 8;
8232
8233 val = REG_RD(bp, main_mem_prty_clr);
8234 if (val)
8235 DP(NETIF_MSG_HW,
8236 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
8237 val);
8238
8239
8240 for (i = main_mem_base;
8241 i < main_mem_base + main_mem_size * 4;
8242 i += main_mem_width) {
8243 bnx2x_read_dmae(bp, i, main_mem_width / 4);
8244 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
8245 i, main_mem_width / 4);
8246 }
8247
8248 REG_RD(bp, main_mem_prty_clr);
8249 }
8250
8251 #ifdef BNX2X_STOP_ON_ERROR
8252
8253 REG_WR8(bp, BAR_USTRORM_INTMEM +
8254 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8255 REG_WR8(bp, BAR_TSTRORM_INTMEM +
8256 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8257 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8258 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8259 REG_WR8(bp, BAR_XSTRORM_INTMEM +
8260 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
8261 #endif
8262
8263 bnx2x_phy_probe(&bp->link_params);
8264
8265 return 0;
8266 }
8267
8268 void bnx2x_free_mem_cnic(struct bnx2x *bp)
8269 {
8270 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
8271
8272 if (!CHIP_IS_E1x(bp))
8273 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
8274 sizeof(struct host_hc_status_block_e2));
8275 else
8276 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
8277 sizeof(struct host_hc_status_block_e1x));
8278
8279 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8280 }
8281
8282 void bnx2x_free_mem(struct bnx2x *bp)
8283 {
8284 int i;
8285
8286 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
8287 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
8288
8289 if (IS_VF(bp))
8290 return;
8291
8292 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
8293 sizeof(struct host_sp_status_block));
8294
8295 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
8296 sizeof(struct bnx2x_slowpath));
8297
8298 for (i = 0; i < L2_ILT_LINES(bp); i++)
8299 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
8300 bp->context[i].size);
8301 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
8302
8303 BNX2X_FREE(bp->ilt->lines);
8304
8305 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
8306
8307 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
8308 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8309
8310 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
8311
8312 bnx2x_iov_free_mem(bp);
8313 }
8314
8315 int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
8316 {
8317 if (!CHIP_IS_E1x(bp)) {
8318
8319 bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8320 sizeof(struct host_hc_status_block_e2));
8321 if (!bp->cnic_sb.e2_sb)
8322 goto alloc_mem_err;
8323 } else {
8324 bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
8325 sizeof(struct host_hc_status_block_e1x));
8326 if (!bp->cnic_sb.e1x_sb)
8327 goto alloc_mem_err;
8328 }
8329
8330 if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8331
8332 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8333 if (!bp->t2)
8334 goto alloc_mem_err;
8335 }
8336
8337
8338 bp->cnic_eth_dev.addr_drv_info_to_mcp =
8339 &bp->slowpath->drv_info_to_mcp;
8340
8341 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
8342 goto alloc_mem_err;
8343
8344 return 0;
8345
8346 alloc_mem_err:
8347 bnx2x_free_mem_cnic(bp);
8348 BNX2X_ERR("Can't allocate memory\n");
8349 return -ENOMEM;
8350 }
8351
8352 int bnx2x_alloc_mem(struct bnx2x *bp)
8353 {
8354 int i, allocated, context_size;
8355
8356 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
8357
8358 bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
8359 if (!bp->t2)
8360 goto alloc_mem_err;
8361 }
8362
8363 bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
8364 sizeof(struct host_sp_status_block));
8365 if (!bp->def_status_blk)
8366 goto alloc_mem_err;
8367
8368 bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
8369 sizeof(struct bnx2x_slowpath));
8370 if (!bp->slowpath)
8371 goto alloc_mem_err;
8372
8373
8374
8375
8376
8377
8378
8379
8380
8381
8382
8383
8384
8385
8386 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
8387
8388 for (i = 0, allocated = 0; allocated < context_size; i++) {
8389 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
8390 (context_size - allocated));
8391 bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
8392 bp->context[i].size);
8393 if (!bp->context[i].vcxt)
8394 goto alloc_mem_err;
8395 allocated += bp->context[i].size;
8396 }
8397 bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
8398 GFP_KERNEL);
8399 if (!bp->ilt->lines)
8400 goto alloc_mem_err;
8401
8402 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
8403 goto alloc_mem_err;
8404
8405 if (bnx2x_iov_alloc_mem(bp))
8406 goto alloc_mem_err;
8407
8408
8409 bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
8410 if (!bp->spq)
8411 goto alloc_mem_err;
8412
8413
8414 bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
8415 BCM_PAGE_SIZE * NUM_EQ_PAGES);
8416 if (!bp->eq_ring)
8417 goto alloc_mem_err;
8418
8419 return 0;
8420
8421 alloc_mem_err:
8422 bnx2x_free_mem(bp);
8423 BNX2X_ERR("Can't allocate memory\n");
8424 return -ENOMEM;
8425 }
8426
8427
8428
8429
8430
8431 int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
8432 struct bnx2x_vlan_mac_obj *obj, bool set,
8433 int mac_type, unsigned long *ramrod_flags)
8434 {
8435 int rc;
8436 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8437
8438 memset(&ramrod_param, 0, sizeof(ramrod_param));
8439
8440
8441 ramrod_param.vlan_mac_obj = obj;
8442 ramrod_param.ramrod_flags = *ramrod_flags;
8443
8444
8445 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8446 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
8447
8448 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
8449
8450
8451 if (set)
8452 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8453 else
8454 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8455 }
8456
8457 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8458
8459 if (rc == -EEXIST) {
8460 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8461
8462 rc = 0;
8463 } else if (rc < 0)
8464 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
8465
8466 return rc;
8467 }
8468
8469 int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
8470 struct bnx2x_vlan_mac_obj *obj, bool set,
8471 unsigned long *ramrod_flags)
8472 {
8473 int rc;
8474 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
8475
8476 memset(&ramrod_param, 0, sizeof(ramrod_param));
8477
8478
8479 ramrod_param.vlan_mac_obj = obj;
8480 ramrod_param.ramrod_flags = *ramrod_flags;
8481
8482
8483 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
8484 ramrod_param.user_req.u.vlan.vlan = vlan;
8485 __set_bit(BNX2X_VLAN, &ramrod_param.user_req.vlan_mac_flags);
8486
8487 if (set)
8488 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
8489 else
8490 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
8491 }
8492
8493 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
8494
8495 if (rc == -EEXIST) {
8496
8497 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
8498 rc = 0;
8499 } else if (rc < 0) {
8500 BNX2X_ERR("%s VLAN failed\n", (set ? "Set" : "Del"));
8501 }
8502
8503 return rc;
8504 }
8505
8506 void bnx2x_clear_vlan_info(struct bnx2x *bp)
8507 {
8508 struct bnx2x_vlan_entry *vlan;
8509
8510
8511 list_for_each_entry(vlan, &bp->vlan_reg, link)
8512 vlan->hw = false;
8513
8514 bp->vlan_cnt = 0;
8515 }
8516
8517 static int bnx2x_del_all_vlans(struct bnx2x *bp)
8518 {
8519 struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
8520 unsigned long ramrod_flags = 0, vlan_flags = 0;
8521 int rc;
8522
8523 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8524 __set_bit(BNX2X_VLAN, &vlan_flags);
8525 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_flags, &ramrod_flags);
8526 if (rc)
8527 return rc;
8528
8529 bnx2x_clear_vlan_info(bp);
8530
8531 return 0;
8532 }
8533
8534 int bnx2x_del_all_macs(struct bnx2x *bp,
8535 struct bnx2x_vlan_mac_obj *mac_obj,
8536 int mac_type, bool wait_for_comp)
8537 {
8538 int rc;
8539 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
8540
8541
8542 if (wait_for_comp)
8543 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8544
8545
8546 __set_bit(mac_type, &vlan_mac_flags);
8547
8548 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
8549 if (rc < 0)
8550 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
8551
8552 return rc;
8553 }
8554
8555 int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
8556 {
8557 if (IS_PF(bp)) {
8558 unsigned long ramrod_flags = 0;
8559
8560 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
8561 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
8562 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
8563 &bp->sp_objs->mac_obj, set,
8564 BNX2X_ETH_MAC, &ramrod_flags);
8565 } else {
8566 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
8567 bp->fp->index, set);
8568 }
8569 }
8570
8571 int bnx2x_setup_leading(struct bnx2x *bp)
8572 {
8573 if (IS_PF(bp))
8574 return bnx2x_setup_queue(bp, &bp->fp[0], true);
8575 else
8576 return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
8577 }
8578
8579
8580
8581
8582
8583
8584
8585
8586 int bnx2x_set_int_mode(struct bnx2x *bp)
8587 {
8588 int rc = 0;
8589
8590 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
8591 BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
8592 return -EINVAL;
8593 }
8594
8595 switch (int_mode) {
8596 case BNX2X_INT_MODE_MSIX:
8597
8598 rc = bnx2x_enable_msix(bp);
8599
8600
8601 if (!rc)
8602 return 0;
8603
8604
8605 if (rc && IS_VF(bp))
8606 return rc;
8607
8608
8609 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
8610 bp->num_queues,
8611 1 + bp->num_cnic_queues);
8612
8613 fallthrough;
8614 case BNX2X_INT_MODE_MSI:
8615 bnx2x_enable_msi(bp);
8616
8617 fallthrough;
8618 case BNX2X_INT_MODE_INTX:
8619 bp->num_ethernet_queues = 1;
8620 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
8621 BNX2X_DEV_INFO("set number of queues to 1\n");
8622 break;
8623 default:
8624 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8625 return -EINVAL;
8626 }
8627 return 0;
8628 }
8629
8630
8631 static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8632 {
8633 if (IS_SRIOV(bp))
8634 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
8635 return L2_ILT_LINES(bp);
8636 }
8637
8638 void bnx2x_ilt_set_info(struct bnx2x *bp)
8639 {
8640 struct ilt_client_info *ilt_client;
8641 struct bnx2x_ilt *ilt = BP_ILT(bp);
8642 u16 line = 0;
8643
8644 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8645 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8646
8647
8648 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8649 ilt_client->client_num = ILT_CLIENT_CDU;
8650 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8651 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8652 ilt_client->start = line;
8653 line += bnx2x_cid_ilt_lines(bp);
8654
8655 if (CNIC_SUPPORT(bp))
8656 line += CNIC_ILT_LINES;
8657 ilt_client->end = line - 1;
8658
8659 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8660 ilt_client->start,
8661 ilt_client->end,
8662 ilt_client->page_size,
8663 ilt_client->flags,
8664 ilog2(ilt_client->page_size >> 12));
8665
8666
8667 if (QM_INIT(bp->qm_cid_count)) {
8668 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8669 ilt_client->client_num = ILT_CLIENT_QM;
8670 ilt_client->page_size = QM_ILT_PAGE_SZ;
8671 ilt_client->flags = 0;
8672 ilt_client->start = line;
8673
8674
8675 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8676 QM_ILT_PAGE_SZ);
8677
8678 ilt_client->end = line - 1;
8679
8680 DP(NETIF_MSG_IFUP,
8681 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8682 ilt_client->start,
8683 ilt_client->end,
8684 ilt_client->page_size,
8685 ilt_client->flags,
8686 ilog2(ilt_client->page_size >> 12));
8687 }
8688
8689 if (CNIC_SUPPORT(bp)) {
8690
8691 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8692 ilt_client->client_num = ILT_CLIENT_SRC;
8693 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8694 ilt_client->flags = 0;
8695 ilt_client->start = line;
8696 line += SRC_ILT_LINES;
8697 ilt_client->end = line - 1;
8698
8699 DP(NETIF_MSG_IFUP,
8700 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8701 ilt_client->start,
8702 ilt_client->end,
8703 ilt_client->page_size,
8704 ilt_client->flags,
8705 ilog2(ilt_client->page_size >> 12));
8706
8707
8708 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8709 ilt_client->client_num = ILT_CLIENT_TM;
8710 ilt_client->page_size = TM_ILT_PAGE_SZ;
8711 ilt_client->flags = 0;
8712 ilt_client->start = line;
8713 line += TM_ILT_LINES;
8714 ilt_client->end = line - 1;
8715
8716 DP(NETIF_MSG_IFUP,
8717 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8718 ilt_client->start,
8719 ilt_client->end,
8720 ilt_client->page_size,
8721 ilt_client->flags,
8722 ilog2(ilt_client->page_size >> 12));
8723 }
8724
8725 BUG_ON(line > ILT_MAX_LINES);
8726 }
8727
8728
8729
8730
8731
8732
8733
8734
8735
8736
8737
8738
8739 static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
8740 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
8741 {
8742 u8 cos;
8743 int cxt_index, cxt_offset;
8744
8745
8746 if (!IS_FCOE_FP(fp)) {
8747 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8748 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8749
8750
8751
8752
8753 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8754 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8755
8756
8757 init_params->rx.hc_rate = bp->rx_ticks ?
8758 (1000000 / bp->rx_ticks) : 0;
8759 init_params->tx.hc_rate = bp->tx_ticks ?
8760 (1000000 / bp->tx_ticks) : 0;
8761
8762
8763 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8764 fp->fw_sb_id;
8765
8766
8767
8768
8769
8770 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8771 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
8772 }
8773
8774
8775 init_params->max_cos = fp->max_cos;
8776
8777 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
8778 fp->index, init_params->max_cos);
8779
8780
8781 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
8782 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8783 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
8784 ILT_PAGE_CIDS);
8785 init_params->cxts[cos] =
8786 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8787 }
8788 }
8789
8790 static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8791 struct bnx2x_queue_state_params *q_params,
8792 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8793 int tx_index, bool leading)
8794 {
8795 memset(tx_only_params, 0, sizeof(*tx_only_params));
8796
8797
8798 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8799
8800
8801 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8802
8803
8804 tx_only_params->cid_index = tx_index;
8805
8806
8807 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8808
8809
8810 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8811
8812 DP(NETIF_MSG_IFUP,
8813 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
8814 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8815 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8816 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8817
8818
8819 return bnx2x_queue_state_change(bp, q_params);
8820 }
8821
8822
8823
8824
8825
8826
8827
8828
8829
8830
8831
8832
8833 int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8834 bool leading)
8835 {
8836 struct bnx2x_queue_state_params q_params = {NULL};
8837 struct bnx2x_queue_setup_params *setup_params =
8838 &q_params.params.setup;
8839 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8840 &q_params.params.tx_only;
8841 int rc;
8842 u8 tx_index;
8843
8844 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
8845
8846
8847 if (!IS_FCOE_FP(fp))
8848 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
8849 IGU_INT_ENABLE, 0);
8850
8851 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8852
8853 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8854
8855
8856 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
8857
8858
8859 q_params.cmd = BNX2X_Q_CMD_INIT;
8860
8861
8862 rc = bnx2x_queue_state_change(bp, &q_params);
8863 if (rc) {
8864 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
8865 return rc;
8866 }
8867
8868 DP(NETIF_MSG_IFUP, "init complete\n");
8869
8870
8871 memset(setup_params, 0, sizeof(*setup_params));
8872
8873
8874 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
8875
8876
8877 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8878 FIRST_TX_COS_INDEX);
8879
8880 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
8881 &setup_params->rxq_params);
8882
8883 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8884 FIRST_TX_COS_INDEX);
8885
8886
8887 q_params.cmd = BNX2X_Q_CMD_SETUP;
8888
8889 if (IS_FCOE_FP(fp))
8890 bp->fcoe_init = true;
8891
8892
8893 rc = bnx2x_queue_state_change(bp, &q_params);
8894 if (rc) {
8895 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8896 return rc;
8897 }
8898
8899
8900 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8901 tx_index < fp->max_cos;
8902 tx_index++) {
8903
8904
8905 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8906 tx_only_params, tx_index, leading);
8907 if (rc) {
8908 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8909 fp->index, tx_index);
8910 return rc;
8911 }
8912 }
8913
8914 return rc;
8915 }
8916
8917 static int bnx2x_stop_queue(struct bnx2x *bp, int index)
8918 {
8919 struct bnx2x_fastpath *fp = &bp->fp[index];
8920 struct bnx2x_fp_txdata *txdata;
8921 struct bnx2x_queue_state_params q_params = {NULL};
8922 int rc, tx_index;
8923
8924 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
8925
8926 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
8927
8928 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
8929
8930
8931 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8932 tx_index < fp->max_cos;
8933 tx_index++){
8934
8935
8936 txdata = fp->txdata_ptr[tx_index];
8937
8938 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
8939 txdata->txq_index);
8940
8941
8942 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8943 memset(&q_params.params.terminate, 0,
8944 sizeof(q_params.params.terminate));
8945 q_params.params.terminate.cid_index = tx_index;
8946
8947 rc = bnx2x_queue_state_change(bp, &q_params);
8948 if (rc)
8949 return rc;
8950
8951
8952 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8953 memset(&q_params.params.cfc_del, 0,
8954 sizeof(q_params.params.cfc_del));
8955 q_params.params.cfc_del.cid_index = tx_index;
8956 rc = bnx2x_queue_state_change(bp, &q_params);
8957 if (rc)
8958 return rc;
8959 }
8960
8961
8962 q_params.cmd = BNX2X_Q_CMD_HALT;
8963 rc = bnx2x_queue_state_change(bp, &q_params);
8964 if (rc)
8965 return rc;
8966
8967
8968 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8969 memset(&q_params.params.terminate, 0,
8970 sizeof(q_params.params.terminate));
8971 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
8972 rc = bnx2x_queue_state_change(bp, &q_params);
8973 if (rc)
8974 return rc;
8975
8976 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8977 memset(&q_params.params.cfc_del, 0,
8978 sizeof(q_params.params.cfc_del));
8979 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
8980 return bnx2x_queue_state_change(bp, &q_params);
8981 }
8982
8983 static void bnx2x_reset_func(struct bnx2x *bp)
8984 {
8985 int port = BP_PORT(bp);
8986 int func = BP_FUNC(bp);
8987 int i;
8988
8989
8990 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8991 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8992 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8993 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8994
8995
8996 for_each_eth_queue(bp, i) {
8997 struct bnx2x_fastpath *fp = &bp->fp[i];
8998 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8999 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
9000 SB_DISABLED);
9001 }
9002
9003 if (CNIC_LOADED(bp))
9004
9005 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9006 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
9007 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
9008
9009
9010 REG_WR8(bp, BAR_CSTRORM_INTMEM +
9011 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
9012 SB_DISABLED);
9013
9014 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
9015 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
9016 0);
9017
9018
9019 if (bp->common.int_block == INT_BLOCK_HC) {
9020 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
9021 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
9022 } else {
9023 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
9024 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
9025 }
9026
9027 if (CNIC_LOADED(bp)) {
9028
9029 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
9030
9031
9032
9033
9034 for (i = 0; i < 200; i++) {
9035 usleep_range(10000, 20000);
9036 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
9037 break;
9038 }
9039 }
9040
9041 bnx2x_clear_func_ilt(bp, func);
9042
9043
9044
9045
9046 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
9047 struct ilt_client_info ilt_cli;
9048
9049 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
9050 ilt_cli.start = 0;
9051 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
9052 ilt_cli.client_num = ILT_CLIENT_TM;
9053
9054 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
9055 }
9056
9057
9058 if (!CHIP_IS_E1x(bp))
9059 bnx2x_pf_disable(bp);
9060
9061 bp->dmae_ready = 0;
9062 }
9063
9064 static void bnx2x_reset_port(struct bnx2x *bp)
9065 {
9066 int port = BP_PORT(bp);
9067 u32 val;
9068
9069
9070 bnx2x__link_reset(bp);
9071
9072 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
9073
9074
9075 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
9076
9077 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
9078 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9079
9080
9081 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
9082
9083 msleep(100);
9084
9085 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
9086 if (val)
9087 DP(NETIF_MSG_IFDOWN,
9088 "BRB1 is not empty %d blocks are occupied\n", val);
9089
9090
9091 }
9092
9093 static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
9094 {
9095 struct bnx2x_func_state_params func_params = {NULL};
9096
9097
9098 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9099
9100 func_params.f_obj = &bp->func_obj;
9101 func_params.cmd = BNX2X_F_CMD_HW_RESET;
9102
9103 func_params.params.hw_init.load_phase = load_code;
9104
9105 return bnx2x_func_state_change(bp, &func_params);
9106 }
9107
9108 static int bnx2x_func_stop(struct bnx2x *bp)
9109 {
9110 struct bnx2x_func_state_params func_params = {NULL};
9111 int rc;
9112
9113
9114 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
9115 func_params.f_obj = &bp->func_obj;
9116 func_params.cmd = BNX2X_F_CMD_STOP;
9117
9118
9119
9120
9121
9122
9123
9124 rc = bnx2x_func_state_change(bp, &func_params);
9125 if (rc) {
9126 #ifdef BNX2X_STOP_ON_ERROR
9127 return rc;
9128 #else
9129 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
9130 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
9131 return bnx2x_func_state_change(bp, &func_params);
9132 #endif
9133 }
9134
9135 return 0;
9136 }
9137
9138
9139
9140
9141
9142
9143
9144
9145
9146 u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
9147 {
9148 u32 reset_code = 0;
9149 int port = BP_PORT(bp);
9150
9151
9152 if (unload_mode == UNLOAD_NORMAL)
9153 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9154
9155 else if (bp->flags & NO_WOL_FLAG)
9156 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
9157
9158 else if (bp->wol) {
9159 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
9160 const u8 *mac_addr = bp->dev->dev_addr;
9161 struct pci_dev *pdev = bp->pdev;
9162 u32 val;
9163 u16 pmc;
9164
9165
9166
9167
9168 u8 entry = (BP_VN(bp) + 1)*8;
9169
9170 val = (mac_addr[0] << 8) | mac_addr[1];
9171 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
9172
9173 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
9174 (mac_addr[4] << 8) | mac_addr[5];
9175 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
9176
9177
9178 pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc);
9179 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
9180 pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc);
9181
9182 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
9183
9184 } else
9185 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9186
9187
9188 if (!BP_NOMCP(bp))
9189 reset_code = bnx2x_fw_command(bp, reset_code, 0);
9190 else {
9191 int path = BP_PATH(bp);
9192
9193 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
9194 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9195 bnx2x_load_count[path][2]);
9196 bnx2x_load_count[path][0]--;
9197 bnx2x_load_count[path][1 + port]--;
9198 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
9199 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
9200 bnx2x_load_count[path][2]);
9201 if (bnx2x_load_count[path][0] == 0)
9202 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
9203 else if (bnx2x_load_count[path][1 + port] == 0)
9204 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
9205 else
9206 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
9207 }
9208
9209 return reset_code;
9210 }
9211
9212
9213
9214
9215
9216
9217
9218 void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
9219 {
9220 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
9221
9222
9223 if (!BP_NOMCP(bp))
9224 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
9225 }
9226
9227 static int bnx2x_func_wait_started(struct bnx2x *bp)
9228 {
9229 int tout = 50;
9230 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
9231
9232 if (!bp->port.pmf)
9233 return 0;
9234
9235
9236
9237
9238
9239
9240
9241
9242
9243
9244
9245
9246
9247
9248
9249
9250 if (msix)
9251 synchronize_irq(bp->msix_table[0].vector);
9252 else
9253 synchronize_irq(bp->pdev->irq);
9254
9255 flush_workqueue(bnx2x_wq);
9256 flush_workqueue(bnx2x_iov_wq);
9257
9258 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
9259 BNX2X_F_STATE_STARTED && tout--)
9260 msleep(20);
9261
9262 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
9263 BNX2X_F_STATE_STARTED) {
9264 #ifdef BNX2X_STOP_ON_ERROR
9265 BNX2X_ERR("Wrong function state\n");
9266 return -EBUSY;
9267 #else
9268
9269
9270
9271
9272 struct bnx2x_func_state_params func_params = {NULL};
9273
9274 DP(NETIF_MSG_IFDOWN,
9275 "Hmmm... Unexpected function state! Forcing STARTED-->TX_STOPPED-->STARTED\n");
9276
9277 func_params.f_obj = &bp->func_obj;
9278 __set_bit(RAMROD_DRV_CLR_ONLY,
9279 &func_params.ramrod_flags);
9280
9281
9282 func_params.cmd = BNX2X_F_CMD_TX_STOP;
9283 bnx2x_func_state_change(bp, &func_params);
9284
9285
9286 func_params.cmd = BNX2X_F_CMD_TX_START;
9287 return bnx2x_func_state_change(bp, &func_params);
9288 #endif
9289 }
9290
9291 return 0;
9292 }
9293
9294 static void bnx2x_disable_ptp(struct bnx2x *bp)
9295 {
9296 int port = BP_PORT(bp);
9297
9298
9299 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
9300 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
9301
9302
9303 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
9304 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
9305 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
9306 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
9307 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
9308 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
9309 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
9310 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
9311
9312
9313 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
9314 NIG_REG_P0_PTP_EN, 0x0);
9315 }
9316
9317
9318 static void bnx2x_stop_ptp(struct bnx2x *bp)
9319 {
9320
9321
9322
9323 cancel_work_sync(&bp->ptp_task);
9324
9325 if (bp->ptp_tx_skb) {
9326 dev_kfree_skb_any(bp->ptp_tx_skb);
9327 bp->ptp_tx_skb = NULL;
9328 }
9329
9330
9331 bnx2x_disable_ptp(bp);
9332
9333 DP(BNX2X_MSG_PTP, "PTP stop ended successfully\n");
9334 }
9335
9336 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
9337 {
9338 int port = BP_PORT(bp);
9339 int i, rc = 0;
9340 u8 cos;
9341 struct bnx2x_mcast_ramrod_params rparam = {NULL};
9342 u32 reset_code;
9343
9344
9345 for_each_tx_queue(bp, i) {
9346 struct bnx2x_fastpath *fp = &bp->fp[i];
9347
9348 for_each_cos_in_tx_queue(fp, cos)
9349 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
9350 #ifdef BNX2X_STOP_ON_ERROR
9351 if (rc)
9352 return;
9353 #endif
9354 }
9355
9356
9357 usleep_range(1000, 2000);
9358
9359
9360 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
9361 false);
9362 if (rc < 0)
9363 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
9364
9365
9366 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
9367 true);
9368 if (rc < 0)
9369 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
9370 rc);
9371
9372
9373
9374
9375
9376 if (!CHIP_IS_E1x(bp)) {
9377
9378 rc = bnx2x_del_all_vlans(bp);
9379 if (rc < 0)
9380 BNX2X_ERR("Failed to delete all VLANs\n");
9381 }
9382
9383
9384 if (!CHIP_IS_E1(bp))
9385 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
9386
9387
9388
9389
9390
9391 netif_addr_lock_bh(bp->dev);
9392
9393 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
9394 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
9395 else if (bp->slowpath)
9396 bnx2x_set_storm_rx_mode(bp);
9397
9398
9399 rparam.mcast_obj = &bp->mcast_obj;
9400 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
9401 if (rc < 0)
9402 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
9403
9404 netif_addr_unlock_bh(bp->dev);
9405
9406 bnx2x_iov_chip_cleanup(bp);
9407
9408
9409
9410
9411
9412
9413 reset_code = bnx2x_send_unload_req(bp, unload_mode);
9414
9415
9416
9417
9418
9419 rc = bnx2x_func_wait_started(bp);
9420 if (rc) {
9421 BNX2X_ERR("bnx2x_func_wait_started failed\n");
9422 #ifdef BNX2X_STOP_ON_ERROR
9423 return;
9424 #endif
9425 }
9426
9427
9428
9429
9430 for_each_eth_queue(bp, i)
9431 if (bnx2x_stop_queue(bp, i))
9432 #ifdef BNX2X_STOP_ON_ERROR
9433 return;
9434 #else
9435 goto unload_error;
9436 #endif
9437
9438 if (CNIC_LOADED(bp)) {
9439 for_each_cnic_queue(bp, i)
9440 if (bnx2x_stop_queue(bp, i))
9441 #ifdef BNX2X_STOP_ON_ERROR
9442 return;
9443 #else
9444 goto unload_error;
9445 #endif
9446 }
9447
9448
9449
9450
9451 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
9452 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
9453
9454 #ifndef BNX2X_STOP_ON_ERROR
9455 unload_error:
9456 #endif
9457 rc = bnx2x_func_stop(bp);
9458 if (rc) {
9459 BNX2X_ERR("Function stop failed!\n");
9460 #ifdef BNX2X_STOP_ON_ERROR
9461 return;
9462 #endif
9463 }
9464
9465
9466
9467
9468
9469
9470 if (bp->flags & PTP_SUPPORTED) {
9471 bnx2x_stop_ptp(bp);
9472 if (bp->ptp_clock) {
9473 ptp_clock_unregister(bp->ptp_clock);
9474 bp->ptp_clock = NULL;
9475 }
9476 }
9477
9478
9479 bnx2x_netif_stop(bp, 1);
9480
9481 bnx2x_del_all_napi(bp);
9482 if (CNIC_LOADED(bp))
9483 bnx2x_del_all_napi_cnic(bp);
9484
9485
9486 bnx2x_free_irq(bp);
9487
9488
9489
9490
9491
9492
9493 if (!pci_channel_offline(bp->pdev)) {
9494 rc = bnx2x_reset_hw(bp, reset_code);
9495 if (rc)
9496 BNX2X_ERR("HW_RESET failed\n");
9497 }
9498
9499
9500 bnx2x_send_unload_done(bp, keep_link);
9501 }
9502
9503 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
9504 {
9505 u32 val;
9506
9507 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
9508
9509 if (CHIP_IS_E1(bp)) {
9510 int port = BP_PORT(bp);
9511 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9512 MISC_REG_AEU_MASK_ATTN_FUNC_0;
9513
9514 val = REG_RD(bp, addr);
9515 val &= ~(0x300);
9516 REG_WR(bp, addr, val);
9517 } else {
9518 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
9519 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
9520 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
9521 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
9522 }
9523 }
9524
9525
9526 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
9527 {
9528 u32 val;
9529
9530
9531 if (!CHIP_IS_E1(bp)) {
9532
9533 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
9534
9535 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
9536 }
9537
9538
9539 if (CHIP_IS_E1x(bp)) {
9540
9541 val = REG_RD(bp, HC_REG_CONFIG_1);
9542 REG_WR(bp, HC_REG_CONFIG_1,
9543 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
9544 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
9545
9546 val = REG_RD(bp, HC_REG_CONFIG_0);
9547 REG_WR(bp, HC_REG_CONFIG_0,
9548 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
9549 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
9550 } else {
9551
9552 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
9553
9554 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
9555 (!close) ?
9556 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
9557 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
9558 }
9559
9560 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
9561 close ? "closing" : "opening");
9562 }
9563
9564 #define SHARED_MF_CLP_MAGIC 0x80000000
9565
9566 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
9567 {
9568
9569 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9570 *magic_val = val & SHARED_MF_CLP_MAGIC;
9571 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
9572 }
9573
9574
9575
9576
9577
9578
9579
9580 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
9581 {
9582
9583 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
9584 MF_CFG_WR(bp, shared_mf_config.clp_mb,
9585 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
9586 }
9587
9588
9589
9590
9591
9592
9593
9594
9595
9596 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
9597 {
9598 u32 shmem;
9599 u32 validity_offset;
9600
9601 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
9602
9603
9604 if (!CHIP_IS_E1(bp))
9605 bnx2x_clp_reset_prep(bp, magic_val);
9606
9607
9608 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9609 validity_offset =
9610 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
9611
9612
9613 if (shmem > 0)
9614 REG_WR(bp, shmem + validity_offset, 0);
9615 }
9616
9617 #define MCP_TIMEOUT 5000
9618 #define MCP_ONE_TIMEOUT 100
9619
9620
9621
9622
9623
9624
9625 static void bnx2x_mcp_wait_one(struct bnx2x *bp)
9626 {
9627
9628
9629 if (CHIP_REV_IS_SLOW(bp))
9630 msleep(MCP_ONE_TIMEOUT*10);
9631 else
9632 msleep(MCP_ONE_TIMEOUT);
9633 }
9634
9635
9636
9637
9638 static int bnx2x_init_shmem(struct bnx2x *bp)
9639 {
9640 int cnt = 0;
9641 u32 val = 0;
9642
9643 do {
9644 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9645
9646
9647
9648
9649 if (bp->common.shmem_base == 0xFFFFFFFF) {
9650 bp->flags |= NO_MCP_FLAG;
9651 return -ENODEV;
9652 }
9653
9654 if (bp->common.shmem_base) {
9655 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9656 if (val & SHR_MEM_VALIDITY_MB)
9657 return 0;
9658 }
9659
9660 bnx2x_mcp_wait_one(bp);
9661
9662 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
9663
9664 BNX2X_ERR("BAD MCP validity signature\n");
9665
9666 return -ENODEV;
9667 }
9668
9669 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
9670 {
9671 int rc = bnx2x_init_shmem(bp);
9672
9673
9674 if (!CHIP_IS_E1(bp))
9675 bnx2x_clp_reset_done(bp, magic_val);
9676
9677 return rc;
9678 }
9679
9680 static void bnx2x_pxp_prep(struct bnx2x *bp)
9681 {
9682 if (!CHIP_IS_E1(bp)) {
9683 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
9684 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
9685 }
9686 }
9687
9688
9689
9690
9691
9692
9693
9694
9695
9696
9697
9698 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
9699 {
9700 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
9701 u32 global_bits2, stay_reset2;
9702
9703
9704
9705
9706
9707 global_bits2 =
9708 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9709 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
9710
9711
9712
9713
9714
9715
9716 not_reset_mask1 =
9717 MISC_REGISTERS_RESET_REG_1_RST_HC |
9718 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9719 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9720
9721 not_reset_mask2 =
9722 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
9723 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9724 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9725 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9726 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9727 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9728 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
9729 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9730 MISC_REGISTERS_RESET_REG_2_RST_ATC |
9731 MISC_REGISTERS_RESET_REG_2_PGLC |
9732 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9733 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9734 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9735 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9736 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9737 MISC_REGISTERS_RESET_REG_2_UMAC1;
9738
9739
9740
9741
9742
9743 stay_reset2 =
9744 MISC_REGISTERS_RESET_REG_2_XMAC |
9745 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9746
9747
9748 reset_mask1 = 0xffffffff;
9749
9750 if (CHIP_IS_E1(bp))
9751 reset_mask2 = 0xffff;
9752 else if (CHIP_IS_E1H(bp))
9753 reset_mask2 = 0x1ffff;
9754 else if (CHIP_IS_E2(bp))
9755 reset_mask2 = 0xfffff;
9756 else
9757 reset_mask2 = 0x3ffffff;
9758
9759
9760 if (!global)
9761 reset_mask2 &= ~global_bits2;
9762
9763
9764
9765
9766
9767
9768
9769
9770
9771
9772
9773
9774
9775
9776
9777 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9778 reset_mask2 & (~not_reset_mask2));
9779
9780 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9781 reset_mask1 & (~not_reset_mask1));
9782
9783 barrier();
9784
9785 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9786 reset_mask2 & (~stay_reset2));
9787
9788 barrier();
9789
9790 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
9791 }
9792
9793
9794
9795
9796
9797
9798
9799
9800
9801
9802 static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9803 {
9804 u32 cnt = 1000;
9805 u32 pend_bits = 0;
9806
9807 do {
9808 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9809
9810 if (pend_bits == 0)
9811 break;
9812
9813 usleep_range(1000, 2000);
9814 } while (cnt-- > 0);
9815
9816 if (cnt <= 0) {
9817 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9818 pend_bits);
9819 return -EBUSY;
9820 }
9821
9822 return 0;
9823 }
9824
9825 static int bnx2x_process_kill(struct bnx2x *bp, bool global)
9826 {
9827 int cnt = 1000;
9828 u32 val = 0;
9829 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
9830 u32 tags_63_32 = 0;
9831
9832
9833 do {
9834 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9835 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9836 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9837 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9838 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
9839 if (CHIP_IS_E3(bp))
9840 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9841
9842 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9843 ((port_is_idle_0 & 0x1) == 0x1) &&
9844 ((port_is_idle_1 & 0x1) == 0x1) &&
9845 (pgl_exp_rom2 == 0xffffffff) &&
9846 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
9847 break;
9848 usleep_range(1000, 2000);
9849 } while (cnt-- > 0);
9850
9851 if (cnt <= 0) {
9852 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9853 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
9854 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9855 pgl_exp_rom2);
9856 return -EAGAIN;
9857 }
9858
9859 barrier();
9860
9861
9862 bnx2x_set_234_gates(bp, true);
9863
9864
9865 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9866 return -EAGAIN;
9867
9868
9869
9870
9871 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9872 barrier();
9873
9874
9875
9876
9877 usleep_range(1000, 2000);
9878
9879
9880
9881 if (global)
9882 bnx2x_reset_mcp_prep(bp, &val);
9883
9884
9885 bnx2x_pxp_prep(bp);
9886 barrier();
9887
9888
9889 bnx2x_process_kill_chip_reset(bp, global);
9890 barrier();
9891
9892
9893 if (!CHIP_IS_E1x(bp))
9894 REG_WR(bp, PGLUE_B_REG_LATCHED_ERRORS_CLR, 0x7f);
9895
9896
9897
9898 if (global && bnx2x_reset_mcp_comp(bp, val))
9899 return -EAGAIN;
9900
9901
9902
9903
9904 bnx2x_set_234_gates(bp, false);
9905
9906
9907
9908
9909 return 0;
9910 }
9911
9912 static int bnx2x_leader_reset(struct bnx2x *bp)
9913 {
9914 int rc = 0;
9915 bool global = bnx2x_reset_is_global(bp);
9916 u32 load_code;
9917
9918
9919
9920
9921 if (!global && !BP_NOMCP(bp)) {
9922 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9923 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
9924 if (!load_code) {
9925 BNX2X_ERR("MCP response failure, aborting\n");
9926 rc = -EAGAIN;
9927 goto exit_leader_reset;
9928 }
9929 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9930 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9931 BNX2X_ERR("MCP unexpected resp, aborting\n");
9932 rc = -EAGAIN;
9933 goto exit_leader_reset2;
9934 }
9935 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9936 if (!load_code) {
9937 BNX2X_ERR("MCP response failure, aborting\n");
9938 rc = -EAGAIN;
9939 goto exit_leader_reset2;
9940 }
9941 }
9942
9943
9944 if (bnx2x_process_kill(bp, global)) {
9945 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
9946 BP_PATH(bp));
9947 rc = -EAGAIN;
9948 goto exit_leader_reset2;
9949 }
9950
9951
9952
9953
9954
9955 bnx2x_set_reset_done(bp);
9956 if (global)
9957 bnx2x_clear_reset_global(bp);
9958
9959 exit_leader_reset2:
9960
9961 if (!global && !BP_NOMCP(bp)) {
9962 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9963 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9964 }
9965 exit_leader_reset:
9966 bp->is_leader = 0;
9967 bnx2x_release_leader_lock(bp);
9968 smp_mb();
9969 return rc;
9970 }
9971
9972 static void bnx2x_recovery_failed(struct bnx2x *bp)
9973 {
9974 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9975
9976
9977 netif_device_detach(bp->dev);
9978
9979
9980
9981
9982
9983 bnx2x_set_reset_in_progress(bp);
9984
9985
9986 bnx2x_set_power_state(bp, PCI_D3hot);
9987
9988 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9989
9990 smp_mb();
9991 }
9992
9993
9994
9995
9996
9997
9998 static void bnx2x_parity_recover(struct bnx2x *bp)
9999 {
10000 u32 error_recovered, error_unrecovered;
10001 bool is_parity, global = false;
10002 #ifdef CONFIG_BNX2X_SRIOV
10003 int vf_idx;
10004
10005 for (vf_idx = 0; vf_idx < bp->requested_nr_virtfn; vf_idx++) {
10006 struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
10007
10008 if (vf)
10009 vf->state = VF_LOST;
10010 }
10011 #endif
10012 DP(NETIF_MSG_HW, "Handling parity\n");
10013 while (1) {
10014 switch (bp->recovery_state) {
10015 case BNX2X_RECOVERY_INIT:
10016 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
10017 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
10018 WARN_ON(!is_parity);
10019
10020
10021 if (bnx2x_trylock_leader_lock(bp)) {
10022 bnx2x_set_reset_in_progress(bp);
10023
10024
10025
10026
10027
10028
10029 if (global)
10030 bnx2x_set_reset_global(bp);
10031
10032 bp->is_leader = 1;
10033 }
10034
10035
10036
10037 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
10038 return;
10039
10040 bp->recovery_state = BNX2X_RECOVERY_WAIT;
10041
10042
10043
10044
10045
10046 smp_mb();
10047 break;
10048
10049 case BNX2X_RECOVERY_WAIT:
10050 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
10051 if (bp->is_leader) {
10052 int other_engine = BP_PATH(bp) ? 0 : 1;
10053 bool other_load_status =
10054 bnx2x_get_load_status(bp, other_engine);
10055 bool load_status =
10056 bnx2x_get_load_status(bp, BP_PATH(bp));
10057 global = bnx2x_reset_is_global(bp);
10058
10059
10060
10061
10062
10063
10064
10065
10066
10067 if (load_status ||
10068 (global && other_load_status)) {
10069
10070
10071
10072 schedule_delayed_work(&bp->sp_rtnl_task,
10073 HZ/10);
10074 return;
10075 } else {
10076
10077
10078
10079
10080
10081 if (bnx2x_leader_reset(bp)) {
10082 bnx2x_recovery_failed(bp);
10083 return;
10084 }
10085
10086
10087
10088
10089
10090
10091 break;
10092 }
10093 } else {
10094 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
10095
10096
10097
10098
10099
10100
10101 if (bnx2x_trylock_leader_lock(bp)) {
10102
10103
10104
10105 bp->is_leader = 1;
10106 break;
10107 }
10108
10109 schedule_delayed_work(&bp->sp_rtnl_task,
10110 HZ/10);
10111 return;
10112
10113 } else {
10114
10115
10116
10117
10118 if (bnx2x_reset_is_global(bp)) {
10119 schedule_delayed_work(
10120 &bp->sp_rtnl_task,
10121 HZ/10);
10122 return;
10123 }
10124
10125 error_recovered =
10126 bp->eth_stats.recoverable_error;
10127 error_unrecovered =
10128 bp->eth_stats.unrecoverable_error;
10129 bp->recovery_state =
10130 BNX2X_RECOVERY_NIC_LOADING;
10131 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
10132 error_unrecovered++;
10133 netdev_err(bp->dev,
10134 "Recovery failed. Power cycle needed\n");
10135
10136 netif_device_detach(bp->dev);
10137
10138 bnx2x_set_power_state(
10139 bp, PCI_D3hot);
10140 smp_mb();
10141 } else {
10142 bp->recovery_state =
10143 BNX2X_RECOVERY_DONE;
10144 error_recovered++;
10145 smp_mb();
10146 }
10147 bp->eth_stats.recoverable_error =
10148 error_recovered;
10149 bp->eth_stats.unrecoverable_error =
10150 error_unrecovered;
10151
10152 return;
10153 }
10154 }
10155 default:
10156 return;
10157 }
10158 }
10159 }
10160
10161 static int bnx2x_udp_port_update(struct bnx2x *bp)
10162 {
10163 struct bnx2x_func_switch_update_params *switch_update_params;
10164 struct bnx2x_func_state_params func_params = {NULL};
10165 u16 vxlan_port = 0, geneve_port = 0;
10166 int rc;
10167
10168 switch_update_params = &func_params.params.switch_update;
10169
10170
10171 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
10172 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
10173
10174 func_params.f_obj = &bp->func_obj;
10175 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
10176
10177
10178 __set_bit(BNX2X_F_UPDATE_TUNNEL_CFG_CHNG,
10179 &switch_update_params->changes);
10180
10181 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
10182 geneve_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
10183 switch_update_params->geneve_dst_port = geneve_port;
10184 }
10185
10186 if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
10187 vxlan_port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
10188 switch_update_params->vxlan_dst_port = vxlan_port;
10189 }
10190
10191
10192 __set_bit(BNX2X_F_UPDATE_TUNNEL_INNER_RSS,
10193 &switch_update_params->changes);
10194
10195 rc = bnx2x_func_state_change(bp, &func_params);
10196 if (rc)
10197 BNX2X_ERR("failed to set UDP dst port to %04x %04x (rc = 0x%x)\n",
10198 vxlan_port, geneve_port, rc);
10199 else
10200 DP(BNX2X_MSG_SP,
10201 "Configured UDP ports: Vxlan [%04x] Geneve [%04x]\n",
10202 vxlan_port, geneve_port);
10203
10204 return rc;
10205 }
10206
10207 static int bnx2x_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
10208 {
10209 struct bnx2x *bp = netdev_priv(netdev);
10210 struct udp_tunnel_info ti;
10211
10212 udp_tunnel_nic_get_port(netdev, table, 0, &ti);
10213 bp->udp_tunnel_ports[table] = be16_to_cpu(ti.port);
10214
10215 return bnx2x_udp_port_update(bp);
10216 }
10217
10218 static const struct udp_tunnel_nic_info bnx2x_udp_tunnels = {
10219 .sync_table = bnx2x_udp_tunnel_sync,
10220 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
10221 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
10222 .tables = {
10223 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
10224 { .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
10225 },
10226 };
10227
10228 static int bnx2x_close(struct net_device *dev);
10229
10230
10231
10232
10233 static void bnx2x_sp_rtnl_task(struct work_struct *work)
10234 {
10235 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
10236
10237 rtnl_lock();
10238
10239 if (!netif_running(bp->dev)) {
10240 rtnl_unlock();
10241 return;
10242 }
10243
10244 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
10245 #ifdef BNX2X_STOP_ON_ERROR
10246 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10247 "you will need to reboot when done\n");
10248 goto sp_rtnl_not_reset;
10249 #endif
10250
10251
10252
10253
10254 bp->sp_rtnl_state = 0;
10255 smp_mb();
10256
10257 bnx2x_parity_recover(bp);
10258
10259 rtnl_unlock();
10260 return;
10261 }
10262
10263 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
10264 #ifdef BNX2X_STOP_ON_ERROR
10265 BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
10266 "you will need to reboot when done\n");
10267 goto sp_rtnl_not_reset;
10268 #endif
10269
10270
10271
10272
10273
10274 bp->sp_rtnl_state = 0;
10275 smp_mb();
10276
10277
10278 bp->link_vars.link_up = 0;
10279 bp->force_link_down = true;
10280 netif_carrier_off(bp->dev);
10281 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10282
10283 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10284
10285
10286
10287
10288 if (bnx2x_nic_load(bp, LOAD_NORMAL) == -ENOMEM) {
10289 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10290 if (bnx2x_nic_load(bp, LOAD_NORMAL))
10291 BNX2X_ERR("Open the NIC fails again!\n");
10292 }
10293 rtnl_unlock();
10294 return;
10295 }
10296 #ifdef BNX2X_STOP_ON_ERROR
10297 sp_rtnl_not_reset:
10298 #endif
10299 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
10300 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
10301 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
10302 bnx2x_after_function_update(bp);
10303
10304
10305
10306
10307
10308 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
10309 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
10310 netif_device_detach(bp->dev);
10311 bnx2x_close(bp->dev);
10312 rtnl_unlock();
10313 return;
10314 }
10315
10316 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
10317 DP(BNX2X_MSG_SP,
10318 "sending set mcast vf pf channel message from rtnl sp-task\n");
10319 bnx2x_vfpf_set_mcast(bp->dev);
10320 }
10321 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
10322 &bp->sp_rtnl_state)){
10323 if (netif_carrier_ok(bp->dev)) {
10324 bnx2x_tx_disable(bp);
10325 BNX2X_ERR("PF indicated channel is not servicable anymore. This means this VF device is no longer operational\n");
10326 }
10327 }
10328
10329 if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
10330 DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
10331 bnx2x_set_rx_mode_inner(bp);
10332 }
10333
10334 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
10335 &bp->sp_rtnl_state))
10336 bnx2x_pf_set_vfs_vlan(bp);
10337
10338 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state)) {
10339 bnx2x_dcbx_stop_hw_tx(bp);
10340 bnx2x_dcbx_resume_hw_tx(bp);
10341 }
10342
10343 if (test_and_clear_bit(BNX2X_SP_RTNL_GET_DRV_VERSION,
10344 &bp->sp_rtnl_state))
10345 bnx2x_update_mng_version(bp);
10346
10347 if (test_and_clear_bit(BNX2X_SP_RTNL_UPDATE_SVID, &bp->sp_rtnl_state))
10348 bnx2x_handle_update_svid_cmd(bp);
10349
10350
10351
10352
10353 rtnl_unlock();
10354
10355
10356 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
10357 &bp->sp_rtnl_state)) {
10358 bnx2x_disable_sriov(bp);
10359 bnx2x_enable_sriov(bp);
10360 }
10361 }
10362
10363 static void bnx2x_period_task(struct work_struct *work)
10364 {
10365 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
10366
10367 if (!netif_running(bp->dev))
10368 goto period_task_exit;
10369
10370 if (CHIP_REV_IS_SLOW(bp)) {
10371 BNX2X_ERR("period task called on emulation, ignoring\n");
10372 goto period_task_exit;
10373 }
10374
10375 bnx2x_acquire_phy_lock(bp);
10376
10377
10378
10379
10380
10381 smp_mb();
10382 if (bp->port.pmf) {
10383 bnx2x_period_func(&bp->link_params, &bp->link_vars);
10384
10385
10386 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
10387 }
10388
10389 bnx2x_release_phy_lock(bp);
10390 period_task_exit:
10391 return;
10392 }
10393
10394
10395
10396
10397
10398 static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
10399 {
10400 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
10401 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
10402 return base + (BP_ABS_FUNC(bp)) * stride;
10403 }
10404
10405 static bool bnx2x_prev_unload_close_umac(struct bnx2x *bp,
10406 u8 port, u32 reset_reg,
10407 struct bnx2x_mac_vals *vals)
10408 {
10409 u32 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
10410 u32 base_addr;
10411
10412 if (!(mask & reset_reg))
10413 return false;
10414
10415 BNX2X_DEV_INFO("Disable umac Rx %02x\n", port);
10416 base_addr = port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
10417 vals->umac_addr[port] = base_addr + UMAC_REG_COMMAND_CONFIG;
10418 vals->umac_val[port] = REG_RD(bp, vals->umac_addr[port]);
10419 REG_WR(bp, vals->umac_addr[port], 0);
10420
10421 return true;
10422 }
10423
10424 static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
10425 struct bnx2x_mac_vals *vals)
10426 {
10427 u32 val, base_addr, offset, mask, reset_reg;
10428 bool mac_stopped = false;
10429 u8 port = BP_PORT(bp);
10430
10431
10432 memset(vals, 0, sizeof(*vals));
10433
10434 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
10435
10436 if (!CHIP_IS_E3(bp)) {
10437 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
10438 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
10439 if ((mask & reset_reg) && val) {
10440 u32 wb_data[2];
10441 BNX2X_DEV_INFO("Disable bmac Rx\n");
10442 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
10443 : NIG_REG_INGRESS_BMAC0_MEM;
10444 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
10445 : BIGMAC_REGISTER_BMAC_CONTROL;
10446
10447
10448
10449
10450
10451
10452
10453 wb_data[0] = REG_RD(bp, base_addr + offset);
10454 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
10455 vals->bmac_addr = base_addr + offset;
10456 vals->bmac_val[0] = wb_data[0];
10457 vals->bmac_val[1] = wb_data[1];
10458 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
10459 REG_WR(bp, vals->bmac_addr, wb_data[0]);
10460 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
10461 }
10462 BNX2X_DEV_INFO("Disable emac Rx\n");
10463 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
10464 vals->emac_val = REG_RD(bp, vals->emac_addr);
10465 REG_WR(bp, vals->emac_addr, 0);
10466 mac_stopped = true;
10467 } else {
10468 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
10469 BNX2X_DEV_INFO("Disable xmac Rx\n");
10470 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
10471 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
10472 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10473 val & ~(1 << 1));
10474 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
10475 val | (1 << 1));
10476 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
10477 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
10478 REG_WR(bp, vals->xmac_addr, 0);
10479 mac_stopped = true;
10480 }
10481
10482 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 0,
10483 reset_reg, vals);
10484 mac_stopped |= bnx2x_prev_unload_close_umac(bp, 1,
10485 reset_reg, vals);
10486 }
10487
10488 if (mac_stopped)
10489 msleep(20);
10490 }
10491
10492 #define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
10493 #define BNX2X_PREV_UNDI_PROD_ADDR_H(f) (BAR_TSTRORM_INTMEM + \
10494 0x1848 + ((f) << 4))
10495 #define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
10496 #define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
10497 #define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
10498
10499 #define BCM_5710_UNDI_FW_MF_MAJOR (0x07)
10500 #define BCM_5710_UNDI_FW_MF_MINOR (0x08)
10501 #define BCM_5710_UNDI_FW_MF_VERS (0x05)
10502
10503 static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
10504 {
10505
10506
10507
10508 if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
10509 MISC_REGISTERS_RESET_REG_1_RST_DORQ))
10510 return false;
10511
10512 if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
10513 BNX2X_DEV_INFO("UNDI previously loaded\n");
10514 return true;
10515 }
10516
10517 return false;
10518 }
10519
10520 static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 inc)
10521 {
10522 u16 rcq, bd;
10523 u32 addr, tmp_reg;
10524
10525 if (BP_FUNC(bp) < 2)
10526 addr = BNX2X_PREV_UNDI_PROD_ADDR(BP_PORT(bp));
10527 else
10528 addr = BNX2X_PREV_UNDI_PROD_ADDR_H(BP_FUNC(bp) - 2);
10529
10530 tmp_reg = REG_RD(bp, addr);
10531 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
10532 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
10533
10534 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
10535 REG_WR(bp, addr, tmp_reg);
10536
10537 BNX2X_DEV_INFO("UNDI producer [%d/%d][%08x] rings bd -> 0x%04x, rcq -> 0x%04x\n",
10538 BP_PORT(bp), BP_FUNC(bp), addr, bd, rcq);
10539 }
10540
10541 static int bnx2x_prev_mcp_done(struct bnx2x *bp)
10542 {
10543 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
10544 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
10545 if (!rc) {
10546 BNX2X_ERR("MCP response failure, aborting\n");
10547 return -EBUSY;
10548 }
10549
10550 return 0;
10551 }
10552
10553 static struct bnx2x_prev_path_list *
10554 bnx2x_prev_path_get_entry(struct bnx2x *bp)
10555 {
10556 struct bnx2x_prev_path_list *tmp_list;
10557
10558 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
10559 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
10560 bp->pdev->bus->number == tmp_list->bus &&
10561 BP_PATH(bp) == tmp_list->path)
10562 return tmp_list;
10563
10564 return NULL;
10565 }
10566
10567 static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
10568 {
10569 struct bnx2x_prev_path_list *tmp_list;
10570 int rc;
10571
10572 rc = down_interruptible(&bnx2x_prev_sem);
10573 if (rc) {
10574 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10575 return rc;
10576 }
10577
10578 tmp_list = bnx2x_prev_path_get_entry(bp);
10579 if (tmp_list) {
10580 tmp_list->aer = 1;
10581 rc = 0;
10582 } else {
10583 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
10584 BP_PATH(bp));
10585 }
10586
10587 up(&bnx2x_prev_sem);
10588
10589 return rc;
10590 }
10591
10592 static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
10593 {
10594 struct bnx2x_prev_path_list *tmp_list;
10595 bool rc = false;
10596
10597 if (down_trylock(&bnx2x_prev_sem))
10598 return false;
10599
10600 tmp_list = bnx2x_prev_path_get_entry(bp);
10601 if (tmp_list) {
10602 if (tmp_list->aer) {
10603 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
10604 BP_PATH(bp));
10605 } else {
10606 rc = true;
10607 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
10608 BP_PATH(bp));
10609 }
10610 }
10611
10612 up(&bnx2x_prev_sem);
10613
10614 return rc;
10615 }
10616
10617 bool bnx2x_port_after_undi(struct bnx2x *bp)
10618 {
10619 struct bnx2x_prev_path_list *entry;
10620 bool val;
10621
10622 down(&bnx2x_prev_sem);
10623
10624 entry = bnx2x_prev_path_get_entry(bp);
10625 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
10626
10627 up(&bnx2x_prev_sem);
10628
10629 return val;
10630 }
10631
10632 static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
10633 {
10634 struct bnx2x_prev_path_list *tmp_list;
10635 int rc;
10636
10637 rc = down_interruptible(&bnx2x_prev_sem);
10638 if (rc) {
10639 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10640 return rc;
10641 }
10642
10643
10644 tmp_list = bnx2x_prev_path_get_entry(bp);
10645 if (tmp_list) {
10646 if (!tmp_list->aer) {
10647 BNX2X_ERR("Re-Marking the path.\n");
10648 } else {
10649 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
10650 BP_PATH(bp));
10651 tmp_list->aer = 0;
10652 }
10653 up(&bnx2x_prev_sem);
10654 return 0;
10655 }
10656 up(&bnx2x_prev_sem);
10657
10658
10659 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
10660 if (!tmp_list) {
10661 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
10662 return -ENOMEM;
10663 }
10664
10665 tmp_list->bus = bp->pdev->bus->number;
10666 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
10667 tmp_list->path = BP_PATH(bp);
10668 tmp_list->aer = 0;
10669 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
10670
10671 rc = down_interruptible(&bnx2x_prev_sem);
10672 if (rc) {
10673 BNX2X_ERR("Received %d when tried to take lock\n", rc);
10674 kfree(tmp_list);
10675 } else {
10676 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
10677 BP_PATH(bp));
10678 list_add(&tmp_list->list, &bnx2x_prev_list);
10679 up(&bnx2x_prev_sem);
10680 }
10681
10682 return rc;
10683 }
10684
10685 static int bnx2x_do_flr(struct bnx2x *bp)
10686 {
10687 struct pci_dev *dev = bp->pdev;
10688
10689 if (CHIP_IS_E1x(bp)) {
10690 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
10691 return -EINVAL;
10692 }
10693
10694
10695 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
10696 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
10697 bp->common.bc_ver);
10698 return -EINVAL;
10699 }
10700
10701 if (!pci_wait_for_pending_transaction(dev))
10702 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
10703
10704 BNX2X_DEV_INFO("Initiating FLR\n");
10705 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
10706
10707 return 0;
10708 }
10709
10710 static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
10711 {
10712 int rc;
10713
10714 BNX2X_DEV_INFO("Uncommon unload Flow\n");
10715
10716
10717 if (bnx2x_prev_is_path_marked(bp))
10718 return bnx2x_prev_mcp_done(bp);
10719
10720 BNX2X_DEV_INFO("Path is unmarked\n");
10721
10722
10723 if (bnx2x_prev_is_after_undi(bp))
10724 goto out;
10725
10726
10727
10728
10729
10730 rc = bnx2x_compare_fw_ver(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION, false);
10731
10732 if (!rc) {
10733
10734 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
10735 rc = bnx2x_do_flr(bp);
10736 }
10737
10738 if (!rc) {
10739
10740 BNX2X_DEV_INFO("FLR successful\n");
10741 return 0;
10742 }
10743
10744 BNX2X_DEV_INFO("Could not FLR\n");
10745
10746 out:
10747
10748 rc = bnx2x_prev_mcp_done(bp);
10749 if (!rc)
10750 rc = BNX2X_PREV_WAIT_NEEDED;
10751
10752 return rc;
10753 }
10754
10755 static int bnx2x_prev_unload_common(struct bnx2x *bp)
10756 {
10757 u32 reset_reg, tmp_reg = 0, rc;
10758 bool prev_undi = false;
10759 struct bnx2x_mac_vals mac_vals;
10760
10761
10762
10763
10764
10765 BNX2X_DEV_INFO("Common unload Flow\n");
10766
10767 memset(&mac_vals, 0, sizeof(mac_vals));
10768
10769 if (bnx2x_prev_is_path_marked(bp))
10770 return bnx2x_prev_mcp_done(bp);
10771
10772 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
10773
10774
10775 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
10776 u32 timer_count = 1000;
10777
10778
10779 bnx2x_prev_unload_close_mac(bp, &mac_vals);
10780
10781
10782 bnx2x_set_rx_filter(&bp->link_params, 0);
10783 bp->link_params.port ^= 1;
10784 bnx2x_set_rx_filter(&bp->link_params, 0);
10785 bp->link_params.port ^= 1;
10786
10787
10788 if (bnx2x_prev_is_after_undi(bp)) {
10789 prev_undi = true;
10790
10791 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
10792
10793 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
10794 }
10795 if (!CHIP_IS_E1x(bp))
10796
10797 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
10798
10799
10800 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10801 while (timer_count) {
10802 u32 prev_brb = tmp_reg;
10803
10804 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
10805 if (!tmp_reg)
10806 break;
10807
10808 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
10809
10810
10811 if (prev_brb > tmp_reg)
10812 timer_count = 1000;
10813 else
10814 timer_count--;
10815
10816
10817 if (prev_undi)
10818 bnx2x_prev_unload_undi_inc(bp, 1);
10819
10820 udelay(10);
10821 }
10822
10823 if (!timer_count)
10824 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10825 }
10826
10827
10828 bnx2x_reset_common(bp);
10829
10830 if (mac_vals.xmac_addr)
10831 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10832 if (mac_vals.umac_addr[0])
10833 REG_WR(bp, mac_vals.umac_addr[0], mac_vals.umac_val[0]);
10834 if (mac_vals.umac_addr[1])
10835 REG_WR(bp, mac_vals.umac_addr[1], mac_vals.umac_val[1]);
10836 if (mac_vals.emac_addr)
10837 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10838 if (mac_vals.bmac_addr) {
10839 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10840 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10841 }
10842
10843 rc = bnx2x_prev_mark_path(bp, prev_undi);
10844 if (rc) {
10845 bnx2x_prev_mcp_done(bp);
10846 return rc;
10847 }
10848
10849 return bnx2x_prev_mcp_done(bp);
10850 }
10851
10852 static int bnx2x_prev_unload(struct bnx2x *bp)
10853 {
10854 int time_counter = 10;
10855 u32 rc, fw, hw_lock_reg, hw_lock_val;
10856 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10857
10858
10859
10860
10861 bnx2x_clean_pglue_errors(bp);
10862
10863
10864 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10865 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10866 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10867
10868 hw_lock_val = REG_RD(bp, hw_lock_reg);
10869 if (hw_lock_val) {
10870 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10871 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10872 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10873 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10874 }
10875
10876 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10877 REG_WR(bp, hw_lock_reg, 0xffffffff);
10878 } else
10879 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10880
10881 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10882 BNX2X_DEV_INFO("Release previously held alr\n");
10883 bnx2x_release_alr(bp);
10884 }
10885
10886 do {
10887 int aer = 0;
10888
10889 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10890 if (!fw) {
10891 BNX2X_ERR("MCP response failure, aborting\n");
10892 rc = -EBUSY;
10893 break;
10894 }
10895
10896 rc = down_interruptible(&bnx2x_prev_sem);
10897 if (rc) {
10898 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10899 rc);
10900 } else {
10901
10902 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10903 bnx2x_prev_path_get_entry(bp)->aer);
10904 up(&bnx2x_prev_sem);
10905 }
10906
10907 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
10908 rc = bnx2x_prev_unload_common(bp);
10909 break;
10910 }
10911
10912
10913 rc = bnx2x_prev_unload_uncommon(bp);
10914 if (rc != BNX2X_PREV_WAIT_NEEDED)
10915 break;
10916
10917 msleep(20);
10918 } while (--time_counter);
10919
10920 if (!time_counter || rc) {
10921 BNX2X_DEV_INFO("Unloading previous driver did not occur, Possibly due to MF UNDI\n");
10922 rc = -EPROBE_DEFER;
10923 }
10924
10925
10926 if (bnx2x_port_after_undi(bp))
10927 bp->link_params.feature_config_flags |=
10928 FEATURE_CONFIG_BOOT_FROM_SAN;
10929
10930 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10931
10932 return rc;
10933 }
10934
10935 static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10936 {
10937 u32 val, val2, val3, val4, id, boot_mode;
10938 u16 pmc;
10939
10940
10941
10942 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10943 id = ((val & 0xffff) << 16);
10944 val = REG_RD(bp, MISC_REG_CHIP_REV);
10945 id |= ((val & 0xf) << 12);
10946
10947
10948
10949
10950 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10951 id |= (((val >> 24) & 0xf) << 4);
10952 val = REG_RD(bp, MISC_REG_BOND_ID);
10953 id |= (val & 0xf);
10954 bp->common.chip_id = id;
10955
10956
10957 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10958 if (CHIP_IS_57810(bp))
10959 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10960 (bp->common.chip_id & 0x0000FFFF);
10961 else if (CHIP_IS_57810_MF(bp))
10962 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10963 (bp->common.chip_id & 0x0000FFFF);
10964 bp->common.chip_id |= 0x1;
10965 }
10966
10967
10968 bp->db_size = (1 << BNX2X_DB_SHIFT);
10969
10970 if (!CHIP_IS_E1x(bp)) {
10971 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10972 if ((val & 1) == 0)
10973 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10974 else
10975 val = (val >> 1) & 1;
10976 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10977 "2_PORT_MODE");
10978 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10979 CHIP_2_PORT_MODE;
10980
10981 if (CHIP_MODE_IS_4_PORT(bp))
10982 bp->pfid = (bp->pf_num >> 1);
10983 else
10984 bp->pfid = (bp->pf_num & 0x6);
10985 } else {
10986 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE;
10987 bp->pfid = bp->pf_num;
10988 }
10989
10990 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10991
10992 bp->link_params.chip_id = bp->common.chip_id;
10993 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
10994
10995 val = (REG_RD(bp, 0x2874) & 0x55);
10996 if ((bp->common.chip_id & 0x1) ||
10997 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10998 bp->flags |= ONE_PORT_FLAG;
10999 BNX2X_DEV_INFO("single port device\n");
11000 }
11001
11002 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
11003 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
11004 (val & MCPR_NVM_CFG4_FLASH_SIZE));
11005 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
11006 bp->common.flash_size, bp->common.flash_size);
11007
11008 bnx2x_init_shmem(bp);
11009
11010 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
11011 MISC_REG_GENERIC_CR_1 :
11012 MISC_REG_GENERIC_CR_0));
11013
11014 bp->link_params.shmem_base = bp->common.shmem_base;
11015 bp->link_params.shmem2_base = bp->common.shmem2_base;
11016 if (SHMEM2_RD(bp, size) >
11017 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
11018 bp->link_params.lfa_base =
11019 REG_RD(bp, bp->common.shmem2_base +
11020 (u32)offsetof(struct shmem2_region,
11021 lfa_host_addr[BP_PORT(bp)]));
11022 else
11023 bp->link_params.lfa_base = 0;
11024 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
11025 bp->common.shmem_base, bp->common.shmem2_base);
11026
11027 if (!bp->common.shmem_base) {
11028 BNX2X_DEV_INFO("MCP not active\n");
11029 bp->flags |= NO_MCP_FLAG;
11030 return;
11031 }
11032
11033 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
11034 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
11035
11036 bp->link_params.hw_led_mode = ((bp->common.hw_config &
11037 SHARED_HW_CFG_LED_MODE_MASK) >>
11038 SHARED_HW_CFG_LED_MODE_SHIFT);
11039
11040 bp->link_params.feature_config_flags = 0;
11041 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
11042 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
11043 bp->link_params.feature_config_flags |=
11044 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11045 else
11046 bp->link_params.feature_config_flags &=
11047 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
11048
11049 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
11050 bp->common.bc_ver = val;
11051 BNX2X_DEV_INFO("bc_ver %X\n", val);
11052 if (val < BNX2X_BC_VER) {
11053
11054
11055 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
11056 BNX2X_BC_VER, val);
11057 }
11058 bp->link_params.feature_config_flags |=
11059 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
11060 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
11061
11062 bp->link_params.feature_config_flags |=
11063 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
11064 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
11065 bp->link_params.feature_config_flags |=
11066 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
11067 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
11068 bp->link_params.feature_config_flags |=
11069 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
11070 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
11071
11072 bp->link_params.feature_config_flags |=
11073 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
11074 FEATURE_CONFIG_MT_SUPPORT : 0;
11075
11076 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
11077 BC_SUPPORTS_PFC_STATS : 0;
11078
11079 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
11080 BC_SUPPORTS_FCOE_FEATURES : 0;
11081
11082 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
11083 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
11084
11085 bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
11086 BC_SUPPORTS_RMMOD_CMD : 0;
11087
11088 boot_mode = SHMEM_RD(bp,
11089 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
11090 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
11091 switch (boot_mode) {
11092 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
11093 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
11094 break;
11095 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
11096 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
11097 break;
11098 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
11099 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
11100 break;
11101 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
11102 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
11103 break;
11104 }
11105
11106 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_PMC, &pmc);
11107 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
11108
11109 BNX2X_DEV_INFO("%sWoL capable\n",
11110 (bp->flags & NO_WOL_FLAG) ? "not " : "");
11111
11112 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
11113 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
11114 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
11115 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
11116
11117 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
11118 val, val2, val3, val4);
11119 }
11120
11121 #define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
11122 #define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
11123
11124 static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
11125 {
11126 int pfid = BP_FUNC(bp);
11127 int igu_sb_id;
11128 u32 val;
11129 u8 fid, igu_sb_cnt = 0;
11130
11131 bp->igu_base_sb = 0xff;
11132 if (CHIP_INT_MODE_IS_BC(bp)) {
11133 int vn = BP_VN(bp);
11134 igu_sb_cnt = bp->igu_sb_cnt;
11135 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
11136 FP_SB_MAX_E1x;
11137
11138 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
11139 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
11140
11141 return 0;
11142 }
11143
11144
11145 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
11146 igu_sb_id++) {
11147 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
11148 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
11149 continue;
11150 fid = IGU_FID(val);
11151 if ((fid & IGU_FID_ENCODE_IS_PF)) {
11152 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
11153 continue;
11154 if (IGU_VEC(val) == 0)
11155
11156 bp->igu_dsb_id = igu_sb_id;
11157 else {
11158 if (bp->igu_base_sb == 0xff)
11159 bp->igu_base_sb = igu_sb_id;
11160 igu_sb_cnt++;
11161 }
11162 }
11163 }
11164
11165 #ifdef CONFIG_PCI_MSI
11166
11167
11168
11169
11170
11171
11172 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
11173 #endif
11174
11175 if (igu_sb_cnt == 0) {
11176 BNX2X_ERR("CAM configuration error\n");
11177 return -EINVAL;
11178 }
11179
11180 return 0;
11181 }
11182
11183 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
11184 {
11185 int cfg_size = 0, idx, port = BP_PORT(bp);
11186
11187
11188 bp->port.supported[0] = 0;
11189 bp->port.supported[1] = 0;
11190 switch (bp->link_params.num_phys) {
11191 case 1:
11192 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
11193 cfg_size = 1;
11194 break;
11195 case 2:
11196 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
11197 cfg_size = 1;
11198 break;
11199 case 3:
11200 if (bp->link_params.multi_phy_config &
11201 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
11202 bp->port.supported[1] =
11203 bp->link_params.phy[EXT_PHY1].supported;
11204 bp->port.supported[0] =
11205 bp->link_params.phy[EXT_PHY2].supported;
11206 } else {
11207 bp->port.supported[0] =
11208 bp->link_params.phy[EXT_PHY1].supported;
11209 bp->port.supported[1] =
11210 bp->link_params.phy[EXT_PHY2].supported;
11211 }
11212 cfg_size = 2;
11213 break;
11214 }
11215
11216 if (!(bp->port.supported[0] || bp->port.supported[1])) {
11217 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
11218 SHMEM_RD(bp,
11219 dev_info.port_hw_config[port].external_phy_config),
11220 SHMEM_RD(bp,
11221 dev_info.port_hw_config[port].external_phy_config2));
11222 return;
11223 }
11224
11225 if (CHIP_IS_E3(bp))
11226 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
11227 else {
11228 switch (switch_cfg) {
11229 case SWITCH_CFG_1G:
11230 bp->port.phy_addr = REG_RD(
11231 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
11232 break;
11233 case SWITCH_CFG_10G:
11234 bp->port.phy_addr = REG_RD(
11235 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
11236 break;
11237 default:
11238 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
11239 bp->port.link_config[0]);
11240 return;
11241 }
11242 }
11243 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
11244
11245 for (idx = 0; idx < cfg_size; idx++) {
11246 if (!(bp->link_params.speed_cap_mask[idx] &
11247 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
11248 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
11249
11250 if (!(bp->link_params.speed_cap_mask[idx] &
11251 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
11252 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
11253
11254 if (!(bp->link_params.speed_cap_mask[idx] &
11255 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
11256 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
11257
11258 if (!(bp->link_params.speed_cap_mask[idx] &
11259 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
11260 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
11261
11262 if (!(bp->link_params.speed_cap_mask[idx] &
11263 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
11264 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
11265 SUPPORTED_1000baseT_Full);
11266
11267 if (!(bp->link_params.speed_cap_mask[idx] &
11268 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
11269 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
11270
11271 if (!(bp->link_params.speed_cap_mask[idx] &
11272 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
11273 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
11274
11275 if (!(bp->link_params.speed_cap_mask[idx] &
11276 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
11277 bp->port.supported[idx] &= ~SUPPORTED_20000baseKR2_Full;
11278 }
11279
11280 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
11281 bp->port.supported[1]);
11282 }
11283
11284 static void bnx2x_link_settings_requested(struct bnx2x *bp)
11285 {
11286 u32 link_config, idx, cfg_size = 0;
11287 bp->port.advertising[0] = 0;
11288 bp->port.advertising[1] = 0;
11289 switch (bp->link_params.num_phys) {
11290 case 1:
11291 case 2:
11292 cfg_size = 1;
11293 break;
11294 case 3:
11295 cfg_size = 2;
11296 break;
11297 }
11298 for (idx = 0; idx < cfg_size; idx++) {
11299 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
11300 link_config = bp->port.link_config[idx];
11301 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
11302 case PORT_FEATURE_LINK_SPEED_AUTO:
11303 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
11304 bp->link_params.req_line_speed[idx] =
11305 SPEED_AUTO_NEG;
11306 bp->port.advertising[idx] |=
11307 bp->port.supported[idx];
11308 if (bp->link_params.phy[EXT_PHY1].type ==
11309 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
11310 bp->port.advertising[idx] |=
11311 (SUPPORTED_100baseT_Half |
11312 SUPPORTED_100baseT_Full);
11313 } else {
11314
11315 bp->link_params.req_line_speed[idx] =
11316 SPEED_10000;
11317 bp->port.advertising[idx] |=
11318 (ADVERTISED_10000baseT_Full |
11319 ADVERTISED_FIBRE);
11320 continue;
11321 }
11322 break;
11323
11324 case PORT_FEATURE_LINK_SPEED_10M_FULL:
11325 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
11326 bp->link_params.req_line_speed[idx] =
11327 SPEED_10;
11328 bp->port.advertising[idx] |=
11329 (ADVERTISED_10baseT_Full |
11330 ADVERTISED_TP);
11331 } else {
11332 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11333 link_config,
11334 bp->link_params.speed_cap_mask[idx]);
11335 return;
11336 }
11337 break;
11338
11339 case PORT_FEATURE_LINK_SPEED_10M_HALF:
11340 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
11341 bp->link_params.req_line_speed[idx] =
11342 SPEED_10;
11343 bp->link_params.req_duplex[idx] =
11344 DUPLEX_HALF;
11345 bp->port.advertising[idx] |=
11346 (ADVERTISED_10baseT_Half |
11347 ADVERTISED_TP);
11348 } else {
11349 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11350 link_config,
11351 bp->link_params.speed_cap_mask[idx]);
11352 return;
11353 }
11354 break;
11355
11356 case PORT_FEATURE_LINK_SPEED_100M_FULL:
11357 if (bp->port.supported[idx] &
11358 SUPPORTED_100baseT_Full) {
11359 bp->link_params.req_line_speed[idx] =
11360 SPEED_100;
11361 bp->port.advertising[idx] |=
11362 (ADVERTISED_100baseT_Full |
11363 ADVERTISED_TP);
11364 } else {
11365 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11366 link_config,
11367 bp->link_params.speed_cap_mask[idx]);
11368 return;
11369 }
11370 break;
11371
11372 case PORT_FEATURE_LINK_SPEED_100M_HALF:
11373 if (bp->port.supported[idx] &
11374 SUPPORTED_100baseT_Half) {
11375 bp->link_params.req_line_speed[idx] =
11376 SPEED_100;
11377 bp->link_params.req_duplex[idx] =
11378 DUPLEX_HALF;
11379 bp->port.advertising[idx] |=
11380 (ADVERTISED_100baseT_Half |
11381 ADVERTISED_TP);
11382 } else {
11383 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11384 link_config,
11385 bp->link_params.speed_cap_mask[idx]);
11386 return;
11387 }
11388 break;
11389
11390 case PORT_FEATURE_LINK_SPEED_1G:
11391 if (bp->port.supported[idx] &
11392 SUPPORTED_1000baseT_Full) {
11393 bp->link_params.req_line_speed[idx] =
11394 SPEED_1000;
11395 bp->port.advertising[idx] |=
11396 (ADVERTISED_1000baseT_Full |
11397 ADVERTISED_TP);
11398 } else if (bp->port.supported[idx] &
11399 SUPPORTED_1000baseKX_Full) {
11400 bp->link_params.req_line_speed[idx] =
11401 SPEED_1000;
11402 bp->port.advertising[idx] |=
11403 ADVERTISED_1000baseKX_Full;
11404 } else {
11405 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11406 link_config,
11407 bp->link_params.speed_cap_mask[idx]);
11408 return;
11409 }
11410 break;
11411
11412 case PORT_FEATURE_LINK_SPEED_2_5G:
11413 if (bp->port.supported[idx] &
11414 SUPPORTED_2500baseX_Full) {
11415 bp->link_params.req_line_speed[idx] =
11416 SPEED_2500;
11417 bp->port.advertising[idx] |=
11418 (ADVERTISED_2500baseX_Full |
11419 ADVERTISED_TP);
11420 } else {
11421 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11422 link_config,
11423 bp->link_params.speed_cap_mask[idx]);
11424 return;
11425 }
11426 break;
11427
11428 case PORT_FEATURE_LINK_SPEED_10G_CX4:
11429 if (bp->port.supported[idx] &
11430 SUPPORTED_10000baseT_Full) {
11431 bp->link_params.req_line_speed[idx] =
11432 SPEED_10000;
11433 bp->port.advertising[idx] |=
11434 (ADVERTISED_10000baseT_Full |
11435 ADVERTISED_FIBRE);
11436 } else if (bp->port.supported[idx] &
11437 SUPPORTED_10000baseKR_Full) {
11438 bp->link_params.req_line_speed[idx] =
11439 SPEED_10000;
11440 bp->port.advertising[idx] |=
11441 (ADVERTISED_10000baseKR_Full |
11442 ADVERTISED_FIBRE);
11443 } else {
11444 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
11445 link_config,
11446 bp->link_params.speed_cap_mask[idx]);
11447 return;
11448 }
11449 break;
11450 case PORT_FEATURE_LINK_SPEED_20G:
11451 bp->link_params.req_line_speed[idx] = SPEED_20000;
11452
11453 break;
11454 default:
11455 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
11456 link_config);
11457 bp->link_params.req_line_speed[idx] =
11458 SPEED_AUTO_NEG;
11459 bp->port.advertising[idx] =
11460 bp->port.supported[idx];
11461 break;
11462 }
11463
11464 bp->link_params.req_flow_ctrl[idx] = (link_config &
11465 PORT_FEATURE_FLOW_CONTROL_MASK);
11466 if (bp->link_params.req_flow_ctrl[idx] ==
11467 BNX2X_FLOW_CTRL_AUTO) {
11468 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
11469 bp->link_params.req_flow_ctrl[idx] =
11470 BNX2X_FLOW_CTRL_NONE;
11471 else
11472 bnx2x_set_requested_fc(bp);
11473 }
11474
11475 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
11476 bp->link_params.req_line_speed[idx],
11477 bp->link_params.req_duplex[idx],
11478 bp->link_params.req_flow_ctrl[idx],
11479 bp->port.advertising[idx]);
11480 }
11481 }
11482
11483 static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
11484 {
11485 __be16 mac_hi_be = cpu_to_be16(mac_hi);
11486 __be32 mac_lo_be = cpu_to_be32(mac_lo);
11487 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
11488 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
11489 }
11490
11491 static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
11492 {
11493 int port = BP_PORT(bp);
11494 u32 config;
11495 u32 ext_phy_type, ext_phy_config, eee_mode;
11496
11497 bp->link_params.bp = bp;
11498 bp->link_params.port = port;
11499
11500 bp->link_params.lane_config =
11501 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
11502
11503 bp->link_params.speed_cap_mask[0] =
11504 SHMEM_RD(bp,
11505 dev_info.port_hw_config[port].speed_capability_mask) &
11506 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11507 bp->link_params.speed_cap_mask[1] =
11508 SHMEM_RD(bp,
11509 dev_info.port_hw_config[port].speed_capability_mask2) &
11510 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
11511 bp->port.link_config[0] =
11512 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
11513
11514 bp->port.link_config[1] =
11515 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
11516
11517 bp->link_params.multi_phy_config =
11518 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
11519
11520
11521
11522 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
11523 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
11524 (config & PORT_FEATURE_WOL_ENABLED));
11525
11526 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11527 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
11528 bp->flags |= NO_ISCSI_FLAG;
11529 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
11530 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
11531 bp->flags |= NO_FCOE_FLAG;
11532
11533 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
11534 bp->link_params.lane_config,
11535 bp->link_params.speed_cap_mask[0],
11536 bp->port.link_config[0]);
11537
11538 bp->link_params.switch_cfg = (bp->port.link_config[0] &
11539 PORT_FEATURE_CONNECTED_SWITCH_MASK);
11540 bnx2x_phy_probe(&bp->link_params);
11541 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
11542
11543 bnx2x_link_settings_requested(bp);
11544
11545
11546
11547
11548
11549 ext_phy_config =
11550 SHMEM_RD(bp,
11551 dev_info.port_hw_config[port].external_phy_config);
11552 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
11553 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
11554 bp->mdio.prtad = bp->port.phy_addr;
11555
11556 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
11557 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
11558 bp->mdio.prtad =
11559 XGXS_EXT_PHY_ADDR(ext_phy_config);
11560
11561
11562 eee_mode = (((SHMEM_RD(bp, dev_info.
11563 port_feature_config[port].eee_power_mode)) &
11564 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
11565 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
11566 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
11567 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
11568 EEE_MODE_ENABLE_LPI |
11569 EEE_MODE_OUTPUT_TIME;
11570 } else {
11571 bp->link_params.eee_mode = 0;
11572 }
11573 }
11574
11575 void bnx2x_get_iscsi_info(struct bnx2x *bp)
11576 {
11577 u32 no_flags = NO_ISCSI_FLAG;
11578 int port = BP_PORT(bp);
11579 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11580 drv_lic_key[port].max_iscsi_conn);
11581
11582 if (!CNIC_SUPPORT(bp)) {
11583 bp->flags |= no_flags;
11584 return;
11585 }
11586
11587
11588 bp->cnic_eth_dev.max_iscsi_conn =
11589 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
11590 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
11591
11592 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
11593 bp->cnic_eth_dev.max_iscsi_conn);
11594
11595
11596
11597
11598
11599 if (!bp->cnic_eth_dev.max_iscsi_conn)
11600 bp->flags |= no_flags;
11601 }
11602
11603 static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
11604 {
11605
11606 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11607 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
11608 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11609 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
11610
11611
11612 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11613 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
11614 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11615 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
11616 }
11617
11618 static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
11619 {
11620 u8 count = 0;
11621
11622 if (IS_MF(bp)) {
11623 u8 fid;
11624
11625
11626 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
11627 if (IS_MF_SD(bp)) {
11628 u32 cfg = MF_CFG_RD(bp,
11629 func_mf_config[fid].config);
11630
11631 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
11632 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
11633 FUNC_MF_CFG_PROTOCOL_FCOE))
11634 count++;
11635 } else {
11636 u32 cfg = MF_CFG_RD(bp,
11637 func_ext_config[fid].
11638 func_cfg);
11639
11640 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
11641 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
11642 count++;
11643 }
11644 }
11645 } else {
11646 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
11647
11648 for (port = 0; port < port_cnt; port++) {
11649 u32 lic = SHMEM_RD(bp,
11650 drv_lic_key[port].max_fcoe_conn) ^
11651 FW_ENCODE_32BIT_PATTERN;
11652 if (lic)
11653 count++;
11654 }
11655 }
11656
11657 return count;
11658 }
11659
11660 static void bnx2x_get_fcoe_info(struct bnx2x *bp)
11661 {
11662 int port = BP_PORT(bp);
11663 int func = BP_ABS_FUNC(bp);
11664 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
11665 drv_lic_key[port].max_fcoe_conn);
11666 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
11667
11668 if (!CNIC_SUPPORT(bp)) {
11669 bp->flags |= NO_FCOE_FLAG;
11670 return;
11671 }
11672
11673
11674 bp->cnic_eth_dev.max_fcoe_conn =
11675 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
11676 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
11677
11678
11679 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
11680
11681
11682 if (num_fcoe_func)
11683 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
11684
11685
11686 if (!IS_MF(bp)) {
11687
11688 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
11689 SHMEM_RD(bp,
11690 dev_info.port_hw_config[port].
11691 fcoe_wwn_port_name_upper);
11692 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
11693 SHMEM_RD(bp,
11694 dev_info.port_hw_config[port].
11695 fcoe_wwn_port_name_lower);
11696
11697
11698 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
11699 SHMEM_RD(bp,
11700 dev_info.port_hw_config[port].
11701 fcoe_wwn_node_name_upper);
11702 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
11703 SHMEM_RD(bp,
11704 dev_info.port_hw_config[port].
11705 fcoe_wwn_node_name_lower);
11706 } else if (!IS_MF_SD(bp)) {
11707
11708
11709
11710 if (BNX2X_HAS_MF_EXT_PROTOCOL_FCOE(bp))
11711 bnx2x_get_ext_wwn_info(bp, func);
11712 } else {
11713 if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
11714 bnx2x_get_ext_wwn_info(bp, func);
11715 }
11716
11717 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
11718
11719
11720
11721
11722
11723 if (!bp->cnic_eth_dev.max_fcoe_conn) {
11724 bp->flags |= NO_FCOE_FLAG;
11725 eth_zero_addr(bp->fip_mac);
11726 }
11727 }
11728
11729 static void bnx2x_get_cnic_info(struct bnx2x *bp)
11730 {
11731
11732
11733
11734
11735
11736 bnx2x_get_iscsi_info(bp);
11737 bnx2x_get_fcoe_info(bp);
11738 }
11739
11740 static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
11741 {
11742 u32 val, val2;
11743 int func = BP_ABS_FUNC(bp);
11744 int port = BP_PORT(bp);
11745 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
11746 u8 *fip_mac = bp->fip_mac;
11747
11748 if (IS_MF(bp)) {
11749
11750
11751
11752
11753
11754 if (!IS_MF_SD(bp)) {
11755 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
11756 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
11757 val2 = MF_CFG_RD(bp, func_ext_config[func].
11758 iscsi_mac_addr_upper);
11759 val = MF_CFG_RD(bp, func_ext_config[func].
11760 iscsi_mac_addr_lower);
11761 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11762 BNX2X_DEV_INFO
11763 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11764 } else {
11765 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11766 }
11767
11768 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
11769 val2 = MF_CFG_RD(bp, func_ext_config[func].
11770 fcoe_mac_addr_upper);
11771 val = MF_CFG_RD(bp, func_ext_config[func].
11772 fcoe_mac_addr_lower);
11773 bnx2x_set_mac_buf(fip_mac, val, val2);
11774 BNX2X_DEV_INFO
11775 ("Read FCoE L2 MAC: %pM\n", fip_mac);
11776 } else {
11777 bp->flags |= NO_FCOE_FLAG;
11778 }
11779
11780 bp->mf_ext_config = cfg;
11781
11782 } else {
11783 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
11784
11785 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
11786
11787 BNX2X_DEV_INFO("SD ISCSI MODE\n");
11788 BNX2X_DEV_INFO
11789 ("Read iSCSI MAC: %pM\n", iscsi_mac);
11790 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
11791
11792 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
11793 BNX2X_DEV_INFO("SD FCoE MODE\n");
11794 BNX2X_DEV_INFO
11795 ("Read FIP MAC: %pM\n", fip_mac);
11796 }
11797 }
11798
11799
11800
11801
11802
11803 if (IS_MF_FCOE_AFEX(bp))
11804 eth_hw_addr_set(bp->dev, fip_mac);
11805 } else {
11806 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11807 iscsi_mac_upper);
11808 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11809 iscsi_mac_lower);
11810 bnx2x_set_mac_buf(iscsi_mac, val, val2);
11811
11812 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
11813 fcoe_fip_mac_upper);
11814 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
11815 fcoe_fip_mac_lower);
11816 bnx2x_set_mac_buf(fip_mac, val, val2);
11817 }
11818
11819
11820 if (!is_valid_ether_addr(iscsi_mac)) {
11821 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
11822 eth_zero_addr(iscsi_mac);
11823 }
11824
11825
11826 if (!is_valid_ether_addr(fip_mac)) {
11827 bp->flags |= NO_FCOE_FLAG;
11828 eth_zero_addr(bp->fip_mac);
11829 }
11830 }
11831
11832 static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
11833 {
11834 u32 val, val2;
11835 int func = BP_ABS_FUNC(bp);
11836 int port = BP_PORT(bp);
11837 u8 addr[ETH_ALEN] = {};
11838
11839
11840 eth_hw_addr_set(bp->dev, addr);
11841
11842 if (BP_NOMCP(bp)) {
11843 BNX2X_ERROR("warning: random MAC workaround active\n");
11844 eth_hw_addr_random(bp->dev);
11845 } else if (IS_MF(bp)) {
11846 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11847 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11848 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11849 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
11850 bnx2x_set_mac_buf(addr, val, val2);
11851 eth_hw_addr_set(bp->dev, addr);
11852 }
11853
11854 if (CNIC_SUPPORT(bp))
11855 bnx2x_get_cnic_mac_hwinfo(bp);
11856 } else {
11857
11858 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11859 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11860 bnx2x_set_mac_buf(addr, val, val2);
11861 eth_hw_addr_set(bp->dev, addr);
11862
11863 if (CNIC_SUPPORT(bp))
11864 bnx2x_get_cnic_mac_hwinfo(bp);
11865 }
11866
11867 if (!BP_NOMCP(bp)) {
11868
11869 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11870 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11871 bnx2x_set_mac_buf(bp->phys_port_id, val, val2);
11872 bp->flags |= HAS_PHYS_PORT_ID;
11873 }
11874
11875 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
11876
11877 if (!is_valid_ether_addr(bp->dev->dev_addr))
11878 dev_err(&bp->pdev->dev,
11879 "bad Ethernet MAC address configuration: %pM\n"
11880 "change it manually before bringing up the appropriate network interface\n",
11881 bp->dev->dev_addr);
11882 }
11883
11884 static bool bnx2x_get_dropless_info(struct bnx2x *bp)
11885 {
11886 int tmp;
11887 u32 cfg;
11888
11889 if (IS_VF(bp))
11890 return false;
11891
11892 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11893
11894 tmp = BP_ABS_FUNC(bp);
11895 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11896 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11897 } else {
11898
11899 tmp = BP_PORT(bp);
11900 cfg = SHMEM_RD(bp,
11901 dev_info.port_hw_config[tmp].generic_features);
11902 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11903 }
11904 return cfg;
11905 }
11906
11907 static void validate_set_si_mode(struct bnx2x *bp)
11908 {
11909 u8 func = BP_ABS_FUNC(bp);
11910 u32 val;
11911
11912 val = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11913
11914
11915 if (val != 0xffff) {
11916 bp->mf_mode = MULTI_FUNCTION_SI;
11917 bp->mf_config[BP_VN(bp)] =
11918 MF_CFG_RD(bp, func_mf_config[func].config);
11919 } else
11920 BNX2X_DEV_INFO("illegal MAC address for SI\n");
11921 }
11922
11923 static int bnx2x_get_hwinfo(struct bnx2x *bp)
11924 {
11925 int func = BP_ABS_FUNC(bp);
11926 int vn;
11927 u32 val = 0, val2 = 0;
11928 int rc = 0;
11929
11930
11931 if (REG_RD(bp, MISC_REG_CHIP_NUM) == 0xffffffff) {
11932 dev_err(&bp->pdev->dev,
11933 "Chip read returns all Fs. Preventing probe from continuing\n");
11934 return -EINVAL;
11935 }
11936
11937 bnx2x_get_common_hwinfo(bp);
11938
11939
11940
11941
11942 if (CHIP_IS_E1x(bp)) {
11943 bp->common.int_block = INT_BLOCK_HC;
11944
11945 bp->igu_dsb_id = DEF_SB_IGU_ID;
11946 bp->igu_base_sb = 0;
11947 } else {
11948 bp->common.int_block = INT_BLOCK_IGU;
11949
11950
11951 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11952
11953 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
11954
11955 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11956 int tout = 5000;
11957
11958 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11959
11960 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11961 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11962 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11963
11964 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11965 tout--;
11966 usleep_range(1000, 2000);
11967 }
11968
11969 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11970 dev_err(&bp->pdev->dev,
11971 "FORCING Normal Mode failed!!!\n");
11972 bnx2x_release_hw_lock(bp,
11973 HW_LOCK_RESOURCE_RESET);
11974 return -EPERM;
11975 }
11976 }
11977
11978 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11979 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
11980 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11981 } else
11982 BNX2X_DEV_INFO("IGU Normal Mode\n");
11983
11984 rc = bnx2x_get_igu_cam_info(bp);
11985 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11986 if (rc)
11987 return rc;
11988 }
11989
11990
11991
11992
11993
11994
11995 if (CHIP_IS_E1x(bp))
11996 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11997 else
11998
11999
12000
12001
12002 bp->base_fw_ndsb = bp->igu_base_sb;
12003
12004 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
12005 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
12006 bp->igu_sb_cnt, bp->base_fw_ndsb);
12007
12008
12009
12010
12011 bp->mf_ov = 0;
12012 bp->mf_mode = 0;
12013 bp->mf_sub_mode = 0;
12014 vn = BP_VN(bp);
12015
12016 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
12017 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
12018 bp->common.shmem2_base, SHMEM2_RD(bp, size),
12019 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
12020
12021 if (SHMEM2_HAS(bp, mf_cfg_addr))
12022 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
12023 else
12024 bp->common.mf_cfg_base = bp->common.shmem_base +
12025 offsetof(struct shmem_region, func_mb) +
12026 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
12027
12028
12029
12030
12031
12032
12033
12034
12035 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12036
12037 val = SHMEM_RD(bp,
12038 dev_info.shared_feature_config.config);
12039 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
12040
12041 switch (val) {
12042 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
12043 validate_set_si_mode(bp);
12044 break;
12045 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
12046 if ((!CHIP_IS_E1x(bp)) &&
12047 (MF_CFG_RD(bp, func_mf_config[func].
12048 mac_upper) != 0xffff) &&
12049 (SHMEM2_HAS(bp,
12050 afex_driver_support))) {
12051 bp->mf_mode = MULTI_FUNCTION_AFEX;
12052 bp->mf_config[vn] = MF_CFG_RD(bp,
12053 func_mf_config[func].config);
12054 } else {
12055 BNX2X_DEV_INFO("can not configure afex mode\n");
12056 }
12057 break;
12058 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
12059
12060 val = MF_CFG_RD(bp,
12061 func_mf_config[FUNC_0].e1hov_tag);
12062 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
12063
12064 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12065 bp->mf_mode = MULTI_FUNCTION_SD;
12066 bp->mf_config[vn] = MF_CFG_RD(bp,
12067 func_mf_config[func].config);
12068 } else
12069 BNX2X_DEV_INFO("illegal OV for SD\n");
12070 break;
12071 case SHARED_FEAT_CFG_FORCE_SF_MODE_BD_MODE:
12072 bp->mf_mode = MULTI_FUNCTION_SD;
12073 bp->mf_sub_mode = SUB_MF_MODE_BD;
12074 bp->mf_config[vn] =
12075 MF_CFG_RD(bp,
12076 func_mf_config[func].config);
12077
12078 if (SHMEM2_HAS(bp, mtu_size)) {
12079 int mtu_idx = BP_FW_MB_IDX(bp);
12080 u16 mtu_size;
12081 u32 mtu;
12082
12083 mtu = SHMEM2_RD(bp, mtu_size[mtu_idx]);
12084 mtu_size = (u16)mtu;
12085 DP(NETIF_MSG_IFUP, "Read MTU size %04x [%08x]\n",
12086 mtu_size, mtu);
12087
12088
12089 if ((mtu_size >= ETH_MIN_PACKET_SIZE) &&
12090 (mtu_size <=
12091 ETH_MAX_JUMBO_PACKET_SIZE))
12092 bp->dev->mtu = mtu_size;
12093 }
12094 break;
12095 case SHARED_FEAT_CFG_FORCE_SF_MODE_UFP_MODE:
12096 bp->mf_mode = MULTI_FUNCTION_SD;
12097 bp->mf_sub_mode = SUB_MF_MODE_UFP;
12098 bp->mf_config[vn] =
12099 MF_CFG_RD(bp,
12100 func_mf_config[func].config);
12101 break;
12102 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
12103 bp->mf_config[vn] = 0;
12104 break;
12105 case SHARED_FEAT_CFG_FORCE_SF_MODE_EXTENDED_MODE:
12106 val2 = SHMEM_RD(bp,
12107 dev_info.shared_hw_config.config_3);
12108 val2 &= SHARED_HW_CFG_EXTENDED_MF_MODE_MASK;
12109 switch (val2) {
12110 case SHARED_HW_CFG_EXTENDED_MF_MODE_NPAR1_DOT_5:
12111 validate_set_si_mode(bp);
12112 bp->mf_sub_mode =
12113 SUB_MF_MODE_NPAR1_DOT_5;
12114 break;
12115 default:
12116
12117 bp->mf_config[vn] = 0;
12118 BNX2X_DEV_INFO("unknown extended MF mode 0x%x\n",
12119 val);
12120 }
12121 break;
12122 default:
12123
12124 bp->mf_config[vn] = 0;
12125 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
12126 }
12127 }
12128
12129 BNX2X_DEV_INFO("%s function mode\n",
12130 IS_MF(bp) ? "multi" : "single");
12131
12132 switch (bp->mf_mode) {
12133 case MULTI_FUNCTION_SD:
12134 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
12135 FUNC_MF_CFG_E1HOV_TAG_MASK;
12136 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
12137 bp->mf_ov = val;
12138 bp->path_has_ovlan = true;
12139
12140 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
12141 func, bp->mf_ov, bp->mf_ov);
12142 } else if ((bp->mf_sub_mode == SUB_MF_MODE_UFP) ||
12143 (bp->mf_sub_mode == SUB_MF_MODE_BD)) {
12144 dev_err(&bp->pdev->dev,
12145 "Unexpected - no valid MF OV for func %d in UFP/BD mode\n",
12146 func);
12147 bp->path_has_ovlan = true;
12148 } else {
12149 dev_err(&bp->pdev->dev,
12150 "No valid MF OV for func %d, aborting\n",
12151 func);
12152 return -EPERM;
12153 }
12154 break;
12155 case MULTI_FUNCTION_AFEX:
12156 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
12157 break;
12158 case MULTI_FUNCTION_SI:
12159 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
12160 func);
12161 break;
12162 default:
12163 if (vn) {
12164 dev_err(&bp->pdev->dev,
12165 "VN %d is in a single function mode, aborting\n",
12166 vn);
12167 return -EPERM;
12168 }
12169 break;
12170 }
12171
12172
12173
12174
12175
12176
12177 if (CHIP_MODE_IS_4_PORT(bp) &&
12178 !bp->path_has_ovlan &&
12179 !IS_MF(bp) &&
12180 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
12181 u8 other_port = !BP_PORT(bp);
12182 u8 other_func = BP_PATH(bp) + 2*other_port;
12183 val = MF_CFG_RD(bp,
12184 func_mf_config[other_func].e1hov_tag);
12185 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
12186 bp->path_has_ovlan = true;
12187 }
12188 }
12189
12190
12191 if (CHIP_IS_E1H(bp) && IS_MF(bp))
12192 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
12193
12194
12195 bnx2x_get_port_hwinfo(bp);
12196
12197
12198 bnx2x_get_mac_hwinfo(bp);
12199
12200 bnx2x_get_cnic_info(bp);
12201
12202 return rc;
12203 }
12204
12205 static void bnx2x_read_fwinfo(struct bnx2x *bp)
12206 {
12207 char str_id[VENDOR_ID_LEN + 1];
12208 unsigned int vpd_len, kw_len;
12209 u8 *vpd_data;
12210 int rodi;
12211
12212 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
12213
12214 vpd_data = pci_vpd_alloc(bp->pdev, &vpd_len);
12215 if (IS_ERR(vpd_data))
12216 return;
12217
12218 rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
12219 PCI_VPD_RO_KEYWORD_MFR_ID, &kw_len);
12220 if (rodi < 0 || kw_len != VENDOR_ID_LEN)
12221 goto out_not_found;
12222
12223
12224 snprintf(str_id, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
12225 if (!strncasecmp(str_id, &vpd_data[rodi], VENDOR_ID_LEN)) {
12226 rodi = pci_vpd_find_ro_info_keyword(vpd_data, vpd_len,
12227 PCI_VPD_RO_KEYWORD_VENDOR0,
12228 &kw_len);
12229 if (rodi >= 0 && kw_len < sizeof(bp->fw_ver)) {
12230 memcpy(bp->fw_ver, &vpd_data[rodi], kw_len);
12231 bp->fw_ver[kw_len] = ' ';
12232 }
12233 }
12234 out_not_found:
12235 kfree(vpd_data);
12236 }
12237
12238 static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
12239 {
12240 u32 flags = 0;
12241
12242 if (CHIP_REV_IS_FPGA(bp))
12243 SET_FLAGS(flags, MODE_FPGA);
12244 else if (CHIP_REV_IS_EMUL(bp))
12245 SET_FLAGS(flags, MODE_EMUL);
12246 else
12247 SET_FLAGS(flags, MODE_ASIC);
12248
12249 if (CHIP_MODE_IS_4_PORT(bp))
12250 SET_FLAGS(flags, MODE_PORT4);
12251 else
12252 SET_FLAGS(flags, MODE_PORT2);
12253
12254 if (CHIP_IS_E2(bp))
12255 SET_FLAGS(flags, MODE_E2);
12256 else if (CHIP_IS_E3(bp)) {
12257 SET_FLAGS(flags, MODE_E3);
12258 if (CHIP_REV(bp) == CHIP_REV_Ax)
12259 SET_FLAGS(flags, MODE_E3_A0);
12260 else
12261 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
12262 }
12263
12264 if (IS_MF(bp)) {
12265 SET_FLAGS(flags, MODE_MF);
12266 switch (bp->mf_mode) {
12267 case MULTI_FUNCTION_SD:
12268 SET_FLAGS(flags, MODE_MF_SD);
12269 break;
12270 case MULTI_FUNCTION_SI:
12271 SET_FLAGS(flags, MODE_MF_SI);
12272 break;
12273 case MULTI_FUNCTION_AFEX:
12274 SET_FLAGS(flags, MODE_MF_AFEX);
12275 break;
12276 }
12277 } else
12278 SET_FLAGS(flags, MODE_SF);
12279
12280 #if defined(__LITTLE_ENDIAN)
12281 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
12282 #else
12283 SET_FLAGS(flags, MODE_BIG_ENDIAN);
12284 #endif
12285 INIT_MODE_FLAGS(bp) = flags;
12286 }
12287
12288 static int bnx2x_init_bp(struct bnx2x *bp)
12289 {
12290 int func;
12291 int rc;
12292
12293 mutex_init(&bp->port.phy_mutex);
12294 mutex_init(&bp->fw_mb_mutex);
12295 mutex_init(&bp->drv_info_mutex);
12296 sema_init(&bp->stats_lock, 1);
12297 bp->drv_info_mng_owner = false;
12298 INIT_LIST_HEAD(&bp->vlan_reg);
12299
12300 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
12301 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
12302 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
12303 INIT_DELAYED_WORK(&bp->iov_task, bnx2x_iov_task);
12304 if (IS_PF(bp)) {
12305 rc = bnx2x_get_hwinfo(bp);
12306 if (rc)
12307 return rc;
12308 } else {
12309 static const u8 zero_addr[ETH_ALEN] = {};
12310
12311 eth_hw_addr_set(bp->dev, zero_addr);
12312 }
12313
12314 bnx2x_set_modes_bitmap(bp);
12315
12316 rc = bnx2x_alloc_mem_bp(bp);
12317 if (rc)
12318 return rc;
12319
12320 bnx2x_read_fwinfo(bp);
12321
12322 func = BP_FUNC(bp);
12323
12324
12325 if (IS_PF(bp) && !BP_NOMCP(bp)) {
12326
12327 bp->fw_seq =
12328 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12329 DRV_MSG_SEQ_NUMBER_MASK;
12330 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
12331
12332 rc = bnx2x_prev_unload(bp);
12333 if (rc) {
12334 bnx2x_free_mem_bp(bp);
12335 return rc;
12336 }
12337 }
12338
12339 if (CHIP_REV_IS_FPGA(bp))
12340 dev_err(&bp->pdev->dev, "FPGA detected\n");
12341
12342 if (BP_NOMCP(bp) && (func == 0))
12343 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
12344
12345 bp->disable_tpa = disable_tpa;
12346 bp->disable_tpa |= !!IS_MF_STORAGE_ONLY(bp);
12347
12348 bp->disable_tpa |= is_kdump_kernel();
12349
12350
12351 if (bp->disable_tpa) {
12352 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12353 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
12354 }
12355
12356 if (CHIP_IS_E1(bp))
12357 bp->dropless_fc = false;
12358 else
12359 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
12360
12361 bp->mrrs = mrrs;
12362
12363 bp->tx_ring_size = IS_MF_STORAGE_ONLY(bp) ? 0 : MAX_TX_AVAIL;
12364 if (IS_VF(bp))
12365 bp->rx_ring_size = MAX_RX_AVAIL;
12366
12367
12368 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
12369 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
12370
12371 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
12372
12373 timer_setup(&bp->timer, bnx2x_timer, 0);
12374 bp->timer.expires = jiffies + bp->current_interval;
12375
12376 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
12377 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
12378 SHMEM2_HAS(bp, dcbx_en) &&
12379 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
12380 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset) &&
12381 SHMEM2_RD(bp, dcbx_en[BP_PORT(bp)])) {
12382 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
12383 bnx2x_dcbx_init_params(bp);
12384 } else {
12385 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
12386 }
12387
12388 if (CHIP_IS_E1x(bp))
12389 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
12390 else
12391 bp->cnic_base_cl_id = FP_SB_MAX_E2;
12392
12393
12394 if (IS_VF(bp))
12395 bp->max_cos = 1;
12396 else if (CHIP_IS_E1x(bp))
12397 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
12398 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
12399 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
12400 else if (CHIP_IS_E3B0(bp))
12401 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
12402 else
12403 BNX2X_ERR("unknown chip %x revision %x\n",
12404 CHIP_NUM(bp), CHIP_REV(bp));
12405 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
12406
12407
12408
12409
12410
12411 if (IS_VF(bp))
12412 bp->min_msix_vec_cnt = 1;
12413 else if (CNIC_SUPPORT(bp))
12414 bp->min_msix_vec_cnt = 3;
12415 else
12416 bp->min_msix_vec_cnt = 2;
12417 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
12418
12419 bp->dump_preset_idx = 1;
12420
12421 return rc;
12422 }
12423
12424
12425
12426
12427
12428
12429
12430
12431
12432
12433 static int bnx2x_open(struct net_device *dev)
12434 {
12435 struct bnx2x *bp = netdev_priv(dev);
12436 int rc;
12437
12438 bp->stats_init = true;
12439
12440 netif_carrier_off(dev);
12441
12442 bnx2x_set_power_state(bp, PCI_D0);
12443
12444
12445
12446
12447
12448
12449
12450 if (IS_PF(bp)) {
12451 int other_engine = BP_PATH(bp) ? 0 : 1;
12452 bool other_load_status, load_status;
12453 bool global = false;
12454
12455 other_load_status = bnx2x_get_load_status(bp, other_engine);
12456 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
12457 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
12458 bnx2x_chk_parity_attn(bp, &global, true)) {
12459 do {
12460
12461
12462
12463
12464
12465 if (global)
12466 bnx2x_set_reset_global(bp);
12467
12468
12469
12470
12471
12472
12473 if ((!load_status &&
12474 (!global || !other_load_status)) &&
12475 bnx2x_trylock_leader_lock(bp) &&
12476 !bnx2x_leader_reset(bp)) {
12477 netdev_info(bp->dev,
12478 "Recovered in open\n");
12479 break;
12480 }
12481
12482
12483 bnx2x_set_power_state(bp, PCI_D3hot);
12484 bp->recovery_state = BNX2X_RECOVERY_FAILED;
12485
12486 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
12487 "If you still see this message after a few retries then power cycle is required.\n");
12488
12489 return -EAGAIN;
12490 } while (0);
12491 }
12492 }
12493
12494 bp->recovery_state = BNX2X_RECOVERY_DONE;
12495 rc = bnx2x_nic_load(bp, LOAD_OPEN);
12496 if (rc)
12497 return rc;
12498
12499 return 0;
12500 }
12501
12502
12503 static int bnx2x_close(struct net_device *dev)
12504 {
12505 struct bnx2x *bp = netdev_priv(dev);
12506
12507
12508 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
12509
12510 return 0;
12511 }
12512
12513 struct bnx2x_mcast_list_elem_group
12514 {
12515 struct list_head mcast_group_link;
12516 struct bnx2x_mcast_list_elem mcast_elems[];
12517 };
12518
12519 #define MCAST_ELEMS_PER_PG \
12520 ((PAGE_SIZE - sizeof(struct bnx2x_mcast_list_elem_group)) / \
12521 sizeof(struct bnx2x_mcast_list_elem))
12522
12523 static void bnx2x_free_mcast_macs_list(struct list_head *mcast_group_list)
12524 {
12525 struct bnx2x_mcast_list_elem_group *current_mcast_group;
12526
12527 while (!list_empty(mcast_group_list)) {
12528 current_mcast_group = list_first_entry(mcast_group_list,
12529 struct bnx2x_mcast_list_elem_group,
12530 mcast_group_link);
12531 list_del(¤t_mcast_group->mcast_group_link);
12532 free_page((unsigned long)current_mcast_group);
12533 }
12534 }
12535
12536 static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
12537 struct bnx2x_mcast_ramrod_params *p,
12538 struct list_head *mcast_group_list)
12539 {
12540 struct bnx2x_mcast_list_elem *mc_mac;
12541 struct netdev_hw_addr *ha;
12542 struct bnx2x_mcast_list_elem_group *current_mcast_group = NULL;
12543 int mc_count = netdev_mc_count(bp->dev);
12544 int offset = 0;
12545
12546 INIT_LIST_HEAD(&p->mcast_list);
12547 netdev_for_each_mc_addr(ha, bp->dev) {
12548 if (!offset) {
12549 current_mcast_group =
12550 (struct bnx2x_mcast_list_elem_group *)
12551 __get_free_page(GFP_ATOMIC);
12552 if (!current_mcast_group) {
12553 bnx2x_free_mcast_macs_list(mcast_group_list);
12554 BNX2X_ERR("Failed to allocate mc MAC list\n");
12555 return -ENOMEM;
12556 }
12557 list_add(¤t_mcast_group->mcast_group_link,
12558 mcast_group_list);
12559 }
12560 mc_mac = ¤t_mcast_group->mcast_elems[offset];
12561 mc_mac->mac = bnx2x_mc_addr(ha);
12562 list_add_tail(&mc_mac->link, &p->mcast_list);
12563 offset++;
12564 if (offset == MCAST_ELEMS_PER_PG)
12565 offset = 0;
12566 }
12567 p->mcast_list_len = mc_count;
12568 return 0;
12569 }
12570
12571
12572
12573
12574
12575
12576
12577
12578 static int bnx2x_set_uc_list(struct bnx2x *bp)
12579 {
12580 int rc;
12581 struct net_device *dev = bp->dev;
12582 struct netdev_hw_addr *ha;
12583 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
12584 unsigned long ramrod_flags = 0;
12585
12586
12587 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
12588 if (rc < 0) {
12589 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
12590 return rc;
12591 }
12592
12593 netdev_for_each_uc_addr(ha, dev) {
12594 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
12595 BNX2X_UC_LIST_MAC, &ramrod_flags);
12596 if (rc == -EEXIST) {
12597 DP(BNX2X_MSG_SP,
12598 "Failed to schedule ADD operations: %d\n", rc);
12599
12600 rc = 0;
12601
12602 } else if (rc < 0) {
12603
12604 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
12605 rc);
12606 return rc;
12607 }
12608 }
12609
12610
12611 __set_bit(RAMROD_CONT, &ramrod_flags);
12612 return bnx2x_set_mac_one(bp, NULL, mac_obj, false ,
12613 BNX2X_UC_LIST_MAC, &ramrod_flags);
12614 }
12615
12616 static int bnx2x_set_mc_list_e1x(struct bnx2x *bp)
12617 {
12618 LIST_HEAD(mcast_group_list);
12619 struct net_device *dev = bp->dev;
12620 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12621 int rc = 0;
12622
12623 rparam.mcast_obj = &bp->mcast_obj;
12624
12625
12626 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12627 if (rc < 0) {
12628 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
12629 return rc;
12630 }
12631
12632
12633 if (netdev_mc_count(dev)) {
12634 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12635 if (rc)
12636 return rc;
12637
12638
12639 rc = bnx2x_config_mcast(bp, &rparam,
12640 BNX2X_MCAST_CMD_ADD);
12641 if (rc < 0)
12642 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12643 rc);
12644
12645 bnx2x_free_mcast_macs_list(&mcast_group_list);
12646 }
12647
12648 return rc;
12649 }
12650
12651 static int bnx2x_set_mc_list(struct bnx2x *bp)
12652 {
12653 LIST_HEAD(mcast_group_list);
12654 struct bnx2x_mcast_ramrod_params rparam = {NULL};
12655 struct net_device *dev = bp->dev;
12656 int rc = 0;
12657
12658
12659 if (CHIP_IS_E1x(bp))
12660 return bnx2x_set_mc_list_e1x(bp);
12661
12662 rparam.mcast_obj = &bp->mcast_obj;
12663
12664 if (netdev_mc_count(dev)) {
12665 rc = bnx2x_init_mcast_macs_list(bp, &rparam, &mcast_group_list);
12666 if (rc)
12667 return rc;
12668
12669
12670 rc = bnx2x_config_mcast(bp, &rparam,
12671 BNX2X_MCAST_CMD_SET);
12672 if (rc < 0)
12673 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
12674 rc);
12675
12676 bnx2x_free_mcast_macs_list(&mcast_group_list);
12677 } else {
12678
12679 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
12680 if (rc < 0)
12681 BNX2X_ERR("Failed to clear multicast configuration %d\n",
12682 rc);
12683 }
12684
12685 return rc;
12686 }
12687
12688
12689 static void bnx2x_set_rx_mode(struct net_device *dev)
12690 {
12691 struct bnx2x *bp = netdev_priv(dev);
12692
12693 if (bp->state != BNX2X_STATE_OPEN) {
12694 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12695 return;
12696 } else {
12697
12698 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
12699 NETIF_MSG_IFUP);
12700 }
12701 }
12702
12703 void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
12704 {
12705 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12706
12707 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
12708
12709 netif_addr_lock_bh(bp->dev);
12710
12711 if (bp->dev->flags & IFF_PROMISC) {
12712 rx_mode = BNX2X_RX_MODE_PROMISC;
12713 } else if ((bp->dev->flags & IFF_ALLMULTI) ||
12714 ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
12715 CHIP_IS_E1(bp))) {
12716 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12717 } else {
12718 if (IS_PF(bp)) {
12719
12720 if (bnx2x_set_mc_list(bp) < 0)
12721 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12722
12723
12724 netif_addr_unlock_bh(bp->dev);
12725 if (bnx2x_set_uc_list(bp) < 0)
12726 rx_mode = BNX2X_RX_MODE_PROMISC;
12727 netif_addr_lock_bh(bp->dev);
12728 } else {
12729
12730
12731
12732 bnx2x_schedule_sp_rtnl(bp,
12733 BNX2X_SP_RTNL_VFPF_MCAST, 0);
12734 }
12735 }
12736
12737 bp->rx_mode = rx_mode;
12738
12739 if (IS_MF_ISCSI_ONLY(bp))
12740 bp->rx_mode = BNX2X_RX_MODE_NONE;
12741
12742
12743 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
12744 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
12745 netif_addr_unlock_bh(bp->dev);
12746 return;
12747 }
12748
12749 if (IS_PF(bp)) {
12750 bnx2x_set_storm_rx_mode(bp);
12751 netif_addr_unlock_bh(bp->dev);
12752 } else {
12753
12754
12755
12756
12757 netif_addr_unlock_bh(bp->dev);
12758 bnx2x_vfpf_storm_rx_mode(bp);
12759 }
12760 }
12761
12762
12763 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12764 int devad, u16 addr)
12765 {
12766 struct bnx2x *bp = netdev_priv(netdev);
12767 u16 value;
12768 int rc;
12769
12770 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12771 prtad, devad, addr);
12772
12773
12774 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12775
12776 bnx2x_acquire_phy_lock(bp);
12777 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
12778 bnx2x_release_phy_lock(bp);
12779 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12780
12781 if (!rc)
12782 rc = value;
12783 return rc;
12784 }
12785
12786
12787 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12788 u16 addr, u16 value)
12789 {
12790 struct bnx2x *bp = netdev_priv(netdev);
12791 int rc;
12792
12793 DP(NETIF_MSG_LINK,
12794 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
12795 prtad, devad, addr, value);
12796
12797
12798 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12799
12800 bnx2x_acquire_phy_lock(bp);
12801 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
12802 bnx2x_release_phy_lock(bp);
12803 return rc;
12804 }
12805
12806
12807 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12808 {
12809 struct bnx2x *bp = netdev_priv(dev);
12810 struct mii_ioctl_data *mdio = if_mii(ifr);
12811
12812 if (!netif_running(dev))
12813 return -EAGAIN;
12814
12815 switch (cmd) {
12816 case SIOCSHWTSTAMP:
12817 return bnx2x_hwtstamp_ioctl(bp, ifr);
12818 default:
12819 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12820 mdio->phy_id, mdio->reg_num, mdio->val_in);
12821 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12822 }
12823 }
12824
12825 static int bnx2x_validate_addr(struct net_device *dev)
12826 {
12827 struct bnx2x *bp = netdev_priv(dev);
12828
12829
12830 if (IS_VF(bp))
12831 bnx2x_sample_bulletin(bp);
12832
12833 if (!is_valid_ether_addr(dev->dev_addr)) {
12834 BNX2X_ERR("Non-valid Ethernet address\n");
12835 return -EADDRNOTAVAIL;
12836 }
12837 return 0;
12838 }
12839
12840 static int bnx2x_get_phys_port_id(struct net_device *netdev,
12841 struct netdev_phys_item_id *ppid)
12842 {
12843 struct bnx2x *bp = netdev_priv(netdev);
12844
12845 if (!(bp->flags & HAS_PHYS_PORT_ID))
12846 return -EOPNOTSUPP;
12847
12848 ppid->id_len = sizeof(bp->phys_port_id);
12849 memcpy(ppid->id, bp->phys_port_id, ppid->id_len);
12850
12851 return 0;
12852 }
12853
12854 static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
12855 struct net_device *dev,
12856 netdev_features_t features)
12857 {
12858
12859
12860
12861
12862
12863
12864
12865
12866
12867
12868
12869
12870
12871 if (unlikely(skb_is_gso(skb) &&
12872 (skb_shinfo(skb)->gso_size > 9000) &&
12873 !skb_gso_validate_mac_len(skb, 9700)))
12874 features &= ~NETIF_F_GSO_MASK;
12875
12876 features = vlan_features_check(skb, features);
12877 return vxlan_features_check(skb, features);
12878 }
12879
12880 static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
12881 {
12882 int rc;
12883
12884 if (IS_PF(bp)) {
12885 unsigned long ramrod_flags = 0;
12886
12887 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
12888 rc = bnx2x_set_vlan_one(bp, vid, &bp->sp_objs->vlan_obj,
12889 add, &ramrod_flags);
12890 } else {
12891 rc = bnx2x_vfpf_update_vlan(bp, vid, bp->fp->index, add);
12892 }
12893
12894 return rc;
12895 }
12896
12897 static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
12898 {
12899 struct bnx2x_vlan_entry *vlan;
12900 int rc = 0;
12901
12902
12903 list_for_each_entry(vlan, &bp->vlan_reg, link) {
12904 if (vlan->hw)
12905 continue;
12906
12907 if (bp->vlan_cnt >= bp->vlan_credit)
12908 return -ENOBUFS;
12909
12910 rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
12911 if (rc) {
12912 BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
12913 return rc;
12914 }
12915
12916 DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
12917 vlan->hw = true;
12918 bp->vlan_cnt++;
12919 }
12920
12921 return 0;
12922 }
12923
12924 static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
12925 {
12926 bool need_accept_any_vlan;
12927
12928 need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
12929
12930 if (bp->accept_any_vlan != need_accept_any_vlan) {
12931 bp->accept_any_vlan = need_accept_any_vlan;
12932 DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
12933 bp->accept_any_vlan ? "raised" : "cleared");
12934 if (set_rx_mode) {
12935 if (IS_PF(bp))
12936 bnx2x_set_rx_mode_inner(bp);
12937 else
12938 bnx2x_vfpf_storm_rx_mode(bp);
12939 }
12940 }
12941 }
12942
12943 int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
12944 {
12945
12946 bnx2x_vlan_configure(bp, false);
12947
12948 return 0;
12949 }
12950
12951 static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
12952 {
12953 struct bnx2x *bp = netdev_priv(dev);
12954 struct bnx2x_vlan_entry *vlan;
12955
12956 DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
12957
12958 vlan = kmalloc(sizeof(*vlan), GFP_KERNEL);
12959 if (!vlan)
12960 return -ENOMEM;
12961
12962 vlan->vid = vid;
12963 vlan->hw = false;
12964 list_add_tail(&vlan->link, &bp->vlan_reg);
12965
12966 if (netif_running(dev))
12967 bnx2x_vlan_configure(bp, true);
12968
12969 return 0;
12970 }
12971
12972 static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
12973 {
12974 struct bnx2x *bp = netdev_priv(dev);
12975 struct bnx2x_vlan_entry *vlan;
12976 bool found = false;
12977 int rc = 0;
12978
12979 DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
12980
12981 list_for_each_entry(vlan, &bp->vlan_reg, link)
12982 if (vlan->vid == vid) {
12983 found = true;
12984 break;
12985 }
12986
12987 if (!found) {
12988 BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
12989 return -EINVAL;
12990 }
12991
12992 if (netif_running(dev) && vlan->hw) {
12993 rc = __bnx2x_vlan_configure_vid(bp, vid, false);
12994 DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
12995 bp->vlan_cnt--;
12996 }
12997
12998 list_del(&vlan->link);
12999 kfree(vlan);
13000
13001 if (netif_running(dev))
13002 bnx2x_vlan_configure(bp, true);
13003
13004 DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
13005
13006 return rc;
13007 }
13008
13009 static const struct net_device_ops bnx2x_netdev_ops = {
13010 .ndo_open = bnx2x_open,
13011 .ndo_stop = bnx2x_close,
13012 .ndo_start_xmit = bnx2x_start_xmit,
13013 .ndo_select_queue = bnx2x_select_queue,
13014 .ndo_set_rx_mode = bnx2x_set_rx_mode,
13015 .ndo_set_mac_address = bnx2x_change_mac_addr,
13016 .ndo_validate_addr = bnx2x_validate_addr,
13017 .ndo_eth_ioctl = bnx2x_ioctl,
13018 .ndo_change_mtu = bnx2x_change_mtu,
13019 .ndo_fix_features = bnx2x_fix_features,
13020 .ndo_set_features = bnx2x_set_features,
13021 .ndo_tx_timeout = bnx2x_tx_timeout,
13022 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13023 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13024 .ndo_setup_tc = __bnx2x_setup_tc,
13025 #ifdef CONFIG_BNX2X_SRIOV
13026 .ndo_set_vf_mac = bnx2x_set_vf_mac,
13027 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
13028 .ndo_get_vf_config = bnx2x_get_vf_config,
13029 .ndo_set_vf_spoofchk = bnx2x_set_vf_spoofchk,
13030 #endif
13031 #ifdef NETDEV_FCOE_WWNN
13032 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
13033 #endif
13034
13035 .ndo_get_phys_port_id = bnx2x_get_phys_port_id,
13036 .ndo_set_vf_link_state = bnx2x_set_vf_link_state,
13037 .ndo_features_check = bnx2x_features_check,
13038 };
13039
13040 static void bnx2x_disable_pcie_error_reporting(struct bnx2x *bp)
13041 {
13042 if (bp->flags & AER_ENABLED) {
13043 pci_disable_pcie_error_reporting(bp->pdev);
13044 bp->flags &= ~AER_ENABLED;
13045 }
13046 }
13047
13048 static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
13049 struct net_device *dev, unsigned long board_type)
13050 {
13051 int rc;
13052 u32 pci_cfg_dword;
13053 bool chip_is_e1x = (board_type == BCM57710 ||
13054 board_type == BCM57711 ||
13055 board_type == BCM57711E);
13056
13057 SET_NETDEV_DEV(dev, &pdev->dev);
13058
13059 bp->dev = dev;
13060 bp->pdev = pdev;
13061
13062 rc = pci_enable_device(pdev);
13063 if (rc) {
13064 dev_err(&bp->pdev->dev,
13065 "Cannot enable PCI device, aborting\n");
13066 goto err_out;
13067 }
13068
13069 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
13070 dev_err(&bp->pdev->dev,
13071 "Cannot find PCI device base address, aborting\n");
13072 rc = -ENODEV;
13073 goto err_out_disable;
13074 }
13075
13076 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
13077 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
13078 rc = -ENODEV;
13079 goto err_out_disable;
13080 }
13081
13082 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
13083 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
13084 PCICFG_REVESION_ID_ERROR_VAL) {
13085 pr_err("PCI device error, probably due to fan failure, aborting\n");
13086 rc = -ENODEV;
13087 goto err_out_disable;
13088 }
13089
13090 if (atomic_read(&pdev->enable_cnt) == 1) {
13091 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
13092 if (rc) {
13093 dev_err(&bp->pdev->dev,
13094 "Cannot obtain PCI resources, aborting\n");
13095 goto err_out_disable;
13096 }
13097
13098 pci_set_master(pdev);
13099 pci_save_state(pdev);
13100 }
13101
13102 if (IS_PF(bp)) {
13103 if (!pdev->pm_cap) {
13104 dev_err(&bp->pdev->dev,
13105 "Cannot find power management capability, aborting\n");
13106 rc = -EIO;
13107 goto err_out_release;
13108 }
13109 }
13110
13111 if (!pci_is_pcie(pdev)) {
13112 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
13113 rc = -EIO;
13114 goto err_out_release;
13115 }
13116
13117 rc = dma_set_mask_and_coherent(&bp->pdev->dev, DMA_BIT_MASK(64));
13118 if (rc) {
13119 dev_err(&bp->pdev->dev, "System does not support DMA, aborting\n");
13120 goto err_out_release;
13121 }
13122
13123 dev->mem_start = pci_resource_start(pdev, 0);
13124 dev->base_addr = dev->mem_start;
13125 dev->mem_end = pci_resource_end(pdev, 0);
13126
13127 dev->irq = pdev->irq;
13128
13129 bp->regview = pci_ioremap_bar(pdev, 0);
13130 if (!bp->regview) {
13131 dev_err(&bp->pdev->dev,
13132 "Cannot map register space, aborting\n");
13133 rc = -ENOMEM;
13134 goto err_out_release;
13135 }
13136
13137
13138
13139
13140
13141
13142 if (chip_is_e1x) {
13143 bp->pf_num = PCI_FUNC(pdev->devfn);
13144 } else {
13145
13146 pci_read_config_dword(bp->pdev,
13147 PCICFG_ME_REGISTER, &pci_cfg_dword);
13148 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
13149 ME_REG_ABS_PF_NUM_SHIFT);
13150 }
13151 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
13152
13153
13154 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
13155 PCICFG_VENDOR_ID_OFFSET);
13156
13157
13158 pdev->needs_freset = 1;
13159
13160
13161 rc = pci_enable_pcie_error_reporting(pdev);
13162 if (!rc)
13163 bp->flags |= AER_ENABLED;
13164 else
13165 BNX2X_DEV_INFO("Failed To configure PCIe AER [%d]\n", rc);
13166
13167
13168
13169
13170
13171 if (IS_PF(bp)) {
13172 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
13173 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
13174 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
13175 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
13176
13177 if (chip_is_e1x) {
13178 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
13179 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
13180 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
13181 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
13182 }
13183
13184
13185
13186
13187
13188 if (!chip_is_e1x)
13189 REG_WR(bp,
13190 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
13191 }
13192
13193 dev->watchdog_timeo = TX_TIMEOUT;
13194
13195 dev->netdev_ops = &bnx2x_netdev_ops;
13196 bnx2x_set_ethtool_ops(bp, dev);
13197
13198 dev->priv_flags |= IFF_UNICAST_FLT;
13199
13200 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13201 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13202 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | NETIF_F_GRO_HW |
13203 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
13204 if (!chip_is_e1x) {
13205 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13206 NETIF_F_GSO_IPXIP4 |
13207 NETIF_F_GSO_UDP_TUNNEL |
13208 NETIF_F_GSO_UDP_TUNNEL_CSUM |
13209 NETIF_F_GSO_PARTIAL;
13210
13211 dev->hw_enc_features =
13212 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
13213 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
13214 NETIF_F_GSO_IPXIP4 |
13215 NETIF_F_GSO_GRE | NETIF_F_GSO_GRE_CSUM |
13216 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM |
13217 NETIF_F_GSO_PARTIAL;
13218
13219 dev->gso_partial_features = NETIF_F_GSO_GRE_CSUM |
13220 NETIF_F_GSO_UDP_TUNNEL_CSUM;
13221
13222 if (IS_PF(bp))
13223 dev->udp_tunnel_nic_info = &bnx2x_udp_tunnels;
13224 }
13225
13226 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
13227 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
13228
13229 if (IS_PF(bp)) {
13230 if (chip_is_e1x)
13231 bp->accept_any_vlan = true;
13232 else
13233 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13234 }
13235
13236
13237
13238
13239 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
13240 dev->features |= NETIF_F_HIGHDMA;
13241 if (dev->features & NETIF_F_LRO)
13242 dev->features &= ~NETIF_F_GRO_HW;
13243
13244
13245 dev->hw_features |= NETIF_F_LOOPBACK;
13246
13247 #ifdef BCM_DCBNL
13248 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
13249 #endif
13250
13251
13252 dev->min_mtu = ETH_MIN_PACKET_SIZE;
13253 dev->max_mtu = ETH_MAX_JUMBO_PACKET_SIZE;
13254
13255
13256 bp->mdio.prtad = MDIO_PRTAD_NONE;
13257 bp->mdio.mmds = 0;
13258 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13259 bp->mdio.dev = dev;
13260 bp->mdio.mdio_read = bnx2x_mdio_read;
13261 bp->mdio.mdio_write = bnx2x_mdio_write;
13262
13263 return 0;
13264
13265 err_out_release:
13266 if (atomic_read(&pdev->enable_cnt) == 1)
13267 pci_release_regions(pdev);
13268
13269 err_out_disable:
13270 pci_disable_device(pdev);
13271
13272 err_out:
13273 return rc;
13274 }
13275
13276 static int bnx2x_check_firmware(struct bnx2x *bp)
13277 {
13278 const struct firmware *firmware = bp->firmware;
13279 struct bnx2x_fw_file_hdr *fw_hdr;
13280 struct bnx2x_fw_file_section *sections;
13281 u32 offset, len, num_ops;
13282 __be16 *ops_offsets;
13283 int i;
13284 const u8 *fw_ver;
13285
13286 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
13287 BNX2X_ERR("Wrong FW size\n");
13288 return -EINVAL;
13289 }
13290
13291 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13292 sections = (struct bnx2x_fw_file_section *)fw_hdr;
13293
13294
13295
13296 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13297 offset = be32_to_cpu(sections[i].offset);
13298 len = be32_to_cpu(sections[i].len);
13299 if (offset + len > firmware->size) {
13300 BNX2X_ERR("Section %d length is out of bounds\n", i);
13301 return -EINVAL;
13302 }
13303 }
13304
13305
13306 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13307 ops_offsets = (__force __be16 *)(firmware->data + offset);
13308 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13309
13310 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13311 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13312 BNX2X_ERR("Section offset %d is out of bounds\n", i);
13313 return -EINVAL;
13314 }
13315 }
13316
13317
13318 offset = be32_to_cpu(fw_hdr->fw_version.offset);
13319 fw_ver = firmware->data + offset;
13320 if (fw_ver[0] != bp->fw_major || fw_ver[1] != bp->fw_minor ||
13321 fw_ver[2] != bp->fw_rev || fw_ver[3] != bp->fw_eng) {
13322 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13323 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
13324 bp->fw_major, bp->fw_minor, bp->fw_rev, bp->fw_eng);
13325 return -EINVAL;
13326 }
13327
13328 return 0;
13329 }
13330
13331 static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13332 {
13333 const __be32 *source = (const __be32 *)_source;
13334 u32 *target = (u32 *)_target;
13335 u32 i;
13336
13337 for (i = 0; i < n/4; i++)
13338 target[i] = be32_to_cpu(source[i]);
13339 }
13340
13341
13342
13343
13344
13345 static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13346 {
13347 const __be32 *source = (const __be32 *)_source;
13348 struct raw_op *target = (struct raw_op *)_target;
13349 u32 i, j, tmp;
13350
13351 for (i = 0, j = 0; i < n/8; i++, j += 2) {
13352 tmp = be32_to_cpu(source[j]);
13353 target[i].op = (tmp >> 24) & 0xff;
13354 target[i].offset = tmp & 0xffffff;
13355 target[i].raw_data = be32_to_cpu(source[j + 1]);
13356 }
13357 }
13358
13359
13360
13361
13362 static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
13363 {
13364 const __be32 *source = (const __be32 *)_source;
13365 struct iro *target = (struct iro *)_target;
13366 u32 i, j, tmp;
13367
13368 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
13369 target[i].base = be32_to_cpu(source[j]);
13370 j++;
13371 tmp = be32_to_cpu(source[j]);
13372 target[i].m1 = (tmp >> 16) & 0xffff;
13373 target[i].m2 = tmp & 0xffff;
13374 j++;
13375 tmp = be32_to_cpu(source[j]);
13376 target[i].m3 = (tmp >> 16) & 0xffff;
13377 target[i].size = tmp & 0xffff;
13378 j++;
13379 }
13380 }
13381
13382 static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13383 {
13384 const __be16 *source = (const __be16 *)_source;
13385 u16 *target = (u16 *)_target;
13386 u32 i;
13387
13388 for (i = 0; i < n/2; i++)
13389 target[i] = be16_to_cpu(source[i]);
13390 }
13391
13392 #define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
13393 do { \
13394 u32 len = be32_to_cpu(fw_hdr->arr.len); \
13395 bp->arr = kmalloc(len, GFP_KERNEL); \
13396 if (!bp->arr) \
13397 goto lbl; \
13398 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
13399 (u8 *)bp->arr, len); \
13400 } while (0)
13401
13402 static int bnx2x_init_firmware(struct bnx2x *bp)
13403 {
13404 const char *fw_file_name, *fw_file_name_v15;
13405 struct bnx2x_fw_file_hdr *fw_hdr;
13406 int rc;
13407
13408 if (bp->firmware)
13409 return 0;
13410
13411 if (CHIP_IS_E1(bp)) {
13412 fw_file_name = FW_FILE_NAME_E1;
13413 fw_file_name_v15 = FW_FILE_NAME_E1_V15;
13414 } else if (CHIP_IS_E1H(bp)) {
13415 fw_file_name = FW_FILE_NAME_E1H;
13416 fw_file_name_v15 = FW_FILE_NAME_E1H_V15;
13417 } else if (!CHIP_IS_E1x(bp)) {
13418 fw_file_name = FW_FILE_NAME_E2;
13419 fw_file_name_v15 = FW_FILE_NAME_E2_V15;
13420 } else {
13421 BNX2X_ERR("Unsupported chip revision\n");
13422 return -EINVAL;
13423 }
13424
13425 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
13426
13427 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
13428 if (rc) {
13429 BNX2X_DEV_INFO("Trying to load older fw %s\n", fw_file_name_v15);
13430
13431
13432 rc = request_firmware(&bp->firmware, fw_file_name_v15, &bp->pdev->dev);
13433
13434 if (rc)
13435 goto request_firmware_exit;
13436
13437 bp->fw_rev = BCM_5710_FW_REVISION_VERSION_V15;
13438 } else {
13439 bp->fw_cap |= FW_CAP_INVALIDATE_VF_FP_HSI;
13440 bp->fw_rev = BCM_5710_FW_REVISION_VERSION;
13441 }
13442
13443 bp->fw_major = BCM_5710_FW_MAJOR_VERSION;
13444 bp->fw_minor = BCM_5710_FW_MINOR_VERSION;
13445 bp->fw_eng = BCM_5710_FW_ENGINEERING_VERSION;
13446
13447 rc = bnx2x_check_firmware(bp);
13448 if (rc) {
13449 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
13450 goto request_firmware_exit;
13451 }
13452
13453 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13454
13455
13456
13457 rc = -ENOMEM;
13458 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13459
13460
13461 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13462
13463
13464 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13465 be16_to_cpu_n);
13466
13467
13468 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13469 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13470 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
13471 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13472 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13473 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13474 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
13475 be32_to_cpu(fw_hdr->usem_pram_data.offset);
13476 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13477 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13478 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
13479 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13480 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13481 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13482 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
13483 be32_to_cpu(fw_hdr->csem_pram_data.offset);
13484
13485 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
13486
13487 return 0;
13488
13489 iro_alloc_err:
13490 kfree(bp->init_ops_offsets);
13491 init_offsets_alloc_err:
13492 kfree(bp->init_ops);
13493 init_ops_alloc_err:
13494 kfree(bp->init_data);
13495 request_firmware_exit:
13496 release_firmware(bp->firmware);
13497 bp->firmware = NULL;
13498
13499 return rc;
13500 }
13501
13502 static void bnx2x_release_firmware(struct bnx2x *bp)
13503 {
13504 kfree(bp->init_ops_offsets);
13505 kfree(bp->init_ops);
13506 kfree(bp->init_data);
13507 release_firmware(bp->firmware);
13508 bp->firmware = NULL;
13509 }
13510
13511 static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
13512 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
13513 .init_hw_cmn = bnx2x_init_hw_common,
13514 .init_hw_port = bnx2x_init_hw_port,
13515 .init_hw_func = bnx2x_init_hw_func,
13516
13517 .reset_hw_cmn = bnx2x_reset_common,
13518 .reset_hw_port = bnx2x_reset_port,
13519 .reset_hw_func = bnx2x_reset_func,
13520
13521 .gunzip_init = bnx2x_gunzip_init,
13522 .gunzip_end = bnx2x_gunzip_end,
13523
13524 .init_fw = bnx2x_init_firmware,
13525 .release_fw = bnx2x_release_firmware,
13526 };
13527
13528 void bnx2x__init_func_obj(struct bnx2x *bp)
13529 {
13530
13531 bnx2x_setup_dmae(bp);
13532
13533 bnx2x_init_func_obj(bp, &bp->func_obj,
13534 bnx2x_sp(bp, func_rdata),
13535 bnx2x_sp_mapping(bp, func_rdata),
13536 bnx2x_sp(bp, func_afex_rdata),
13537 bnx2x_sp_mapping(bp, func_afex_rdata),
13538 &bnx2x_func_sp_drv);
13539 }
13540
13541
13542 static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
13543 {
13544 int cid_count = BNX2X_L2_MAX_CID(bp);
13545
13546 if (IS_SRIOV(bp))
13547 cid_count += BNX2X_VF_CIDS;
13548
13549 if (CNIC_SUPPORT(bp))
13550 cid_count += CNIC_CID_MAX;
13551
13552 return roundup(cid_count, QM_CID_ROUND);
13553 }
13554
13555
13556
13557
13558
13559
13560
13561 static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
13562 {
13563 int index;
13564 u16 control = 0;
13565
13566
13567
13568
13569
13570 if (!pdev->msix_cap) {
13571 dev_info(&pdev->dev, "no msix capability found\n");
13572 return 1 + cnic_cnt;
13573 }
13574 dev_info(&pdev->dev, "msix capability found\n");
13575
13576
13577
13578
13579
13580
13581
13582
13583 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
13584
13585 index = control & PCI_MSIX_FLAGS_QSIZE;
13586
13587 return index;
13588 }
13589
13590 static int set_max_cos_est(int chip_id)
13591 {
13592 switch (chip_id) {
13593 case BCM57710:
13594 case BCM57711:
13595 case BCM57711E:
13596 return BNX2X_MULTI_TX_COS_E1X;
13597 case BCM57712:
13598 case BCM57712_MF:
13599 return BNX2X_MULTI_TX_COS_E2_E3A0;
13600 case BCM57800:
13601 case BCM57800_MF:
13602 case BCM57810:
13603 case BCM57810_MF:
13604 case BCM57840_4_10:
13605 case BCM57840_2_20:
13606 case BCM57840_O:
13607 case BCM57840_MFO:
13608 case BCM57840_MF:
13609 case BCM57811:
13610 case BCM57811_MF:
13611 return BNX2X_MULTI_TX_COS_E3B0;
13612 case BCM57712_VF:
13613 case BCM57800_VF:
13614 case BCM57810_VF:
13615 case BCM57840_VF:
13616 case BCM57811_VF:
13617 return 1;
13618 default:
13619 pr_err("Unknown board_type (%d), aborting\n", chip_id);
13620 return -ENODEV;
13621 }
13622 }
13623
13624 static int set_is_vf(int chip_id)
13625 {
13626 switch (chip_id) {
13627 case BCM57712_VF:
13628 case BCM57800_VF:
13629 case BCM57810_VF:
13630 case BCM57840_VF:
13631 case BCM57811_VF:
13632 return true;
13633 default:
13634 return false;
13635 }
13636 }
13637
13638
13639 #define tsgen_ctrl 0x0
13640 #define tsgen_freecount 0x10
13641 #define tsgen_synctime_t0 0x20
13642 #define tsgen_offset_t0 0x28
13643 #define tsgen_drift_t0 0x30
13644 #define tsgen_synctime_t1 0x58
13645 #define tsgen_offset_t1 0x60
13646 #define tsgen_drift_t1 0x68
13647
13648
13649 static int bnx2x_send_update_drift_ramrod(struct bnx2x *bp, int drift_dir,
13650 int best_val, int best_period)
13651 {
13652 struct bnx2x_func_state_params func_params = {NULL};
13653 struct bnx2x_func_set_timesync_params *set_timesync_params =
13654 &func_params.params.set_timesync;
13655
13656
13657 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
13658 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
13659
13660 func_params.f_obj = &bp->func_obj;
13661 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
13662
13663
13664 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_SET;
13665 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
13666 set_timesync_params->add_sub_drift_adjust_value =
13667 drift_dir ? TS_ADD_VALUE : TS_SUB_VALUE;
13668 set_timesync_params->drift_adjust_value = best_val;
13669 set_timesync_params->drift_adjust_period = best_period;
13670
13671 return bnx2x_func_state_change(bp, &func_params);
13672 }
13673
13674 static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
13675 {
13676 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13677 int rc;
13678 int drift_dir = 1;
13679 int val, period, period1, period2, dif, dif1, dif2;
13680 int best_dif = BNX2X_MAX_PHC_DRIFT, best_period = 0, best_val = 0;
13681
13682 DP(BNX2X_MSG_PTP, "PTP adjfreq called, ppb = %d\n", ppb);
13683
13684 if (!netif_running(bp->dev)) {
13685 DP(BNX2X_MSG_PTP,
13686 "PTP adjfreq called while the interface is down\n");
13687 return -ENETDOWN;
13688 }
13689
13690 if (ppb < 0) {
13691 ppb = -ppb;
13692 drift_dir = 0;
13693 }
13694
13695 if (ppb == 0) {
13696 best_val = 1;
13697 best_period = 0x1FFFFFF;
13698 } else if (ppb >= BNX2X_MAX_PHC_DRIFT) {
13699 best_val = 31;
13700 best_period = 1;
13701 } else {
13702
13703
13704
13705 for (val = 0; val <= 31; val++) {
13706 if ((val & 0x7) == 0)
13707 continue;
13708 period1 = val * 1000000 / ppb;
13709 period2 = period1 + 1;
13710 if (period1 != 0)
13711 dif1 = ppb - (val * 1000000 / period1);
13712 else
13713 dif1 = BNX2X_MAX_PHC_DRIFT;
13714 if (dif1 < 0)
13715 dif1 = -dif1;
13716 dif2 = ppb - (val * 1000000 / period2);
13717 if (dif2 < 0)
13718 dif2 = -dif2;
13719 dif = (dif1 < dif2) ? dif1 : dif2;
13720 period = (dif1 < dif2) ? period1 : period2;
13721 if (dif < best_dif) {
13722 best_dif = dif;
13723 best_val = val;
13724 best_period = period;
13725 }
13726 }
13727 }
13728
13729 rc = bnx2x_send_update_drift_ramrod(bp, drift_dir, best_val,
13730 best_period);
13731 if (rc) {
13732 BNX2X_ERR("Failed to set drift\n");
13733 return -EFAULT;
13734 }
13735
13736 DP(BNX2X_MSG_PTP, "Configured val = %d, period = %d\n", best_val,
13737 best_period);
13738
13739 return 0;
13740 }
13741
13742 static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
13743 {
13744 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13745
13746 if (!netif_running(bp->dev)) {
13747 DP(BNX2X_MSG_PTP,
13748 "PTP adjtime called while the interface is down\n");
13749 return -ENETDOWN;
13750 }
13751
13752 DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
13753
13754 timecounter_adjtime(&bp->timecounter, delta);
13755
13756 return 0;
13757 }
13758
13759 static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
13760 {
13761 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13762 u64 ns;
13763
13764 if (!netif_running(bp->dev)) {
13765 DP(BNX2X_MSG_PTP,
13766 "PTP gettime called while the interface is down\n");
13767 return -ENETDOWN;
13768 }
13769
13770 ns = timecounter_read(&bp->timecounter);
13771
13772 DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
13773
13774 *ts = ns_to_timespec64(ns);
13775
13776 return 0;
13777 }
13778
13779 static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
13780 const struct timespec64 *ts)
13781 {
13782 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13783 u64 ns;
13784
13785 if (!netif_running(bp->dev)) {
13786 DP(BNX2X_MSG_PTP,
13787 "PTP settime called while the interface is down\n");
13788 return -ENETDOWN;
13789 }
13790
13791 ns = timespec64_to_ns(ts);
13792
13793 DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
13794
13795
13796 timecounter_init(&bp->timecounter, &bp->cyclecounter, ns);
13797
13798 return 0;
13799 }
13800
13801
13802 static int bnx2x_ptp_enable(struct ptp_clock_info *ptp,
13803 struct ptp_clock_request *rq, int on)
13804 {
13805 struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
13806
13807 BNX2X_ERR("PHC ancillary features are not supported\n");
13808 return -ENOTSUPP;
13809 }
13810
13811 void bnx2x_register_phc(struct bnx2x *bp)
13812 {
13813
13814 bp->ptp_clock_info.owner = THIS_MODULE;
13815 snprintf(bp->ptp_clock_info.name, 16, "%s", bp->dev->name);
13816 bp->ptp_clock_info.max_adj = BNX2X_MAX_PHC_DRIFT;
13817 bp->ptp_clock_info.n_alarm = 0;
13818 bp->ptp_clock_info.n_ext_ts = 0;
13819 bp->ptp_clock_info.n_per_out = 0;
13820 bp->ptp_clock_info.pps = 0;
13821 bp->ptp_clock_info.adjfreq = bnx2x_ptp_adjfreq;
13822 bp->ptp_clock_info.adjtime = bnx2x_ptp_adjtime;
13823 bp->ptp_clock_info.gettime64 = bnx2x_ptp_gettime;
13824 bp->ptp_clock_info.settime64 = bnx2x_ptp_settime;
13825 bp->ptp_clock_info.enable = bnx2x_ptp_enable;
13826
13827 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13828 if (IS_ERR(bp->ptp_clock)) {
13829 bp->ptp_clock = NULL;
13830 BNX2X_ERR("PTP clock registration failed\n");
13831 }
13832 }
13833
13834 static int bnx2x_init_one(struct pci_dev *pdev,
13835 const struct pci_device_id *ent)
13836 {
13837 struct net_device *dev = NULL;
13838 struct bnx2x *bp;
13839 int rc, max_non_def_sbs;
13840 int rx_count, tx_count, rss_count, doorbell_size;
13841 int max_cos_est;
13842 bool is_vf;
13843 int cnic_cnt;
13844
13845
13846
13847
13848 if (is_kdump_kernel()) {
13849 ktime_t now = ktime_get_boottime();
13850 ktime_t fw_ready_time = ktime_set(5, 0);
13851
13852 if (ktime_before(now, fw_ready_time))
13853 msleep(ktime_ms_delta(fw_ready_time, now));
13854 }
13855
13856
13857
13858
13859
13860
13861
13862
13863
13864 max_cos_est = set_max_cos_est(ent->driver_data);
13865 if (max_cos_est < 0)
13866 return max_cos_est;
13867 is_vf = set_is_vf(ent->driver_data);
13868 cnic_cnt = is_vf ? 0 : 1;
13869
13870 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
13871
13872
13873 max_non_def_sbs += is_vf ? 1 : 0;
13874
13875
13876 rss_count = max_non_def_sbs - cnic_cnt;
13877
13878 if (rss_count < 1)
13879 return -EINVAL;
13880
13881
13882 rx_count = rss_count + cnic_cnt;
13883
13884
13885
13886
13887 tx_count = rss_count * max_cos_est + cnic_cnt;
13888
13889
13890 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
13891 if (!dev)
13892 return -ENOMEM;
13893
13894 bp = netdev_priv(dev);
13895
13896 bp->flags = 0;
13897 if (is_vf)
13898 bp->flags |= IS_VF_FLAG;
13899
13900 bp->igu_sb_cnt = max_non_def_sbs;
13901 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
13902 bp->msg_enable = debug;
13903 bp->cnic_support = cnic_cnt;
13904 bp->cnic_probe = bnx2x_cnic_probe;
13905
13906 pci_set_drvdata(pdev, dev);
13907
13908 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
13909 if (rc < 0) {
13910 free_netdev(dev);
13911 return rc;
13912 }
13913
13914 BNX2X_DEV_INFO("This is a %s function\n",
13915 IS_PF(bp) ? "physical" : "virtual");
13916 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
13917 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
13918 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
13919 tx_count, rx_count);
13920
13921 rc = bnx2x_init_bp(bp);
13922 if (rc)
13923 goto init_one_exit;
13924
13925
13926
13927
13928
13929 if (IS_VF(bp)) {
13930 bp->doorbells = bnx2x_vf_doorbells(bp);
13931 rc = bnx2x_vf_pci_alloc(bp);
13932 if (rc)
13933 goto init_one_freemem;
13934 } else {
13935 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
13936 if (doorbell_size > pci_resource_len(pdev, 2)) {
13937 dev_err(&bp->pdev->dev,
13938 "Cannot map doorbells, bar size too small, aborting\n");
13939 rc = -ENOMEM;
13940 goto init_one_freemem;
13941 }
13942 bp->doorbells = ioremap(pci_resource_start(pdev, 2),
13943 doorbell_size);
13944 }
13945 if (!bp->doorbells) {
13946 dev_err(&bp->pdev->dev,
13947 "Cannot map doorbell space, aborting\n");
13948 rc = -ENOMEM;
13949 goto init_one_freemem;
13950 }
13951
13952 if (IS_VF(bp)) {
13953 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
13954 if (rc)
13955 goto init_one_freemem;
13956
13957 #ifdef CONFIG_BNX2X_SRIOV
13958
13959 if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
13960 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13961 dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
13962 }
13963 #endif
13964 }
13965
13966
13967 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
13968 if (rc)
13969 goto init_one_freemem;
13970
13971
13972 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
13973 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
13974
13975
13976 if (CHIP_IS_E1x(bp))
13977 bp->flags |= NO_FCOE_FLAG;
13978
13979
13980 bnx2x_set_num_queues(bp);
13981
13982
13983
13984
13985 rc = bnx2x_set_int_mode(bp);
13986 if (rc) {
13987 dev_err(&pdev->dev, "Cannot set interrupts\n");
13988 goto init_one_freemem;
13989 }
13990 BNX2X_DEV_INFO("set interrupts successfully\n");
13991
13992
13993 rc = register_netdev(dev);
13994 if (rc) {
13995 dev_err(&pdev->dev, "Cannot register net device\n");
13996 goto init_one_freemem;
13997 }
13998 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
13999
14000 if (!NO_FCOE(bp)) {
14001
14002 rtnl_lock();
14003 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14004 rtnl_unlock();
14005 }
14006 BNX2X_DEV_INFO(
14007 "%s (%c%d) PCI-E found at mem %lx, IRQ %d, node addr %pM\n",
14008 board_info[ent->driver_data].name,
14009 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
14010 dev->base_addr, bp->pdev->irq, dev->dev_addr);
14011 pcie_print_link_status(bp->pdev);
14012
14013 if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
14014 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
14015
14016 return 0;
14017
14018 init_one_freemem:
14019 bnx2x_free_mem_bp(bp);
14020
14021 init_one_exit:
14022 bnx2x_disable_pcie_error_reporting(bp);
14023
14024 if (bp->regview)
14025 iounmap(bp->regview);
14026
14027 if (IS_PF(bp) && bp->doorbells)
14028 iounmap(bp->doorbells);
14029
14030 free_netdev(dev);
14031
14032 if (atomic_read(&pdev->enable_cnt) == 1)
14033 pci_release_regions(pdev);
14034
14035 pci_disable_device(pdev);
14036
14037 return rc;
14038 }
14039
14040 static void __bnx2x_remove(struct pci_dev *pdev,
14041 struct net_device *dev,
14042 struct bnx2x *bp,
14043 bool remove_netdev)
14044 {
14045
14046 if (!NO_FCOE(bp)) {
14047 rtnl_lock();
14048 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
14049 rtnl_unlock();
14050 }
14051
14052 #ifdef BCM_DCBNL
14053
14054 bnx2x_dcbnl_update_applist(bp, true);
14055 #endif
14056
14057 if (IS_PF(bp) &&
14058 !BP_NOMCP(bp) &&
14059 (bp->flags & BC_SUPPORTS_RMMOD_CMD))
14060 bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
14061
14062
14063 if (remove_netdev) {
14064 unregister_netdev(dev);
14065 } else {
14066 rtnl_lock();
14067 dev_close(dev);
14068 rtnl_unlock();
14069 }
14070
14071 bnx2x_iov_remove_one(bp);
14072
14073
14074 if (IS_PF(bp)) {
14075 bnx2x_set_power_state(bp, PCI_D0);
14076 bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_NOT_LOADED);
14077
14078
14079
14080
14081 bnx2x_reset_endianity(bp);
14082 }
14083
14084
14085 bnx2x_disable_msi(bp);
14086
14087
14088 if (IS_PF(bp))
14089 bnx2x_set_power_state(bp, PCI_D3hot);
14090
14091
14092 cancel_delayed_work_sync(&bp->sp_rtnl_task);
14093
14094
14095 if (IS_VF(bp))
14096 bnx2x_vfpf_release(bp);
14097
14098
14099 if (system_state == SYSTEM_POWER_OFF) {
14100 pci_wake_from_d3(pdev, bp->wol);
14101 pci_set_power_state(pdev, PCI_D3hot);
14102 }
14103
14104 bnx2x_disable_pcie_error_reporting(bp);
14105 if (remove_netdev) {
14106 if (bp->regview)
14107 iounmap(bp->regview);
14108
14109
14110
14111
14112 if (IS_PF(bp)) {
14113 if (bp->doorbells)
14114 iounmap(bp->doorbells);
14115
14116 bnx2x_release_firmware(bp);
14117 } else {
14118 bnx2x_vf_pci_dealloc(bp);
14119 }
14120 bnx2x_free_mem_bp(bp);
14121
14122 free_netdev(dev);
14123
14124 if (atomic_read(&pdev->enable_cnt) == 1)
14125 pci_release_regions(pdev);
14126
14127 pci_disable_device(pdev);
14128 }
14129 }
14130
14131 static void bnx2x_remove_one(struct pci_dev *pdev)
14132 {
14133 struct net_device *dev = pci_get_drvdata(pdev);
14134 struct bnx2x *bp;
14135
14136 if (!dev) {
14137 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
14138 return;
14139 }
14140 bp = netdev_priv(dev);
14141
14142 __bnx2x_remove(pdev, dev, bp, true);
14143 }
14144
14145 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
14146 {
14147 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
14148
14149 bp->rx_mode = BNX2X_RX_MODE_NONE;
14150
14151 if (CNIC_LOADED(bp))
14152 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
14153
14154
14155 bnx2x_tx_disable(bp);
14156 netdev_reset_tc(bp->dev);
14157
14158 del_timer_sync(&bp->timer);
14159 cancel_delayed_work_sync(&bp->sp_task);
14160 cancel_delayed_work_sync(&bp->period_task);
14161
14162 if (!down_timeout(&bp->stats_lock, HZ / 10)) {
14163 bp->stats_state = STATS_STATE_DISABLED;
14164 up(&bp->stats_lock);
14165 }
14166
14167 bnx2x_save_statistics(bp);
14168
14169 netif_carrier_off(bp->dev);
14170
14171 return 0;
14172 }
14173
14174
14175
14176
14177
14178
14179
14180
14181
14182 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
14183 pci_channel_state_t state)
14184 {
14185 struct net_device *dev = pci_get_drvdata(pdev);
14186 struct bnx2x *bp = netdev_priv(dev);
14187
14188 rtnl_lock();
14189
14190 BNX2X_ERR("IO error detected\n");
14191
14192 netif_device_detach(dev);
14193
14194 if (state == pci_channel_io_perm_failure) {
14195 rtnl_unlock();
14196 return PCI_ERS_RESULT_DISCONNECT;
14197 }
14198
14199 if (netif_running(dev))
14200 bnx2x_eeh_nic_unload(bp);
14201
14202 bnx2x_prev_path_mark_eeh(bp);
14203
14204 pci_disable_device(pdev);
14205
14206 rtnl_unlock();
14207
14208
14209 return PCI_ERS_RESULT_NEED_RESET;
14210 }
14211
14212
14213
14214
14215
14216
14217
14218 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
14219 {
14220 struct net_device *dev = pci_get_drvdata(pdev);
14221 struct bnx2x *bp = netdev_priv(dev);
14222 int i;
14223
14224 rtnl_lock();
14225 BNX2X_ERR("IO slot reset initializing...\n");
14226 if (pci_enable_device(pdev)) {
14227 dev_err(&pdev->dev,
14228 "Cannot re-enable PCI device after reset\n");
14229 rtnl_unlock();
14230 return PCI_ERS_RESULT_DISCONNECT;
14231 }
14232
14233 pci_set_master(pdev);
14234 pci_restore_state(pdev);
14235 pci_save_state(pdev);
14236
14237 if (netif_running(dev))
14238 bnx2x_set_power_state(bp, PCI_D0);
14239
14240 if (netif_running(dev)) {
14241 BNX2X_ERR("IO slot reset --> driver unload\n");
14242
14243
14244 if (bnx2x_init_shmem(bp)) {
14245 rtnl_unlock();
14246 return PCI_ERS_RESULT_DISCONNECT;
14247 }
14248
14249 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
14250 u32 v;
14251
14252 v = SHMEM2_RD(bp,
14253 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
14254 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
14255 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
14256 }
14257 bnx2x_drain_tx_queues(bp);
14258 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
14259 bnx2x_netif_stop(bp, 1);
14260 bnx2x_del_all_napi(bp);
14261
14262 if (CNIC_LOADED(bp))
14263 bnx2x_del_all_napi_cnic(bp);
14264
14265 bnx2x_free_irq(bp);
14266
14267
14268 bnx2x_send_unload_done(bp, true);
14269
14270 bp->sp_state = 0;
14271 bp->port.pmf = 0;
14272
14273 bnx2x_prev_unload(bp);
14274
14275
14276
14277
14278 bnx2x_squeeze_objects(bp);
14279 bnx2x_free_skbs(bp);
14280 for_each_rx_queue(bp, i)
14281 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
14282 bnx2x_free_fp_mem(bp);
14283 bnx2x_free_mem(bp);
14284
14285 bp->state = BNX2X_STATE_CLOSED;
14286 }
14287
14288 rtnl_unlock();
14289
14290 return PCI_ERS_RESULT_RECOVERED;
14291 }
14292
14293
14294
14295
14296
14297
14298
14299
14300 static void bnx2x_io_resume(struct pci_dev *pdev)
14301 {
14302 struct net_device *dev = pci_get_drvdata(pdev);
14303 struct bnx2x *bp = netdev_priv(dev);
14304
14305 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
14306 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
14307 return;
14308 }
14309
14310 rtnl_lock();
14311
14312 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
14313 DRV_MSG_SEQ_NUMBER_MASK;
14314
14315 if (netif_running(dev))
14316 bnx2x_nic_load(bp, LOAD_NORMAL);
14317
14318 netif_device_attach(dev);
14319
14320 rtnl_unlock();
14321 }
14322
14323 static const struct pci_error_handlers bnx2x_err_handler = {
14324 .error_detected = bnx2x_io_error_detected,
14325 .slot_reset = bnx2x_io_slot_reset,
14326 .resume = bnx2x_io_resume,
14327 };
14328
14329 static void bnx2x_shutdown(struct pci_dev *pdev)
14330 {
14331 struct net_device *dev = pci_get_drvdata(pdev);
14332 struct bnx2x *bp;
14333
14334 if (!dev)
14335 return;
14336
14337 bp = netdev_priv(dev);
14338 if (!bp)
14339 return;
14340
14341 rtnl_lock();
14342 netif_device_detach(dev);
14343 rtnl_unlock();
14344
14345
14346
14347
14348
14349 __bnx2x_remove(pdev, dev, bp, false);
14350 }
14351
14352 static struct pci_driver bnx2x_pci_driver = {
14353 .name = DRV_MODULE_NAME,
14354 .id_table = bnx2x_pci_tbl,
14355 .probe = bnx2x_init_one,
14356 .remove = bnx2x_remove_one,
14357 .driver.pm = &bnx2x_pm_ops,
14358 .err_handler = &bnx2x_err_handler,
14359 #ifdef CONFIG_BNX2X_SRIOV
14360 .sriov_configure = bnx2x_sriov_configure,
14361 #endif
14362 .shutdown = bnx2x_shutdown,
14363 };
14364
14365 static int __init bnx2x_init(void)
14366 {
14367 int ret;
14368
14369 bnx2x_wq = create_singlethread_workqueue("bnx2x");
14370 if (bnx2x_wq == NULL) {
14371 pr_err("Cannot create workqueue\n");
14372 return -ENOMEM;
14373 }
14374 bnx2x_iov_wq = create_singlethread_workqueue("bnx2x_iov");
14375 if (!bnx2x_iov_wq) {
14376 pr_err("Cannot create iov workqueue\n");
14377 destroy_workqueue(bnx2x_wq);
14378 return -ENOMEM;
14379 }
14380
14381 ret = pci_register_driver(&bnx2x_pci_driver);
14382 if (ret) {
14383 pr_err("Cannot register driver\n");
14384 destroy_workqueue(bnx2x_wq);
14385 destroy_workqueue(bnx2x_iov_wq);
14386 }
14387 return ret;
14388 }
14389
14390 static void __exit bnx2x_cleanup(void)
14391 {
14392 struct list_head *pos, *q;
14393
14394 pci_unregister_driver(&bnx2x_pci_driver);
14395
14396 destroy_workqueue(bnx2x_wq);
14397 destroy_workqueue(bnx2x_iov_wq);
14398
14399
14400 list_for_each_safe(pos, q, &bnx2x_prev_list) {
14401 struct bnx2x_prev_path_list *tmp =
14402 list_entry(pos, struct bnx2x_prev_path_list, list);
14403 list_del(pos);
14404 kfree(tmp);
14405 }
14406 }
14407
14408 void bnx2x_notify_link_changed(struct bnx2x *bp)
14409 {
14410 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
14411 }
14412
14413 module_init(bnx2x_init);
14414 module_exit(bnx2x_cleanup);
14415
14416
14417
14418
14419
14420
14421
14422
14423 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
14424 {
14425 unsigned long ramrod_flags = 0;
14426
14427 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
14428 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
14429 &bp->iscsi_l2_mac_obj, true,
14430 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
14431 }
14432
14433
14434 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
14435 {
14436 struct eth_spe *spe;
14437 int cxt_index, cxt_offset;
14438
14439 #ifdef BNX2X_STOP_ON_ERROR
14440 if (unlikely(bp->panic))
14441 return;
14442 #endif
14443
14444 spin_lock_bh(&bp->spq_lock);
14445 BUG_ON(bp->cnic_spq_pending < count);
14446 bp->cnic_spq_pending -= count;
14447
14448 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
14449 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
14450 & SPE_HDR_CONN_TYPE) >>
14451 SPE_HDR_CONN_TYPE_SHIFT;
14452 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
14453 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
14454
14455
14456
14457
14458 if (type == ETH_CONNECTION_TYPE) {
14459 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
14460 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
14461 ILT_PAGE_CIDS;
14462 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
14463 (cxt_index * ILT_PAGE_CIDS);
14464 bnx2x_set_ctx_validation(bp,
14465 &bp->context[cxt_index].
14466 vcxt[cxt_offset].eth,
14467 BNX2X_ISCSI_ETH_CID(bp));
14468 }
14469 }
14470
14471
14472
14473
14474
14475
14476
14477 if (type == ETH_CONNECTION_TYPE) {
14478 if (!atomic_read(&bp->cq_spq_left))
14479 break;
14480 else
14481 atomic_dec(&bp->cq_spq_left);
14482 } else if (type == NONE_CONNECTION_TYPE) {
14483 if (!atomic_read(&bp->eq_spq_left))
14484 break;
14485 else
14486 atomic_dec(&bp->eq_spq_left);
14487 } else if ((type == ISCSI_CONNECTION_TYPE) ||
14488 (type == FCOE_CONNECTION_TYPE)) {
14489 if (bp->cnic_spq_pending >=
14490 bp->cnic_eth_dev.max_kwqe_pending)
14491 break;
14492 else
14493 bp->cnic_spq_pending++;
14494 } else {
14495 BNX2X_ERR("Unknown SPE type: %d\n", type);
14496 bnx2x_panic();
14497 break;
14498 }
14499
14500 spe = bnx2x_sp_get_next(bp);
14501 *spe = *bp->cnic_kwq_cons;
14502
14503 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
14504 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
14505
14506 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
14507 bp->cnic_kwq_cons = bp->cnic_kwq;
14508 else
14509 bp->cnic_kwq_cons++;
14510 }
14511 bnx2x_sp_prod_update(bp);
14512 spin_unlock_bh(&bp->spq_lock);
14513 }
14514
14515 static int bnx2x_cnic_sp_queue(struct net_device *dev,
14516 struct kwqe_16 *kwqes[], u32 count)
14517 {
14518 struct bnx2x *bp = netdev_priv(dev);
14519 int i;
14520
14521 #ifdef BNX2X_STOP_ON_ERROR
14522 if (unlikely(bp->panic)) {
14523 BNX2X_ERR("Can't post to SP queue while panic\n");
14524 return -EIO;
14525 }
14526 #endif
14527
14528 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
14529 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
14530 BNX2X_ERR("Handling parity error recovery. Try again later\n");
14531 return -EAGAIN;
14532 }
14533
14534 spin_lock_bh(&bp->spq_lock);
14535
14536 for (i = 0; i < count; i++) {
14537 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
14538
14539 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
14540 break;
14541
14542 *bp->cnic_kwq_prod = *spe;
14543
14544 bp->cnic_kwq_pending++;
14545
14546 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
14547 spe->hdr.conn_and_cmd_data, spe->hdr.type,
14548 spe->data.update_data_addr.hi,
14549 spe->data.update_data_addr.lo,
14550 bp->cnic_kwq_pending);
14551
14552 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
14553 bp->cnic_kwq_prod = bp->cnic_kwq;
14554 else
14555 bp->cnic_kwq_prod++;
14556 }
14557
14558 spin_unlock_bh(&bp->spq_lock);
14559
14560 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
14561 bnx2x_cnic_sp_post(bp, 0);
14562
14563 return i;
14564 }
14565
14566 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14567 {
14568 struct cnic_ops *c_ops;
14569 int rc = 0;
14570
14571 mutex_lock(&bp->cnic_mutex);
14572 c_ops = rcu_dereference_protected(bp->cnic_ops,
14573 lockdep_is_held(&bp->cnic_mutex));
14574 if (c_ops)
14575 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14576 mutex_unlock(&bp->cnic_mutex);
14577
14578 return rc;
14579 }
14580
14581 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
14582 {
14583 struct cnic_ops *c_ops;
14584 int rc = 0;
14585
14586 rcu_read_lock();
14587 c_ops = rcu_dereference(bp->cnic_ops);
14588 if (c_ops)
14589 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
14590 rcu_read_unlock();
14591
14592 return rc;
14593 }
14594
14595
14596
14597
14598 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
14599 {
14600 struct cnic_ctl_info ctl = {0};
14601
14602 ctl.cmd = cmd;
14603
14604 return bnx2x_cnic_ctl_send(bp, &ctl);
14605 }
14606
14607 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
14608 {
14609 struct cnic_ctl_info ctl = {0};
14610
14611
14612 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
14613 ctl.data.comp.cid = cid;
14614 ctl.data.comp.error = err;
14615
14616 bnx2x_cnic_ctl_send_bh(bp, &ctl);
14617 bnx2x_cnic_sp_post(bp, 0);
14618 }
14619
14620
14621
14622
14623
14624
14625 static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
14626 {
14627 unsigned long accept_flags = 0, ramrod_flags = 0;
14628 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
14629 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
14630
14631 if (start) {
14632
14633
14634
14635
14636
14637
14638 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
14639 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
14640 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
14641 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
14642
14643
14644 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
14645
14646 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
14647 } else
14648
14649 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
14650
14651 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
14652 set_bit(sched_state, &bp->sp_state);
14653 else {
14654 __set_bit(RAMROD_RX, &ramrod_flags);
14655 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
14656 ramrod_flags);
14657 }
14658 }
14659
14660 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
14661 {
14662 struct bnx2x *bp = netdev_priv(dev);
14663 int rc = 0;
14664
14665 switch (ctl->cmd) {
14666 case DRV_CTL_CTXTBL_WR_CMD: {
14667 u32 index = ctl->data.io.offset;
14668 dma_addr_t addr = ctl->data.io.dma_addr;
14669
14670 bnx2x_ilt_wr(bp, index, addr);
14671 break;
14672 }
14673
14674 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
14675 int count = ctl->data.credit.credit_count;
14676
14677 bnx2x_cnic_sp_post(bp, count);
14678 break;
14679 }
14680
14681
14682 case DRV_CTL_START_L2_CMD: {
14683 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14684 unsigned long sp_bits = 0;
14685
14686
14687 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
14688 cp->iscsi_l2_client_id,
14689 cp->iscsi_l2_cid, BP_FUNC(bp),
14690 bnx2x_sp(bp, mac_rdata),
14691 bnx2x_sp_mapping(bp, mac_rdata),
14692 BNX2X_FILTER_MAC_PENDING,
14693 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
14694 &bp->macs_pool);
14695
14696
14697 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
14698 if (rc)
14699 break;
14700
14701 barrier();
14702
14703
14704
14705 netif_addr_lock_bh(dev);
14706 bnx2x_set_iscsi_eth_rx_mode(bp, true);
14707 netif_addr_unlock_bh(dev);
14708
14709
14710 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14711 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
14712
14713 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14714 BNX2X_ERR("rx_mode completion timed out!\n");
14715
14716 break;
14717 }
14718
14719
14720 case DRV_CTL_STOP_L2_CMD: {
14721 unsigned long sp_bits = 0;
14722
14723
14724 netif_addr_lock_bh(dev);
14725 bnx2x_set_iscsi_eth_rx_mode(bp, false);
14726 netif_addr_unlock_bh(dev);
14727
14728
14729 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
14730 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
14731
14732 if (!bnx2x_wait_sp_comp(bp, sp_bits))
14733 BNX2X_ERR("rx_mode completion timed out!\n");
14734
14735 barrier();
14736
14737
14738 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
14739 BNX2X_ISCSI_ETH_MAC, true);
14740 break;
14741 }
14742 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
14743 int count = ctl->data.credit.credit_count;
14744
14745 smp_mb__before_atomic();
14746 atomic_add(count, &bp->cq_spq_left);
14747 smp_mb__after_atomic();
14748 break;
14749 }
14750 case DRV_CTL_ULP_REGISTER_CMD: {
14751 int ulp_type = ctl->data.register_data.ulp_type;
14752
14753 if (CHIP_IS_E3(bp)) {
14754 int idx = BP_FW_MB_IDX(bp);
14755 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14756 int path = BP_PATH(bp);
14757 int port = BP_PORT(bp);
14758 int i;
14759 u32 scratch_offset;
14760 u32 *host_addr;
14761
14762
14763 if (ulp_type == CNIC_ULP_ISCSI)
14764 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14765 else if (ulp_type == CNIC_ULP_FCOE)
14766 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14767 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14768
14769 if ((ulp_type != CNIC_ULP_FCOE) ||
14770 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
14771 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
14772 break;
14773
14774
14775 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
14776 if (!scratch_offset)
14777 break;
14778 scratch_offset += offsetof(struct glob_ncsi_oem_data,
14779 fcoe_features[path][port]);
14780 host_addr = (u32 *) &(ctl->data.register_data.
14781 fcoe_features);
14782 for (i = 0; i < sizeof(struct fcoe_capabilities);
14783 i += 4)
14784 REG_WR(bp, scratch_offset + i,
14785 *(host_addr + i/4));
14786 }
14787 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14788 break;
14789 }
14790
14791 case DRV_CTL_ULP_UNREGISTER_CMD: {
14792 int ulp_type = ctl->data.ulp_type;
14793
14794 if (CHIP_IS_E3(bp)) {
14795 int idx = BP_FW_MB_IDX(bp);
14796 u32 cap;
14797
14798 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
14799 if (ulp_type == CNIC_ULP_ISCSI)
14800 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
14801 else if (ulp_type == CNIC_ULP_FCOE)
14802 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
14803 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
14804 }
14805 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
14806 break;
14807 }
14808
14809 default:
14810 BNX2X_ERR("unknown command %x\n", ctl->cmd);
14811 rc = -EINVAL;
14812 }
14813
14814
14815 if (IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp)) {
14816 switch (ctl->drv_state) {
14817 case DRV_NOP:
14818 break;
14819 case DRV_ACTIVE:
14820 bnx2x_set_os_driver_state(bp,
14821 OS_DRIVER_STATE_ACTIVE);
14822 break;
14823 case DRV_INACTIVE:
14824 bnx2x_set_os_driver_state(bp,
14825 OS_DRIVER_STATE_DISABLED);
14826 break;
14827 case DRV_UNLOADED:
14828 bnx2x_set_os_driver_state(bp,
14829 OS_DRIVER_STATE_NOT_LOADED);
14830 break;
14831 default:
14832 BNX2X_ERR("Unknown cnic driver state: %d\n", ctl->drv_state);
14833 }
14834 }
14835
14836 return rc;
14837 }
14838
14839 static int bnx2x_get_fc_npiv(struct net_device *dev,
14840 struct cnic_fc_npiv_tbl *cnic_tbl)
14841 {
14842 struct bnx2x *bp = netdev_priv(dev);
14843 struct bdn_fc_npiv_tbl *tbl = NULL;
14844 u32 offset, entries;
14845 int rc = -EINVAL;
14846 int i;
14847
14848 if (!SHMEM2_HAS(bp, fc_npiv_nvram_tbl_addr[0]))
14849 goto out;
14850
14851 DP(BNX2X_MSG_MCP, "About to read the FC-NPIV table\n");
14852
14853 tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
14854 if (!tbl) {
14855 BNX2X_ERR("Failed to allocate fc_npiv table\n");
14856 goto out;
14857 }
14858
14859 offset = SHMEM2_RD(bp, fc_npiv_nvram_tbl_addr[BP_PORT(bp)]);
14860 if (!offset) {
14861 DP(BNX2X_MSG_MCP, "No FC-NPIV in NVRAM\n");
14862 goto out;
14863 }
14864 DP(BNX2X_MSG_MCP, "Offset of FC-NPIV in NVRAM: %08x\n", offset);
14865
14866
14867 if (bnx2x_nvram_read(bp, offset, (u8 *)tbl, sizeof(*tbl))) {
14868 BNX2X_ERR("Failed to read FC-NPIV table\n");
14869 goto out;
14870 }
14871
14872
14873
14874
14875 entries = tbl->fc_npiv_cfg.num_of_npiv;
14876 entries = (__force u32)be32_to_cpu((__force __be32)entries);
14877 tbl->fc_npiv_cfg.num_of_npiv = entries;
14878
14879 if (!tbl->fc_npiv_cfg.num_of_npiv) {
14880 DP(BNX2X_MSG_MCP,
14881 "No FC-NPIV table [valid, simply not present]\n");
14882 goto out;
14883 } else if (tbl->fc_npiv_cfg.num_of_npiv > MAX_NUMBER_NPIV) {
14884 BNX2X_ERR("FC-NPIV table with bad length 0x%08x\n",
14885 tbl->fc_npiv_cfg.num_of_npiv);
14886 goto out;
14887 } else {
14888 DP(BNX2X_MSG_MCP, "Read 0x%08x entries from NVRAM\n",
14889 tbl->fc_npiv_cfg.num_of_npiv);
14890 }
14891
14892
14893 cnic_tbl->count = tbl->fc_npiv_cfg.num_of_npiv;
14894 for (i = 0; i < cnic_tbl->count; i++) {
14895 memcpy(cnic_tbl->wwpn[i], tbl->settings[i].npiv_wwpn, 8);
14896 memcpy(cnic_tbl->wwnn[i], tbl->settings[i].npiv_wwnn, 8);
14897 }
14898
14899 rc = 0;
14900 out:
14901 kfree(tbl);
14902 return rc;
14903 }
14904
14905 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
14906 {
14907 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14908
14909 if (bp->flags & USING_MSIX_FLAG) {
14910 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
14911 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
14912 cp->irq_arr[0].vector = bp->msix_table[1].vector;
14913 } else {
14914 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
14915 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
14916 }
14917 if (!CHIP_IS_E1x(bp))
14918 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
14919 else
14920 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
14921
14922 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
14923 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
14924 cp->irq_arr[1].status_blk = bp->def_status_blk;
14925 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
14926 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
14927
14928 cp->num_irq = 2;
14929 }
14930
14931 void bnx2x_setup_cnic_info(struct bnx2x *bp)
14932 {
14933 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14934
14935 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
14936 bnx2x_cid_ilt_lines(bp);
14937 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
14938 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
14939 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
14940
14941 DP(NETIF_MSG_IFUP, "BNX2X_1st_NON_L2_ETH_CID(bp) %x, cp->starting_cid %x, cp->fcoe_init_cid %x, cp->iscsi_l2_cid %x\n",
14942 BNX2X_1st_NON_L2_ETH_CID(bp), cp->starting_cid, cp->fcoe_init_cid,
14943 cp->iscsi_l2_cid);
14944
14945 if (NO_ISCSI_OOO(bp))
14946 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
14947 }
14948
14949 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
14950 void *data)
14951 {
14952 struct bnx2x *bp = netdev_priv(dev);
14953 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
14954 int rc;
14955
14956 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
14957
14958 if (ops == NULL) {
14959 BNX2X_ERR("NULL ops received\n");
14960 return -EINVAL;
14961 }
14962
14963 if (!CNIC_SUPPORT(bp)) {
14964 BNX2X_ERR("Can't register CNIC when not supported\n");
14965 return -EOPNOTSUPP;
14966 }
14967
14968 if (!CNIC_LOADED(bp)) {
14969 rc = bnx2x_load_cnic(bp);
14970 if (rc) {
14971 BNX2X_ERR("CNIC-related load failed\n");
14972 return rc;
14973 }
14974 }
14975
14976 bp->cnic_enabled = true;
14977
14978 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
14979 if (!bp->cnic_kwq)
14980 return -ENOMEM;
14981
14982 bp->cnic_kwq_cons = bp->cnic_kwq;
14983 bp->cnic_kwq_prod = bp->cnic_kwq;
14984 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
14985
14986 bp->cnic_spq_pending = 0;
14987 bp->cnic_kwq_pending = 0;
14988
14989 bp->cnic_data = data;
14990
14991 cp->num_irq = 0;
14992 cp->drv_state |= CNIC_DRV_STATE_REGD;
14993 cp->iro_arr = bp->iro_arr;
14994
14995 bnx2x_setup_cnic_irq_info(bp);
14996
14997 rcu_assign_pointer(bp->cnic_ops, ops);
14998
14999
15000 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
15001
15002 return 0;
15003 }
15004
15005 static int bnx2x_unregister_cnic(struct net_device *dev)
15006 {
15007 struct bnx2x *bp = netdev_priv(dev);
15008 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15009
15010 mutex_lock(&bp->cnic_mutex);
15011 cp->drv_state = 0;
15012 RCU_INIT_POINTER(bp->cnic_ops, NULL);
15013 mutex_unlock(&bp->cnic_mutex);
15014 synchronize_rcu();
15015 bp->cnic_enabled = false;
15016 kfree(bp->cnic_kwq);
15017 bp->cnic_kwq = NULL;
15018
15019 return 0;
15020 }
15021
15022 static struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
15023 {
15024 struct bnx2x *bp = netdev_priv(dev);
15025 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
15026
15027
15028
15029
15030
15031 if (NO_ISCSI(bp) && NO_FCOE(bp))
15032 return NULL;
15033
15034 cp->drv_owner = THIS_MODULE;
15035 cp->chip_id = CHIP_ID(bp);
15036 cp->pdev = bp->pdev;
15037 cp->io_base = bp->regview;
15038 cp->io_base2 = bp->doorbells;
15039 cp->max_kwqe_pending = 8;
15040 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
15041 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
15042 bnx2x_cid_ilt_lines(bp);
15043 cp->ctx_tbl_len = CNIC_ILT_LINES;
15044 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
15045 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
15046 cp->drv_ctl = bnx2x_drv_ctl;
15047 cp->drv_get_fc_npiv_tbl = bnx2x_get_fc_npiv;
15048 cp->drv_register_cnic = bnx2x_register_cnic;
15049 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
15050 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
15051 cp->iscsi_l2_client_id =
15052 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
15053 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
15054
15055 if (NO_ISCSI_OOO(bp))
15056 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
15057
15058 if (NO_ISCSI(bp))
15059 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
15060
15061 if (NO_FCOE(bp))
15062 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
15063
15064 BNX2X_DEV_INFO(
15065 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
15066 cp->ctx_blk_size,
15067 cp->ctx_tbl_offset,
15068 cp->ctx_tbl_len,
15069 cp->starting_cid);
15070 return cp;
15071 }
15072
15073 static u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
15074 {
15075 struct bnx2x *bp = fp->bp;
15076 u32 offset = BAR_USTRORM_INTMEM;
15077
15078 if (IS_VF(bp))
15079 return bnx2x_vf_ustorm_prods_offset(bp, fp);
15080 else if (!CHIP_IS_E1x(bp))
15081 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
15082 else
15083 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
15084
15085 return offset;
15086 }
15087
15088
15089
15090
15091
15092
15093 int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
15094 {
15095 u32 pretend_reg;
15096
15097 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
15098 return -1;
15099
15100
15101 pretend_reg = bnx2x_get_pretend_reg(bp);
15102 REG_WR(bp, pretend_reg, pretend_func_val);
15103 REG_RD(bp, pretend_reg);
15104 return 0;
15105 }
15106
15107 static void bnx2x_ptp_task(struct work_struct *work)
15108 {
15109 struct bnx2x *bp = container_of(work, struct bnx2x, ptp_task);
15110 int port = BP_PORT(bp);
15111 u32 val_seq;
15112 u64 timestamp, ns;
15113 struct skb_shared_hwtstamps shhwtstamps;
15114 bool bail = true;
15115 int i;
15116
15117
15118
15119
15120 for (i = 0; i < 10; i++) {
15121
15122 val_seq = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15123 NIG_REG_P0_TLLH_PTP_BUF_SEQID);
15124 if (val_seq & 0x10000) {
15125 bail = false;
15126 break;
15127 }
15128 msleep(1 << i);
15129 }
15130
15131 if (!bail) {
15132
15133 timestamp = REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_MSB :
15134 NIG_REG_P0_TLLH_PTP_BUF_TS_MSB);
15135 timestamp <<= 32;
15136 timestamp |= REG_RD(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_TS_LSB :
15137 NIG_REG_P0_TLLH_PTP_BUF_TS_LSB);
15138
15139 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15140 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15141 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15142
15143 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
15144 shhwtstamps.hwtstamp = ns_to_ktime(ns);
15145 skb_tstamp_tx(bp->ptp_tx_skb, &shhwtstamps);
15146
15147 DP(BNX2X_MSG_PTP, "Tx timestamp, timestamp cycles = %llu, ns = %llu\n",
15148 timestamp, ns);
15149 } else {
15150 DP(BNX2X_MSG_PTP,
15151 "Tx timestamp is not recorded (register read=%u)\n",
15152 val_seq);
15153 bp->eth_stats.ptp_skip_tx_ts++;
15154 }
15155
15156 dev_kfree_skb_any(bp->ptp_tx_skb);
15157 bp->ptp_tx_skb = NULL;
15158 }
15159
15160 void bnx2x_set_rx_ts(struct bnx2x *bp, struct sk_buff *skb)
15161 {
15162 int port = BP_PORT(bp);
15163 u64 timestamp, ns;
15164
15165 timestamp = REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_MSB :
15166 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_MSB);
15167 timestamp <<= 32;
15168 timestamp |= REG_RD(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_TS_LSB :
15169 NIG_REG_P0_LLH_PTP_HOST_BUF_TS_LSB);
15170
15171
15172 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15173 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15174
15175 ns = timecounter_cyc2time(&bp->timecounter, timestamp);
15176
15177 skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(ns);
15178
15179 DP(BNX2X_MSG_PTP, "Rx timestamp, timestamp cycles = %llu, ns = %llu\n",
15180 timestamp, ns);
15181 }
15182
15183
15184 static u64 bnx2x_cyclecounter_read(const struct cyclecounter *cc)
15185 {
15186 struct bnx2x *bp = container_of(cc, struct bnx2x, cyclecounter);
15187 int port = BP_PORT(bp);
15188 u32 wb_data[2];
15189 u64 phc_cycles;
15190
15191 REG_RD_DMAE(bp, port ? NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t1 :
15192 NIG_REG_TIMESYNC_GEN_REG + tsgen_synctime_t0, wb_data, 2);
15193 phc_cycles = wb_data[1];
15194 phc_cycles = (phc_cycles << 32) + wb_data[0];
15195
15196 DP(BNX2X_MSG_PTP, "PHC read cycles = %llu\n", phc_cycles);
15197
15198 return phc_cycles;
15199 }
15200
15201 static void bnx2x_init_cyclecounter(struct bnx2x *bp)
15202 {
15203 memset(&bp->cyclecounter, 0, sizeof(bp->cyclecounter));
15204 bp->cyclecounter.read = bnx2x_cyclecounter_read;
15205 bp->cyclecounter.mask = CYCLECOUNTER_MASK(64);
15206 bp->cyclecounter.shift = 0;
15207 bp->cyclecounter.mult = 1;
15208 }
15209
15210 static int bnx2x_send_reset_timesync_ramrod(struct bnx2x *bp)
15211 {
15212 struct bnx2x_func_state_params func_params = {NULL};
15213 struct bnx2x_func_set_timesync_params *set_timesync_params =
15214 &func_params.params.set_timesync;
15215
15216
15217 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
15218 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
15219
15220 func_params.f_obj = &bp->func_obj;
15221 func_params.cmd = BNX2X_F_CMD_SET_TIMESYNC;
15222
15223
15224 set_timesync_params->drift_adjust_cmd = TS_DRIFT_ADJUST_RESET;
15225 set_timesync_params->offset_cmd = TS_OFFSET_KEEP;
15226
15227 return bnx2x_func_state_change(bp, &func_params);
15228 }
15229
15230 static int bnx2x_enable_ptp_packets(struct bnx2x *bp)
15231 {
15232 struct bnx2x_queue_state_params q_params;
15233 int rc, i;
15234
15235
15236 memset(&q_params, 0, sizeof(q_params));
15237 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
15238 q_params.cmd = BNX2X_Q_CMD_UPDATE;
15239 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS_CHNG,
15240 &q_params.params.update.update_flags);
15241 __set_bit(BNX2X_Q_UPDATE_PTP_PKTS,
15242 &q_params.params.update.update_flags);
15243
15244
15245 for_each_eth_queue(bp, i) {
15246 struct bnx2x_fastpath *fp = &bp->fp[i];
15247
15248
15249 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
15250
15251
15252 rc = bnx2x_queue_state_change(bp, &q_params);
15253 if (rc) {
15254 BNX2X_ERR("Failed to enable PTP packets\n");
15255 return rc;
15256 }
15257 }
15258
15259 return 0;
15260 }
15261
15262 #define BNX2X_P2P_DETECT_PARAM_MASK 0x5F5
15263 #define BNX2X_P2P_DETECT_RULE_MASK 0x3DBB
15264 #define BNX2X_PTP_TX_ON_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15265 #define BNX2X_PTP_TX_ON_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15266 #define BNX2X_PTP_V1_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EE)
15267 #define BNX2X_PTP_V1_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FFE)
15268 #define BNX2X_PTP_V2_L4_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x7EA)
15269 #define BNX2X_PTP_V2_L4_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3FEE)
15270 #define BNX2X_PTP_V2_L2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6BF)
15271 #define BNX2X_PTP_V2_L2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EFF)
15272 #define BNX2X_PTP_V2_PARAM_MASK (BNX2X_P2P_DETECT_PARAM_MASK & 0x6AA)
15273 #define BNX2X_PTP_V2_RULE_MASK (BNX2X_P2P_DETECT_RULE_MASK & 0x3EEE)
15274
15275 int bnx2x_configure_ptp_filters(struct bnx2x *bp)
15276 {
15277 int port = BP_PORT(bp);
15278 u32 param, rule;
15279 int rc;
15280
15281 if (!bp->hwtstamp_ioctl_called)
15282 return 0;
15283
15284 param = port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15285 NIG_REG_P0_TLLH_PTP_PARAM_MASK;
15286 rule = port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15287 NIG_REG_P0_TLLH_PTP_RULE_MASK;
15288 switch (bp->tx_type) {
15289 case HWTSTAMP_TX_ON:
15290 bp->flags |= TX_TIMESTAMPING_EN;
15291 REG_WR(bp, param, BNX2X_PTP_TX_ON_PARAM_MASK);
15292 REG_WR(bp, rule, BNX2X_PTP_TX_ON_RULE_MASK);
15293 break;
15294 case HWTSTAMP_TX_ONESTEP_SYNC:
15295 case HWTSTAMP_TX_ONESTEP_P2P:
15296 BNX2X_ERR("One-step timestamping is not supported\n");
15297 return -ERANGE;
15298 }
15299
15300 param = port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15301 NIG_REG_P0_LLH_PTP_PARAM_MASK;
15302 rule = port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15303 NIG_REG_P0_LLH_PTP_RULE_MASK;
15304 switch (bp->rx_filter) {
15305 case HWTSTAMP_FILTER_NONE:
15306 break;
15307 case HWTSTAMP_FILTER_ALL:
15308 case HWTSTAMP_FILTER_SOME:
15309 case HWTSTAMP_FILTER_NTP_ALL:
15310 bp->rx_filter = HWTSTAMP_FILTER_NONE;
15311 break;
15312 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
15313 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
15314 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
15315 bp->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
15316
15317 REG_WR(bp, param, BNX2X_PTP_V1_L4_PARAM_MASK);
15318 REG_WR(bp, rule, BNX2X_PTP_V1_L4_RULE_MASK);
15319 break;
15320 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
15321 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
15322 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
15323 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
15324
15325 REG_WR(bp, param, BNX2X_PTP_V2_L4_PARAM_MASK);
15326 REG_WR(bp, rule, BNX2X_PTP_V2_L4_RULE_MASK);
15327 break;
15328 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
15329 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
15330 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
15331 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
15332
15333 REG_WR(bp, param, BNX2X_PTP_V2_L2_PARAM_MASK);
15334 REG_WR(bp, rule, BNX2X_PTP_V2_L2_RULE_MASK);
15335
15336 break;
15337 case HWTSTAMP_FILTER_PTP_V2_EVENT:
15338 case HWTSTAMP_FILTER_PTP_V2_SYNC:
15339 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
15340 bp->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
15341
15342 REG_WR(bp, param, BNX2X_PTP_V2_PARAM_MASK);
15343 REG_WR(bp, rule, BNX2X_PTP_V2_RULE_MASK);
15344 break;
15345 }
15346
15347
15348 rc = bnx2x_enable_ptp_packets(bp);
15349 if (rc)
15350 return rc;
15351
15352
15353 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15354 NIG_REG_P0_LLH_PTP_TO_HOST, 0x1);
15355
15356 return 0;
15357 }
15358
15359 static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
15360 {
15361 struct hwtstamp_config config;
15362 int rc;
15363
15364 DP(BNX2X_MSG_PTP, "HWTSTAMP IOCTL called\n");
15365
15366 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
15367 return -EFAULT;
15368
15369 DP(BNX2X_MSG_PTP, "Requested tx_type: %d, requested rx_filters = %d\n",
15370 config.tx_type, config.rx_filter);
15371
15372 bp->hwtstamp_ioctl_called = true;
15373 bp->tx_type = config.tx_type;
15374 bp->rx_filter = config.rx_filter;
15375
15376 rc = bnx2x_configure_ptp_filters(bp);
15377 if (rc)
15378 return rc;
15379
15380 config.rx_filter = bp->rx_filter;
15381
15382 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
15383 -EFAULT : 0;
15384 }
15385
15386
15387 static int bnx2x_configure_ptp(struct bnx2x *bp)
15388 {
15389 int rc, port = BP_PORT(bp);
15390 u32 wb_data[2];
15391
15392
15393 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_PARAM_MASK :
15394 NIG_REG_P0_LLH_PTP_PARAM_MASK, 0x7FF);
15395 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_RULE_MASK :
15396 NIG_REG_P0_LLH_PTP_RULE_MASK, 0x3FFF);
15397 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_PARAM_MASK :
15398 NIG_REG_P0_TLLH_PTP_PARAM_MASK, 0x7FF);
15399 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_RULE_MASK :
15400 NIG_REG_P0_TLLH_PTP_RULE_MASK, 0x3FFF);
15401
15402
15403 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_TO_HOST :
15404 NIG_REG_P0_LLH_PTP_TO_HOST, 0x0);
15405
15406
15407 REG_WR(bp, port ? NIG_REG_P1_PTP_EN :
15408 NIG_REG_P0_PTP_EN, 0x3F);
15409
15410
15411 wb_data[0] = 0;
15412 wb_data[1] = 0;
15413 REG_WR_DMAE(bp, NIG_REG_TIMESYNC_GEN_REG + tsgen_ctrl, wb_data, 2);
15414
15415
15416 rc = bnx2x_send_reset_timesync_ramrod(bp);
15417 if (rc) {
15418 BNX2X_ERR("Failed to reset PHC drift register\n");
15419 return -EFAULT;
15420 }
15421
15422
15423 REG_WR(bp, port ? NIG_REG_P1_LLH_PTP_HOST_BUF_SEQID :
15424 NIG_REG_P0_LLH_PTP_HOST_BUF_SEQID, 0x10000);
15425 REG_WR(bp, port ? NIG_REG_P1_TLLH_PTP_BUF_SEQID :
15426 NIG_REG_P0_TLLH_PTP_BUF_SEQID, 0x10000);
15427
15428 return 0;
15429 }
15430
15431
15432 void bnx2x_init_ptp(struct bnx2x *bp)
15433 {
15434 int rc;
15435
15436
15437 rc = bnx2x_configure_ptp(bp);
15438 if (rc) {
15439 BNX2X_ERR("Stopping PTP initialization\n");
15440 return;
15441 }
15442
15443
15444 INIT_WORK(&bp->ptp_task, bnx2x_ptp_task);
15445
15446
15447
15448
15449
15450 if (!bp->timecounter_init_done) {
15451 bnx2x_init_cyclecounter(bp);
15452 timecounter_init(&bp->timecounter, &bp->cyclecounter,
15453 ktime_to_ns(ktime_get_real()));
15454 bp->timecounter_init_done = true;
15455 }
15456
15457 DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
15458 }