#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/irq.h>

#include "bnx2x.h"
#include "bnx2x_sriov.h"

extern int bnx2x_load_count[2][3];
extern int bnx2x_num_queues;
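
/* Allocation helpers used throughout the driver: the *_FREE variants
 * release a buffer and clear the pointer (and, for DMA memory, the bus
 * address), while the *_ALLOC variants allocate coherent DMA memory and
 * log the mapping when NETIF_MSG_HW debugging is enabled.  BNX2X_PCI_FALLOC
 * additionally pre-fills the buffer with 0xff.
 */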
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_PCI_ALLOC(y, size) \
({ \
	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
	if (x) \
		DP(NETIF_MSG_HW, \
		   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
		   (unsigned long long)(*y), x); \
	x; \
})

#define BNX2X_PCI_FALLOC(y, size) \
({ \
	void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
	if (x) { \
		memset(x, 0xff, size); \
		DP(NETIF_MSG_HW, \
		   "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
		   (unsigned long long)(*y), x); \
	} \
	x; \
})
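
/* Request an unload of this function from the management FW (MCP);
 * the return value is the MCP response (common/port/function unload).
 */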
u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);

void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
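
/* Configure RSS for the given configuration object; @config_hash selects
 * whether the hash keys are (re)written and @enable turns RSS on or off.
 */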
int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
	      bool config_hash, bool enable);

void bnx2x__init_func_obj(struct bnx2x *bp);

int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		      bool leading);

int bnx2x_setup_leading(struct bnx2x *bp);
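
/* Send a command to the MCP mailbox and return the FW response. */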
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);

int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

void bnx2x_link_set(struct bnx2x *bp);

void bnx2x_force_link_reset(struct bnx2x *bp);

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

void bnx2x_drv_pulse(struct bnx2x *bp);
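
/* Acknowledge status block @igu_sb_id through the IGU, updating segment
 * @segment at @index and optionally (re)enabling the interrupt.
 */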
void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
		      u16 index, u8 op, u8 update);

void bnx2x_pf_disable(struct bnx2x *bp);
int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val);

void bnx2x__link_status_update(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);

void __bnx2x_link_report(struct bnx2x *bp);

u16 bnx2x_get_mf_speed(struct bnx2x *bp);

irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

void bnx2x_setup_cnic_info(struct bnx2x *bp);

void bnx2x_int_enable(struct bnx2x *bp);

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

void bnx2x_nic_init_cnic(struct bnx2x *bp);

void bnx2x_pre_irq_nic_init(struct bnx2x *bp);

void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code);

int bnx2x_alloc_mem_cnic(struct bnx2x *bp);

int bnx2x_alloc_mem(struct bnx2x *bp);

void bnx2x_free_mem_cnic(struct bnx2x *bp);

void bnx2x_free_mem(struct bnx2x *bp);

void bnx2x_set_num_queues(struct bnx2x *bp);

void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link);

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

int bnx2x_release_leader_lock(struct bnx2x *bp);

int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);

void bnx2x_set_rx_mode_inner(struct bnx2x *bp);

void bnx2x_set_pf_load(struct bnx2x *bp);
bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);

void bnx2x_clear_vlan_info(struct bnx2x *bp);

void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

void bnx2x_ilt_set_info(struct bnx2x *bp);

void bnx2x_ilt_set_info_cnic(struct bnx2x *bp);

void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem);

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);

void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);

int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link);

int bnx2x_nic_load(struct bnx2x *bp, int load_mode);

netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data);

int bnx2x_get_vf_config(struct net_device *dev, int vf,
			struct ifla_vf_info *ivi);
int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
		      __be16 vlan_proto);
int bnx2x_set_vf_spoofchk(struct net_device *dev, int idx, bool val);

u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
		       struct net_device *sb_dev);
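
/* Write the new RX BD, CQE and SGE producer values of a fastpath queue
 * to the chip.
 */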
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	u32 i;

	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;
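
	/* Make sure the BD and SGE data is written before the producers,
	 * since the FW may read the BD/SGE as soon as it sees the new
	 * producer values.
	 */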
	wmb();

	for (i = 0; i < sizeof(rx_prods)/4; i++)
		REG_WR_RELAXED(bp, fp->ustorm_rx_prods_offset + i * 4,
			       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

int bnx2x_reload_if_running(struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);

int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);

extern const struct dev_pm_ops bnx2x_pm_ops;

void bnx2x_free_irq(struct bnx2x *bp);

void bnx2x_free_fp_mem(struct bnx2x *bp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
void bnx2x_init_rx_rings_cnic(struct bnx2x *bp);
void bnx2x_free_skbs(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_netif_start(struct bnx2x *bp);
int bnx2x_load_cnic(struct bnx2x *bp);

int bnx2x_enable_msix(struct bnx2x *bp);

int bnx2x_enable_msi(struct bnx2x *bp);

int bnx2x_alloc_mem_bp(struct bnx2x *bp);

void bnx2x_free_mem_bp(struct bnx2x *bp);

int bnx2x_change_mtu(struct net_device *dev, int new_mtu);

#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif

netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);

void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue);

void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default);
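
/* Re-sample the driver's copy of the fastpath status-block running index;
 * the barrier() keeps the compiler from using a stale cached value.
 */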
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	barrier();
	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
}

static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
					u8 segment, u16 index, u8 op,
					u8 update, u32 igu_addr)
{
	struct igu_regular cmd_data = {0};

	cmd_data.sb_id_and_flags =
			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));

	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   cmd_data.sb_id_and_flags, igu_addr);
	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);

	barrier();
}

static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
				   u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	barrier();
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
				u16 index, u8 op, u8 update)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
	else {
		u8 segment;

		if (CHIP_INT_MODE_IS_BC(bp))
			segment = storm;
		else if (igu_sb_id != bp->igu_dsb_id)
			segment = IGU_SEG_ACCESS_DEF;
		else if (storm == ATTENTION_ID)
			segment = IGU_SEG_ACCESS_ATTN;
		else
			segment = IGU_SEG_ACCESS_DEF;
		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
	}
}

static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
	u32 result = REG_RD(bp, igu_addr);

	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, igu_addr);

	barrier();
	return result;
}

static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	barrier();
	if (bp->common.int_block == INT_BLOCK_HC)
		return bnx2x_hc_ack_int(bp);
	else
		return bnx2x_igu_ack_int(bp);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
{
	barrier();
	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
				 struct bnx2x_fp_txdata *txdata)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = txdata->tx_bd_prod;
	cons = txdata->tx_bd_cons;

	used = SUB_S16(prod, cons);

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > txdata->tx_ring_size);
	WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(txdata->tx_ring_size) - used;
}

static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
{
	u16 hw_cons;

	barrier();
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	return hw_cons != txdata->tx_pkt_cons;
}

static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u8 cos;
	for_each_cos_in_tx_queue(fp, cos)
		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
			return true;
	return false;
}
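
/* An RX CQE is seeded with an all-ones marker; the chip clears the marker
 * when it writes a completion, so a zero marker means the CQE is valid.
 */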
#define BNX2X_IS_CQE_COMPLETED(cqe_fp) (cqe_fp->marker == 0x0)
#define BNX2X_SEED_CQE(cqe_fp) (cqe_fp->marker = 0xFFFFFFFF)
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 cons;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_cqe *cqe_fp;

	cons = RCQ_BD(fp->rx_comp_cons);
	cqe = &fp->rx_comp_ring[cons];
	cqe_fp = &cqe->fast_path_cqe;
	return BNX2X_IS_CQE_COMPLETED(cqe_fp);
}

static inline void bnx2x_tx_disable(struct bnx2x *bp)
{
	netif_tx_disable(bp->dev);
	netif_carrier_off(bp->dev);
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue_cnic(bp, i) {
		__netif_napi_del(&bnx2x_fp(bp, i, napi));
	}
	synchronize_net();
}

static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i) {
		__netif_napi_del(&bnx2x_fp(bp, i, napi));
	}
	synchronize_net();
}

int bnx2x_set_int_mode(struct bnx2x *bp);

static inline void bnx2x_disable_msi(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		pci_disable_msix(bp->pdev);
		bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
	} else if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
}

static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));

	bnx2x_clear_sge_mask_next_elems(fp);
}
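
/* Move the data buffer and its DMA mapping from the consumer slot to the
 * producer slot, recycling the buffer back onto the RX ring.
 */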
static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
				       u16 cons, u16 prod)
{
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	prod_rx_buf->data = cons_rx_buf->data;
	*prod_bd = *cons_bd;
}
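
/* Returns the absolute function number for a given VN on the current port. */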
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
	return 2 * vn + BP_PORT(bp);
}

static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
	return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
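
/* Send the FUNCTION_START ramrod: carries the MF mode, outer VLAN,
 * CoS mode, tunnel UDP ports and related parameters, and waits for the
 * ramrod to complete.
 */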
static inline int bnx2x_func_start(struct bnx2x *bp)
{
	struct bnx2x_func_state_params func_params = {NULL};
	struct bnx2x_func_start_params *start_params =
		&func_params.params.start;
	u16 port;

	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);

	func_params.f_obj = &bp->func_obj;
	func_params.cmd = BNX2X_F_CMD_START;

	start_params->mf_mode = bp->mf_mode;
	start_params->sd_vlan_tag = bp->mf_ov;

	if (IS_MF_BD(bp)) {
		DP(NETIF_MSG_IFUP, "Configuring ethertype 0x88a8 for BD\n");
		start_params->sd_vlan_eth_type = ETH_P_8021AD;
		REG_WR(bp, PRS_REG_VLAN_TYPE_0, ETH_P_8021AD);
		REG_WR(bp, PBF_REG_VLAN_TYPE_0, ETH_P_8021AD);
		REG_WR(bp, NIG_REG_LLH_E1HOV_TYPE_1, ETH_P_8021AD);

		bnx2x_get_c2s_mapping(bp, start_params->c2s_pri,
				      &start_params->c2s_pri_default);
		start_params->c2s_pri_valid = 1;

		DP(NETIF_MSG_IFUP,
		   "Inner-to-Outer priority: %02x %02x %02x %02x %02x %02x %02x %02x [Default %02x]\n",
		   start_params->c2s_pri[0], start_params->c2s_pri[1],
		   start_params->c2s_pri[2], start_params->c2s_pri[3],
		   start_params->c2s_pri[4], start_params->c2s_pri[5],
		   start_params->c2s_pri[6], start_params->c2s_pri[7],
		   start_params->c2s_pri_default);
	}

	if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
		start_params->network_cos_mode = STATIC_COS;
	else
		start_params->network_cos_mode = FW_WRR;
	if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN]) {
		port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN];
		start_params->vxlan_dst_port = port;
	}
	if (bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE]) {
		port = bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE];
		start_params->geneve_dst_port = port;
	}

	start_params->inner_rss = 1;

	if (IS_MF_UFP(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
		start_params->class_fail_ethtype = ETH_P_FIP;
		start_params->class_fail = 1;
		start_params->no_added_tags = 1;
	}

	return bnx2x_func_state_change(bp, &func_params);
}
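
/* Pack a MAC address into the hi/mid/lo 16-bit words used by the FW,
 * swapping the bytes within each word.
 */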
static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
					 __le16 *fw_lo, u8 *mac)
{
	((u8 *)fw_hi)[0]  = mac[1];
	((u8 *)fw_hi)[1]  = mac[0];
	((u8 *)fw_mid)[0] = mac[3];
	((u8 *)fw_mid)[1] = mac[2];
	((u8 *)fw_lo)[0]  = mac[5];
	((u8 *)fw_lo)[1]  = mac[4];
}

static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
					  struct bnx2x_alloc_pool *pool)
{
	if (!pool->page)
		return;

	put_page(pool->page);

	pool->page = NULL;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	if (fp->mode == TPA_MODE_DISABLED)
		return;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);

	bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}

static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
{
	int i;

	for (i = 1; i <= NUM_RX_RINGS; i++) {
		struct eth_rx_bd *rx_bd;

		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
		rx_bd->addr_hi =
			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		rx_bd->addr_lo =
			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
	}
}

static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	if (!CHIP_IS_E1x(bp)) {
		if (IS_FCOE_FP(fp))
			return bp->cnic_base_cl_id + (bp->pf_num >> 1);
		return fp->cl_id;
	}
	return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}

static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
					       bnx2x_obj_type obj_type)
{
	struct bnx2x *bp = fp->bp;

	bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id,
			   fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
			   bnx2x_sp_mapping(bp, mac_rdata),
			   BNX2X_FILTER_MAC_PENDING,
			   &bp->sp_state, obj_type,
			   &bp->macs_pool);

	if (!CHIP_IS_E1x(bp))
		bnx2x_init_vlan_obj(bp, &bnx2x_sp_obj(bp, fp).vlan_obj,
				    fp->cl_id, fp->cid, BP_FUNC(bp),
				    bnx2x_sp(bp, vlan_rdata),
				    bnx2x_sp_mapping(bp, vlan_rdata),
				    BNX2X_FILTER_VLAN_PENDING,
				    &bp->sp_state, obj_type,
				    &bp->vlans_pool);
}
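
/* Return the number of (non-hidden) functions on the current path;
 * E1 always has a single function and emulation/FPGA chips are
 * special-cased.
 */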
static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
{
	u8 func_num = 0, i;

	if (CHIP_IS_E1(bp))
		return 1;

	if (CHIP_REV_IS_SLOW(bp)) {
		if (IS_MF(bp))
			func_num = 4;
		else
			func_num = 2;
	} else {
		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
			u32 func_config =
				MF_CFG_RD(bp,
					  func_mf_config[BP_PATH(bp) + 2 * i].
					  config);
			func_num +=
				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
		}
	}

	WARN_ON(!func_num);

	return func_num;
}

static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
{
	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);

	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
			     BP_FUNC(bp), BP_FUNC(bp),
			     bnx2x_sp(bp, mcast_rdata),
			     bnx2x_sp_mapping(bp, mcast_rdata),
			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
			     BNX2X_OBJ_TYPE_RX);

	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
				   bnx2x_get_path_func_num(bp));

	bnx2x_init_vlan_credit_pool(bp, &bp->vlans_pool, BP_FUNC(bp),
				    bnx2x_get_path_func_num(bp));

	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
				  bnx2x_sp(bp, rss_rdata),
				  bnx2x_sp_mapping(bp, rss_rdata),
				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
				  BNX2X_OBJ_TYPE_RX);

	bp->vlan_credit = PF_VLAN_CREDIT_E2(bp, bnx2x_get_path_func_num(bp));
}

static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
{
	if (CHIP_IS_E1x(fp->bp))
		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
	else
		return fp->cl_id;
}

static inline void bnx2x_init_txdata(struct bnx2x *bp,
				     struct bnx2x_fp_txdata *txdata, u32 cid,
				     int txq_index, __le16 *tx_cons_sb,
				     struct bnx2x_fastpath *fp)
{
	txdata->cid = cid;
	txdata->txq_index = txq_index;
	txdata->tx_cons_sb = tx_cons_sb;
	txdata->parent_fp = fp;
	txdata->tx_ring_size = IS_FCOE_FP(fp) ? MAX_TX_AVAIL : bp->tx_ring_size;

	DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
	   txdata->cid, txdata->txq_index);
}

static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
	return bp->cnic_base_cl_id + cl_idx +
		(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}

static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
{
	return bp->base_fw_ndsb;
}

static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
{
	return bp->igu_base_sb;
}

static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
				       struct bnx2x_fp_txdata *txdata)
{
	int cnt = 1000;

	while (bnx2x_has_tx_work_unload(txdata)) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
				  txdata->txq_index, txdata->tx_pkt_prod,
				  txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
			return -EBUSY;
#else
			break;
#endif
		}
		cnt--;
		usleep_range(1000, 2000);
	}

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp);

static inline void __storm_memset_struct(struct bnx2x *bp,
					 u32 addr, size_t size, u32 *data)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), data[i]);
}
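
/* Wait up to ~5 seconds for the slowpath state bits in @mask to clear;
 * returns true on success, false if the wait timed out.
 */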
static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
{
	int tout = 5000;

	while (tout--) {
		smp_mb();
		netif_addr_lock_bh(bp->dev);
		if (!(bp->sp_state & mask)) {
			netif_addr_unlock_bh(bp->dev);
			return true;
		}
		netif_addr_unlock_bh(bp->dev);

		usleep_range(1000, 2000);
	}

	smp_mb();

	netif_addr_lock_bh(bp->dev);
	if (bp->sp_state & mask) {
		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
			  bp->sp_state, mask);
		netif_addr_unlock_bh(bp->dev);
		return false;
	}
	netif_addr_unlock_bh(bp->dev);

	return true;
}

void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid);

void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec);
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);
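
/* Extract the MAX bandwidth field from an MF configuration word;
 * a value of 0 is treated as 100(%).
 */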
static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
{
	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
		      FUNC_MF_CFG_MAX_BW_SHIFT;
	if (!max_cfg) {
		DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
		   "Max BW configured to 0 - using 100 instead\n");
		max_cfg = 100;
	}
	return max_cfg;
}
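
/* Check whether GRO aggregation is usable for a given MTU: the MTU must
 * fit in an SGE page and the resulting number of frags per aggregated
 * packet must not exceed MAX_SKB_FRAGS.
 */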
static inline bool bnx2x_mtu_allows_gro(int mtu)
{
	int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);

	return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

void bnx2x_get_iscsi_info(struct bnx2x *bp);

static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int func;
	int vn;

	for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
		if (vn == BP_VN(bp))
			continue;

		func = func_by_vn(bp, vn);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
	if (SHMEM2_HAS(bp, drv_flags)) {
		u32 drv_flags;
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
		drv_flags = SHMEM2_RD(bp, drv_flags);

		if (set)
			SET_FLAGS(drv_flags, flags);
		else
			RESET_FLAGS(drv_flags, flags);

		SHMEM2_WR(bp, drv_flags, drv_flags);
		DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
	}
}

void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);

int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);

void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
			    u32 verbose);

void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state);

int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
		     int buf_size);

#endif