0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Linux network driver for QLogic BR-series Converged Network Adapter.
0004  */
0005 /*
0006  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
0007  * Copyright (c) 2014-2015 QLogic Corporation
0008  * All rights reserved
0009  * www.qlogic.com
0010  */
0011 #include <linux/bitops.h>
0012 #include <linux/netdevice.h>
0013 #include <linux/skbuff.h>
0014 #include <linux/etherdevice.h>
0015 #include <linux/in.h>
0016 #include <linux/ethtool.h>
0017 #include <linux/if_vlan.h>
0018 #include <linux/if_ether.h>
0019 #include <linux/ip.h>
0020 #include <linux/prefetch.h>
0021 #include <linux/module.h>
0022 
0023 #include "bnad.h"
0024 #include "bna.h"
0025 #include "cna.h"
0026 
0027 static DEFINE_MUTEX(bnad_fwimg_mutex);
0028 
0029 /*
0030  * Module params
0031  */
0032 static uint bnad_msix_disable;
0033 module_param(bnad_msix_disable, uint, 0444);
0034 MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
0035 
0036 static uint bnad_ioc_auto_recover = 1;
0037 module_param(bnad_ioc_auto_recover, uint, 0444);
0038 MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
0039 
0040 static uint bna_debugfs_enable = 1;
0041 module_param(bna_debugfs_enable, uint, 0644);
0042 MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
0043          " Range[false:0|true:1]");
0044 
0045 /*
0046  * Global variables
0047  */
0048 static u32 bnad_rxqs_per_cq = 2;
0049 static atomic_t bna_id;
0050 static const u8 bnad_bcast_addr[] __aligned(2) =
0051     { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
0052 
0053 /*
0054  * Local MACROS
0055  */
0056 #define BNAD_GET_MBOX_IRQ(_bnad)                \
0057     (((_bnad)->cfg_flags & BNAD_CF_MSIX) ?          \
0058      ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
0059      ((_bnad)->pcidev->irq))
0060 
0061 #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)    \
0062 do {                                \
0063     (_res_info)->res_type = BNA_RES_T_MEM;          \
0064     (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;   \
0065     (_res_info)->res_u.mem_info.num = (_num);       \
0066     (_res_info)->res_u.mem_info.len = (_size);      \
0067 } while (0)
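/*
 * Illustrative use of the macro above (a sketch, not code from this
 * driver): a Tx/Rx setup path fills one bna_res_info slot per unmap
 * queue, e.g.
 *
 *	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[unmapq_idx], num_queues,
 *				 q_depth * sizeof(struct bnad_tx_unmap));
 *
 * Here unmapq_idx, num_queues and q_depth are placeholders; the real
 * resource indices and sizing live in the Tx/Rx resource-request code.
 */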
0068 
0069 /*
0070  * Reinitialize completions in CQ, once Rx is taken down
0071  */
0072 static void
0073 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
0074 {
0075     struct bna_cq_entry *cmpl;
0076     int i;
0077 
0078     for (i = 0; i < ccb->q_depth; i++) {
0079         cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
0080         cmpl->valid = 0;
0081     }
0082 }
0083 
0084 /* Tx Datapath functions */
0085 
0086 
0087 /* Caller should ensure that the entry at unmap_q[index] is valid */
0088 static u32
0089 bnad_tx_buff_unmap(struct bnad *bnad,
0090                   struct bnad_tx_unmap *unmap_q,
0091                   u32 q_depth, u32 index)
0092 {
0093     struct bnad_tx_unmap *unmap;
0094     struct sk_buff *skb;
0095     int vector, nvecs;
0096 
0097     unmap = &unmap_q[index];
0098     nvecs = unmap->nvecs;
0099 
0100     skb = unmap->skb;
0101     unmap->skb = NULL;
0102     unmap->nvecs = 0;
0103     dma_unmap_single(&bnad->pcidev->dev,
0104         dma_unmap_addr(&unmap->vectors[0], dma_addr),
0105         skb_headlen(skb), DMA_TO_DEVICE);
0106     dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
0107     nvecs--;
0108 
0109     vector = 0;
0110     while (nvecs) {
0111         vector++;
0112         if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
0113             vector = 0;
0114             BNA_QE_INDX_INC(index, q_depth);
0115             unmap = &unmap_q[index];
0116         }
0117 
0118         dma_unmap_page(&bnad->pcidev->dev,
0119             dma_unmap_addr(&unmap->vectors[vector], dma_addr),
0120             dma_unmap_len(&unmap->vectors[vector], dma_len),
0121             DMA_TO_DEVICE);
0122         dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
0123         nvecs--;
0124     }
0125 
0126     BNA_QE_INDX_INC(index, q_depth);
0127 
0128     return index;
0129 }
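/*
 * Walk-through with hypothetical numbers: for an skb with a linear head
 * plus five page fragments, nvecs starts at 6.  The head is unmapped with
 * dma_unmap_single() and the remaining five vectors with dma_unmap_page().
 * If BFI_TX_MAX_VECTORS_PER_WI were 4, vectors 1..3 would come from
 * unmap_q[index] and the last two from the next unmap entry, which is why
 * 'vector' wraps to 0 and 'index' advances mid-loop.  The value returned
 * is the index just past the last entry this skb occupied, i.e. the next
 * consumer position.
 */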
0130 
0131 /*
0132  * Frees all pending Tx Bufs
0133  * At this point no activity is expected on the Q,
0134  * so DMA unmap & freeing is fine.
0135  */
0136 static void
0137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
0138 {
0139     struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
0140     struct sk_buff *skb;
0141     int i;
0142 
0143     for (i = 0; i < tcb->q_depth; i++) {
0144         skb = unmap_q[i].skb;
0145         if (!skb)
0146             continue;
0147         bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
0148 
0149         dev_kfree_skb_any(skb);
0150     }
0151 }
0152 
0153 /*
0154  * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
0155  * Can be called in a) Interrupt context
0156  *          b) Sending context
0157  */
0158 static u32
0159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
0160 {
0161     u32 sent_packets = 0, sent_bytes = 0;
0162     u32 wis, unmap_wis, hw_cons, cons, q_depth;
0163     struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
0164     struct bnad_tx_unmap *unmap;
0165     struct sk_buff *skb;
0166 
0167     /* Just return if TX is stopped */
0168     if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
0169         return 0;
0170 
0171     hw_cons = *(tcb->hw_consumer_index);
0172     rmb();
0173     cons = tcb->consumer_index;
0174     q_depth = tcb->q_depth;
0175 
0176     wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
0177     BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
0178 
0179     while (wis) {
0180         unmap = &unmap_q[cons];
0181 
0182         skb = unmap->skb;
0183 
0184         sent_packets++;
0185         sent_bytes += skb->len;
0186 
0187         unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
0188         wis -= unmap_wis;
0189 
0190         cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
0191         dev_kfree_skb_any(skb);
0192     }
0193 
0194     /* Update consumer pointers. */
0195     tcb->consumer_index = hw_cons;
0196 
0197     tcb->txq->tx_packets += sent_packets;
0198     tcb->txq->tx_bytes += sent_bytes;
0199 
0200     return sent_packets;
0201 }
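/*
 * Ring-arithmetic sketch (hypothetical values): with q_depth = 64,
 * consumer_index = 60 and *hw_consumer_index = 4, the index delta wraps
 * around the queue and BNA_Q_INDEX_CHANGE() reports 8 work items to
 * reclaim.  The rmb() after reading the hardware consumer index keeps the
 * subsequent unmap_q reads from being reordered ahead of that load.
 */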
0202 
0203 static u32
0204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
0205 {
0206     struct net_device *netdev = bnad->netdev;
0207     u32 sent = 0;
0208 
0209     if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
0210         return 0;
0211 
0212     sent = bnad_txcmpl_process(bnad, tcb);
0213     if (sent) {
0214         if (netif_queue_stopped(netdev) &&
0215             netif_carrier_ok(netdev) &&
0216             BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
0217                     BNAD_NETIF_WAKE_THRESHOLD) {
0218             if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
0219                 netif_wake_queue(netdev);
0220                 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
0221             }
0222         }
0223     }
0224 
0225     if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
0226         bna_ib_ack(tcb->i_dbell, sent);
0227 
0228     smp_mb__before_atomic();
0229     clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
0230 
0231     return sent;
0232 }
0233 
0234 /* MSIX Tx Completion Handler */
0235 static irqreturn_t
0236 bnad_msix_tx(int irq, void *data)
0237 {
0238     struct bna_tcb *tcb = (struct bna_tcb *)data;
0239     struct bnad *bnad = tcb->bnad;
0240 
0241     bnad_tx_complete(bnad, tcb);
0242 
0243     return IRQ_HANDLED;
0244 }
0245 
0246 static inline void
0247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
0248 {
0249     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0250 
0251     unmap_q->reuse_pi = -1;
0252     unmap_q->alloc_order = -1;
0253     unmap_q->map_size = 0;
0254     unmap_q->type = BNAD_RXBUF_NONE;
0255 }
0256 
0257 /* Default is page-based allocation. Multi-buffer support - TBD */
0258 static int
0259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
0260 {
0261     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0262     int order;
0263 
0264     bnad_rxq_alloc_uninit(bnad, rcb);
0265 
0266     order = get_order(rcb->rxq->buffer_size);
0267 
0268     unmap_q->type = BNAD_RXBUF_PAGE;
0269 
0270     if (bna_is_small_rxq(rcb->id)) {
0271         unmap_q->alloc_order = 0;
0272         unmap_q->map_size = rcb->rxq->buffer_size;
0273     } else {
0274         if (rcb->rxq->multi_buffer) {
0275             unmap_q->alloc_order = 0;
0276             unmap_q->map_size = rcb->rxq->buffer_size;
0277             unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
0278         } else {
0279             unmap_q->alloc_order = order;
0280             unmap_q->map_size =
0281                 (rcb->rxq->buffer_size > 2048) ?
0282                 PAGE_SIZE << order : 2048;
0283         }
0284     }
0285 
0286     BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
0287 
0288     return 0;
0289 }
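/*
 * Sizing example (hypothetical, 4 KiB pages): a large RxQ with
 * buffer_size = 2048 and no multi-buffer gets order 0 and map_size 2048,
 * so each page is carved into two receive buffers.  With buffer_size =
 * 9000 (jumbo), get_order() returns 2, map_size becomes PAGE_SIZE << 2 =
 * 16384, and each higher-order allocation backs exactly one buffer.  The
 * BUG_ON() above guards the invariant both cases rely on: map_size must
 * evenly divide the allocation size.
 */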
0290 
0291 static inline void
0292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
0293 {
0294     if (!unmap->page)
0295         return;
0296 
0297     dma_unmap_page(&bnad->pcidev->dev,
0298             dma_unmap_addr(&unmap->vector, dma_addr),
0299             unmap->vector.len, DMA_FROM_DEVICE);
0300     put_page(unmap->page);
0301     unmap->page = NULL;
0302     dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
0303     unmap->vector.len = 0;
0304 }
0305 
0306 static inline void
0307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
0308 {
0309     if (!unmap->skb)
0310         return;
0311 
0312     dma_unmap_single(&bnad->pcidev->dev,
0313             dma_unmap_addr(&unmap->vector, dma_addr),
0314             unmap->vector.len, DMA_FROM_DEVICE);
0315     dev_kfree_skb_any(unmap->skb);
0316     unmap->skb = NULL;
0317     dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
0318     unmap->vector.len = 0;
0319 }
0320 
0321 static void
0322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
0323 {
0324     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0325     int i;
0326 
0327     for (i = 0; i < rcb->q_depth; i++) {
0328         struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
0329 
0330         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
0331             bnad_rxq_cleanup_skb(bnad, unmap);
0332         else
0333             bnad_rxq_cleanup_page(bnad, unmap);
0334     }
0335     bnad_rxq_alloc_uninit(bnad, rcb);
0336 }
0337 
0338 static u32
0339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
0340 {
0341     u32 alloced, prod, q_depth;
0342     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0343     struct bnad_rx_unmap *unmap, *prev;
0344     struct bna_rxq_entry *rxent;
0345     struct page *page;
0346     u32 page_offset, alloc_size;
0347     dma_addr_t dma_addr;
0348 
0349     prod = rcb->producer_index;
0350     q_depth = rcb->q_depth;
0351 
0352     alloc_size = PAGE_SIZE << unmap_q->alloc_order;
0353     alloced = 0;
0354 
0355     while (nalloc--) {
0356         unmap = &unmap_q->unmap[prod];
0357 
0358         if (unmap_q->reuse_pi < 0) {
0359             page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
0360                     unmap_q->alloc_order);
0361             page_offset = 0;
0362         } else {
0363             prev = &unmap_q->unmap[unmap_q->reuse_pi];
0364             page = prev->page;
0365             page_offset = prev->page_offset + unmap_q->map_size;
0366             get_page(page);
0367         }
0368 
0369         if (unlikely(!page)) {
0370             BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
0371             rcb->rxq->rxbuf_alloc_failed++;
0372             goto finishing;
0373         }
0374 
0375         dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
0376                     unmap_q->map_size, DMA_FROM_DEVICE);
0377         if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
0378             put_page(page);
0379             BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
0380             rcb->rxq->rxbuf_map_failed++;
0381             goto finishing;
0382         }
0383 
0384         unmap->page = page;
0385         unmap->page_offset = page_offset;
0386         dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
0387         unmap->vector.len = unmap_q->map_size;
0388         page_offset += unmap_q->map_size;
0389 
0390         if (page_offset < alloc_size)
0391             unmap_q->reuse_pi = prod;
0392         else
0393             unmap_q->reuse_pi = -1;
0394 
0395         rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
0396         BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
0397         BNA_QE_INDX_INC(prod, q_depth);
0398         alloced++;
0399     }
0400 
0401 finishing:
0402     if (likely(alloced)) {
0403         rcb->producer_index = prod;
0404         smp_mb();
0405         if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
0406             bna_rxq_prod_indx_doorbell(rcb);
0407     }
0408 
0409     return alloced;
0410 }
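/*
 * Page-reuse illustration (hypothetical sizes): with alloc_order = 0 and
 * map_size = 2048 on a 4 KiB page, the first pass maps offset 0 and
 * records reuse_pi because 2048 < alloc_size.  The next pass picks up the
 * same page via get_page(), maps offset 2048, and since the running
 * offset then reaches alloc_size, reuse_pi is reset to -1 so a fresh page
 * is allocated on the following pass.  The producer doorbell is rung once
 * per refill batch rather than once per buffer.
 */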
0411 
0412 static u32
0413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
0414 {
0415     u32 alloced, prod, q_depth, buff_sz;
0416     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0417     struct bnad_rx_unmap *unmap;
0418     struct bna_rxq_entry *rxent;
0419     struct sk_buff *skb;
0420     dma_addr_t dma_addr;
0421 
0422     buff_sz = rcb->rxq->buffer_size;
0423     prod = rcb->producer_index;
0424     q_depth = rcb->q_depth;
0425 
0426     alloced = 0;
0427     while (nalloc--) {
0428         unmap = &unmap_q->unmap[prod];
0429 
0430         skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
0431 
0432         if (unlikely(!skb)) {
0433             BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
0434             rcb->rxq->rxbuf_alloc_failed++;
0435             goto finishing;
0436         }
0437 
0438         dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
0439                       buff_sz, DMA_FROM_DEVICE);
0440         if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
0441             dev_kfree_skb_any(skb);
0442             BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
0443             rcb->rxq->rxbuf_map_failed++;
0444             goto finishing;
0445         }
0446 
0447         unmap->skb = skb;
0448         dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
0449         unmap->vector.len = buff_sz;
0450 
0451         rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
0452         BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
0453         BNA_QE_INDX_INC(prod, q_depth);
0454         alloced++;
0455     }
0456 
0457 finishing:
0458     if (likely(alloced)) {
0459         rcb->producer_index = prod;
0460         smp_mb();
0461         if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
0462             bna_rxq_prod_indx_doorbell(rcb);
0463     }
0464 
0465     return alloced;
0466 }
0467 
0468 static inline void
0469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
0470 {
0471     struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
0472     u32 to_alloc;
0473 
0474     to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
0475     if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
0476         return;
0477 
0478     if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
0479         bnad_rxq_refill_skb(bnad, rcb, to_alloc);
0480     else
0481         bnad_rxq_refill_page(bnad, rcb, to_alloc);
0482 }
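/*
 * Refill-threshold note: the shift test above only replenishes the queue
 * once at least 2^BNAD_RXQ_REFILL_THRESHOLD_SHIFT entries are free, so
 * buffers are posted (and the doorbell written) in batches.
 * Hypothetically, with a shift of 3 the queue must accumulate 8 free
 * entries before bnad_rxq_refill_skb()/_page() is invoked.
 */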
0483 
0484 #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
0485                     BNA_CQ_EF_IPV6 | \
0486                     BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
0487                     BNA_CQ_EF_L4_CKSUM_OK)
0488 
0489 #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
0490                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
0491 #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
0492                 BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
0493 #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
0494                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
0495 #define flags_udp6 (BNA_CQ_EF_IPV6 | \
0496                 BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
0497 
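/*
 * How the masks above are used in bnad_cq_process(): the completion flags
 * are ANDed with flags_cksum_prot_mask and the result is compared for
 * exact equality against flags_tcp4/tcp6/udp4/udp6.  A TCP/IPv4 frame
 * missing either the L3 or the L4 checksum-OK bit matches none of them,
 * so the skb falls back to skb_checksum_none_assert() instead of
 * CHECKSUM_UNNECESSARY.
 */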
0498 static void
0499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
0500             u32 sop_ci, u32 nvecs)
0501 {
0502     struct bnad_rx_unmap_q *unmap_q;
0503     struct bnad_rx_unmap *unmap;
0504     u32 ci, vec;
0505 
0506     unmap_q = rcb->unmap_q;
0507     for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
0508         unmap = &unmap_q->unmap[ci];
0509         BNA_QE_INDX_INC(ci, rcb->q_depth);
0510 
0511         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
0512             bnad_rxq_cleanup_skb(bnad, unmap);
0513         else
0514             bnad_rxq_cleanup_page(bnad, unmap);
0515     }
0516 }
0517 
0518 static void
0519 bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
0520 {
0521     struct bna_rcb *rcb;
0522     struct bnad *bnad;
0523     struct bnad_rx_unmap_q *unmap_q;
0524     struct bna_cq_entry *cq, *cmpl;
0525     u32 ci, pi, totlen = 0;
0526 
0527     cq = ccb->sw_q;
0528     pi = ccb->producer_index;
0529     cmpl = &cq[pi];
0530 
0531     rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
0532     unmap_q = rcb->unmap_q;
0533     bnad = rcb->bnad;
0534     ci = rcb->consumer_index;
0535 
0536     /* prefetch header */
0537     prefetch(page_address(unmap_q->unmap[ci].page) +
0538          unmap_q->unmap[ci].page_offset);
0539 
0540     while (nvecs--) {
0541         struct bnad_rx_unmap *unmap;
0542         u32 len;
0543 
0544         unmap = &unmap_q->unmap[ci];
0545         BNA_QE_INDX_INC(ci, rcb->q_depth);
0546 
0547         dma_unmap_page(&bnad->pcidev->dev,
0548                    dma_unmap_addr(&unmap->vector, dma_addr),
0549                    unmap->vector.len, DMA_FROM_DEVICE);
0550 
0551         len = ntohs(cmpl->length);
0552         skb->truesize += unmap->vector.len;
0553         totlen += len;
0554 
0555         skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
0556                    unmap->page, unmap->page_offset, len);
0557 
0558         unmap->page = NULL;
0559         unmap->vector.len = 0;
0560 
0561         BNA_QE_INDX_INC(pi, ccb->q_depth);
0562         cmpl = &cq[pi];
0563     }
0564 
0565     skb->len += totlen;
0566     skb->data_len += totlen;
0567 }
0568 
0569 static inline void
0570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
0571           struct bnad_rx_unmap *unmap, u32 len)
0572 {
0573     prefetch(skb->data);
0574 
0575     dma_unmap_single(&bnad->pcidev->dev,
0576             dma_unmap_addr(&unmap->vector, dma_addr),
0577             unmap->vector.len, DMA_FROM_DEVICE);
0578 
0579     skb_put(skb, len);
0580     skb->protocol = eth_type_trans(skb, bnad->netdev);
0581 
0582     unmap->skb = NULL;
0583     unmap->vector.len = 0;
0584 }
0585 
0586 static u32
0587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
0588 {
0589     struct bna_cq_entry *cq, *cmpl, *next_cmpl;
0590     struct bna_rcb *rcb = NULL;
0591     struct bnad_rx_unmap_q *unmap_q;
0592     struct bnad_rx_unmap *unmap = NULL;
0593     struct sk_buff *skb = NULL;
0594     struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
0595     struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
0596     u32 packets = 0, len = 0, totlen = 0;
0597     u32 pi, vec, sop_ci = 0, nvecs = 0;
0598     u32 flags, masked_flags;
0599 
0600     prefetch(bnad->netdev);
0601 
0602     cq = ccb->sw_q;
0603 
0604     while (packets < budget) {
0605         cmpl = &cq[ccb->producer_index];
0606         if (!cmpl->valid)
0607             break;
0608         /* The 'valid' field is set by the adapter, only after writing
0609          * the other fields of completion entry. Hence, do not load
0610          * other fields of completion entry *before* the 'valid' is
0611          * loaded. Adding the rmb() here prevents the compiler and/or
0612          * CPU from reordering the reads which would potentially result
0613          * in reading stale values in completion entry.
0614          */
0615         rmb();
0616 
0617         BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
0618 
0619         if (bna_is_small_rxq(cmpl->rxq_id))
0620             rcb = ccb->rcb[1];
0621         else
0622             rcb = ccb->rcb[0];
0623 
0624         unmap_q = rcb->unmap_q;
0625 
0626         /* start of packet ci */
0627         sop_ci = rcb->consumer_index;
0628 
0629         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
0630             unmap = &unmap_q->unmap[sop_ci];
0631             skb = unmap->skb;
0632         } else {
0633             skb = napi_get_frags(&rx_ctrl->napi);
0634             if (unlikely(!skb))
0635                 break;
0636         }
0637         prefetch(skb);
0638 
0639         flags = ntohl(cmpl->flags);
0640         len = ntohs(cmpl->length);
0641         totlen = len;
0642         nvecs = 1;
0643 
0644         /* Check all the completions for this frame.
0645          * busy-wait doesn't help much, break here.
0646          */
0647         if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
0648             (flags & BNA_CQ_EF_EOP) == 0) {
0649             pi = ccb->producer_index;
0650             do {
0651                 BNA_QE_INDX_INC(pi, ccb->q_depth);
0652                 next_cmpl = &cq[pi];
0653 
0654                 if (!next_cmpl->valid)
0655                     break;
0656                 /* The 'valid' field is set by the adapter, only
0657                  * after writing the other fields of completion
0658                  * entry. Hence, do not load other fields of
0659                  * completion entry *before* the 'valid' is
0660                  * loaded. Adding the rmb() here prevents the
0661                  * compiler and/or CPU from reordering the reads
0662                  * which would potentially result in reading
0663                  * stale values in completion entry.
0664                  */
0665                 rmb();
0666 
0667                 len = ntohs(next_cmpl->length);
0668                 flags = ntohl(next_cmpl->flags);
0669 
0670                 nvecs++;
0671                 totlen += len;
0672             } while ((flags & BNA_CQ_EF_EOP) == 0);
0673 
0674             if (!next_cmpl->valid)
0675                 break;
0676         }
0677         packets++;
0678 
0679         /* TODO: BNA_CQ_EF_LOCAL ? */
0680         if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
0681                         BNA_CQ_EF_FCS_ERROR |
0682                         BNA_CQ_EF_TOO_LONG))) {
0683             bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
0684             rcb->rxq->rx_packets_with_error++;
0685 
0686             goto next;
0687         }
0688 
0689         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
0690             bnad_cq_setup_skb(bnad, skb, unmap, len);
0691         else
0692             bnad_cq_setup_skb_frags(ccb, skb, nvecs);
0693 
0694         rcb->rxq->rx_packets++;
0695         rcb->rxq->rx_bytes += totlen;
0696         ccb->bytes_per_intr += totlen;
0697 
0698         masked_flags = flags & flags_cksum_prot_mask;
0699 
0700         if (likely
0701             ((bnad->netdev->features & NETIF_F_RXCSUM) &&
0702              ((masked_flags == flags_tcp4) ||
0703               (masked_flags == flags_udp4) ||
0704               (masked_flags == flags_tcp6) ||
0705               (masked_flags == flags_udp6))))
0706             skb->ip_summed = CHECKSUM_UNNECESSARY;
0707         else
0708             skb_checksum_none_assert(skb);
0709 
0710         if ((flags & BNA_CQ_EF_VLAN) &&
0711             (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
0712             __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
0713 
0714         if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
0715             netif_receive_skb(skb);
0716         else
0717             napi_gro_frags(&rx_ctrl->napi);
0718 
0719 next:
0720         BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
0721         for (vec = 0; vec < nvecs; vec++) {
0722             cmpl = &cq[ccb->producer_index];
0723             cmpl->valid = 0;
0724             BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
0725         }
0726     }
0727 
0728     napi_gro_flush(&rx_ctrl->napi, false);
0729     if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
0730         bna_ib_ack_disable_irq(ccb->i_dbell, packets);
0731 
0732     bnad_rxq_post(bnad, ccb->rcb[0]);
0733     if (ccb->rcb[1])
0734         bnad_rxq_post(bnad, ccb->rcb[1]);
0735 
0736     return packets;
0737 }
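/*
 * Completion-ring bookkeeping sketch: a frame may span 'nvecs' completion
 * entries (multi-buffer Rx).  Once the frame is passed to the stack or
 * dropped, the RxQ consumer index advances by nvecs and the same number
 * of CQ entries have their 'valid' bit cleared as the CQ producer index
 * moves on, so the adapter's next writes to those slots are again caught
 * by the !cmpl->valid check at the top of the loop.
 */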
0738 
0739 static void
0740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
0741 {
0742     struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
0743     struct napi_struct *napi = &rx_ctrl->napi;
0744 
0745     if (likely(napi_schedule_prep(napi))) {
0746         __napi_schedule(napi);
0747         rx_ctrl->rx_schedule++;
0748     }
0749 }
0750 
0751 /* MSIX Rx Path Handler */
0752 static irqreturn_t
0753 bnad_msix_rx(int irq, void *data)
0754 {
0755     struct bna_ccb *ccb = (struct bna_ccb *)data;
0756 
0757     if (ccb) {
0758         ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
0759         bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
0760     }
0761 
0762     return IRQ_HANDLED;
0763 }
0764 
0765 /* Interrupt handlers */
0766 
0767 /* Mbox Interrupt Handlers */
0768 static irqreturn_t
0769 bnad_msix_mbox_handler(int irq, void *data)
0770 {
0771     u32 intr_status;
0772     unsigned long flags;
0773     struct bnad *bnad = (struct bnad *)data;
0774 
0775     spin_lock_irqsave(&bnad->bna_lock, flags);
0776     if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
0777         spin_unlock_irqrestore(&bnad->bna_lock, flags);
0778         return IRQ_HANDLED;
0779     }
0780 
0781     bna_intr_status_get(&bnad->bna, intr_status);
0782 
0783     if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
0784         bna_mbox_handler(&bnad->bna, intr_status);
0785 
0786     spin_unlock_irqrestore(&bnad->bna_lock, flags);
0787 
0788     return IRQ_HANDLED;
0789 }
0790 
0791 static irqreturn_t
0792 bnad_isr(int irq, void *data)
0793 {
0794     int i, j;
0795     u32 intr_status;
0796     unsigned long flags;
0797     struct bnad *bnad = (struct bnad *)data;
0798     struct bnad_rx_info *rx_info;
0799     struct bnad_rx_ctrl *rx_ctrl;
0800     struct bna_tcb *tcb = NULL;
0801 
0802     spin_lock_irqsave(&bnad->bna_lock, flags);
0803     if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
0804         spin_unlock_irqrestore(&bnad->bna_lock, flags);
0805         return IRQ_NONE;
0806     }
0807 
0808     bna_intr_status_get(&bnad->bna, intr_status);
0809 
0810     if (unlikely(!intr_status)) {
0811         spin_unlock_irqrestore(&bnad->bna_lock, flags);
0812         return IRQ_NONE;
0813     }
0814 
0815     if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
0816         bna_mbox_handler(&bnad->bna, intr_status);
0817 
0818     spin_unlock_irqrestore(&bnad->bna_lock, flags);
0819 
0820     if (!BNA_IS_INTX_DATA_INTR(intr_status))
0821         return IRQ_HANDLED;
0822 
0823     /* Process data interrupts */
0824     /* Tx processing */
0825     for (i = 0; i < bnad->num_tx; i++) {
0826         for (j = 0; j < bnad->num_txq_per_tx; j++) {
0827             tcb = bnad->tx_info[i].tcb[j];
0828             if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
0829                 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
0830         }
0831     }
0832     /* Rx processing */
0833     for (i = 0; i < bnad->num_rx; i++) {
0834         rx_info = &bnad->rx_info[i];
0835         if (!rx_info->rx)
0836             continue;
0837         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
0838             rx_ctrl = &rx_info->rx_ctrl[j];
0839             if (rx_ctrl->ccb)
0840                 bnad_netif_rx_schedule_poll(bnad,
0841                                 rx_ctrl->ccb);
0842         }
0843     }
0844     return IRQ_HANDLED;
0845 }
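/*
 * INTx note: unlike the per-queue MSIX handlers above, bnad_isr() is
 * registered with IRQF_SHARED (see bnad_mbox_irq_alloc()), so it returns
 * IRQ_NONE when the interrupt status shows nothing for this device and
 * lets other devices on the shared line claim the interrupt.  When data
 * interrupts are present it reclaims Tx completions inline and defers Rx
 * work to NAPI via bnad_netif_rx_schedule_poll().
 */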
0846 
0847 /*
0848  * Called in interrupt / callback context
0849  * with bna_lock held, so cfg_flags access is OK
0850  */
0851 static void
0852 bnad_enable_mbox_irq(struct bnad *bnad)
0853 {
0854     clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
0855 
0856     BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
0857 }
0858 
0859 /*
0860  * Called with bnad->bna_lock held because of the
0861  * bnad->cfg_flags access.
0862  */
0863 static void
0864 bnad_disable_mbox_irq(struct bnad *bnad)
0865 {
0866     set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
0867 
0868     BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
0869 }
0870 
0871 static void
0872 bnad_set_netdev_perm_addr(struct bnad *bnad)
0873 {
0874     struct net_device *netdev = bnad->netdev;
0875 
0876     ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
0877     if (is_zero_ether_addr(netdev->dev_addr))
0878         eth_hw_addr_set(netdev, bnad->perm_addr);
0879 }
0880 
0881 /* Control Path Handlers */
0882 
0883 /* Callbacks */
0884 void
0885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
0886 {
0887     bnad_enable_mbox_irq(bnad);
0888 }
0889 
0890 void
0891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
0892 {
0893     bnad_disable_mbox_irq(bnad);
0894 }
0895 
0896 void
0897 bnad_cb_ioceth_ready(struct bnad *bnad)
0898 {
0899     bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
0900     complete(&bnad->bnad_completions.ioc_comp);
0901 }
0902 
0903 void
0904 bnad_cb_ioceth_failed(struct bnad *bnad)
0905 {
0906     bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
0907     complete(&bnad->bnad_completions.ioc_comp);
0908 }
0909 
0910 void
0911 bnad_cb_ioceth_disabled(struct bnad *bnad)
0912 {
0913     bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
0914     complete(&bnad->bnad_completions.ioc_comp);
0915 }
0916 
0917 static void
0918 bnad_cb_enet_disabled(void *arg)
0919 {
0920     struct bnad *bnad = (struct bnad *)arg;
0921 
0922     netif_carrier_off(bnad->netdev);
0923     complete(&bnad->bnad_completions.enet_comp);
0924 }
0925 
0926 void
0927 bnad_cb_ethport_link_status(struct bnad *bnad,
0928             enum bna_link_status link_status)
0929 {
0930     bool link_up = false;
0931 
0932     link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
0933 
0934     if (link_status == BNA_CEE_UP) {
0935         if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
0936             BNAD_UPDATE_CTR(bnad, cee_toggle);
0937         set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
0938     } else {
0939         if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
0940             BNAD_UPDATE_CTR(bnad, cee_toggle);
0941         clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
0942     }
0943 
0944     if (link_up) {
0945         if (!netif_carrier_ok(bnad->netdev)) {
0946             uint tx_id, tcb_id;
0947             netdev_info(bnad->netdev, "link up\n");
0948             netif_carrier_on(bnad->netdev);
0949             BNAD_UPDATE_CTR(bnad, link_toggle);
0950             for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
0951                 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
0952                       tcb_id++) {
0953                     struct bna_tcb *tcb =
0954                     bnad->tx_info[tx_id].tcb[tcb_id];
0955                     u32 txq_id;
0956                     if (!tcb)
0957                         continue;
0958 
0959                     txq_id = tcb->id;
0960 
0961                     if (test_bit(BNAD_TXQ_TX_STARTED,
0962                              &tcb->flags)) {
0963                         /*
0964                          * Force an immediate
0965                          * Transmit Schedule */
0966                         netif_wake_subqueue(
0967                                 bnad->netdev,
0968                                 txq_id);
0969                         BNAD_UPDATE_CTR(bnad,
0970                             netif_queue_wakeup);
0971                     } else {
0972                         netif_stop_subqueue(
0973                                 bnad->netdev,
0974                                 txq_id);
0975                         BNAD_UPDATE_CTR(bnad,
0976                             netif_queue_stop);
0977                     }
0978                 }
0979             }
0980         }
0981     } else {
0982         if (netif_carrier_ok(bnad->netdev)) {
0983             netdev_info(bnad->netdev, "link down\n");
0984             netif_carrier_off(bnad->netdev);
0985             BNAD_UPDATE_CTR(bnad, link_toggle);
0986         }
0987     }
0988 }
0989 
0990 static void
0991 bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
0992 {
0993     struct bnad *bnad = (struct bnad *)arg;
0994 
0995     complete(&bnad->bnad_completions.tx_comp);
0996 }
0997 
0998 static void
0999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1000 {
1001     struct bnad_tx_info *tx_info =
1002             (struct bnad_tx_info *)tcb->txq->tx->priv;
1003 
1004     tcb->priv = tcb;
1005     tx_info->tcb[tcb->id] = tcb;
1006 }
1007 
1008 static void
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1010 {
1011     struct bnad_tx_info *tx_info =
1012             (struct bnad_tx_info *)tcb->txq->tx->priv;
1013 
1014     tx_info->tcb[tcb->id] = NULL;
1015     tcb->priv = NULL;
1016 }
1017 
1018 static void
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1020 {
1021     struct bnad_rx_info *rx_info =
1022             (struct bnad_rx_info *)ccb->cq->rx->priv;
1023 
1024     rx_info->rx_ctrl[ccb->id].ccb = ccb;
1025     ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
1026 }
1027 
1028 static void
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1030 {
1031     struct bnad_rx_info *rx_info =
1032             (struct bnad_rx_info *)ccb->cq->rx->priv;
1033 
1034     rx_info->rx_ctrl[ccb->id].ccb = NULL;
1035 }
1036 
1037 static void
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039 {
1040     struct bnad_tx_info *tx_info =
1041             (struct bnad_tx_info *)tx->priv;
1042     struct bna_tcb *tcb;
1043     u32 txq_id;
1044     int i;
1045 
1046     for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047         tcb = tx_info->tcb[i];
1048         if (!tcb)
1049             continue;
1050         txq_id = tcb->id;
1051         clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052         netif_stop_subqueue(bnad->netdev, txq_id);
1053     }
1054 }
1055 
1056 static void
1057 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1058 {
1059     struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060     struct bna_tcb *tcb;
1061     u32 txq_id;
1062     int i;
1063 
1064     for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065         tcb = tx_info->tcb[i];
1066         if (!tcb)
1067             continue;
1068         txq_id = tcb->id;
1069 
1070         BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071         set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072         BUG_ON(*(tcb->hw_consumer_index) != 0);
1073 
1074         if (netif_carrier_ok(bnad->netdev)) {
1075             netif_wake_subqueue(bnad->netdev, txq_id);
1076             BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1077         }
1078     }
1079 
1080     /*
1081      * Workaround for the first ioceth enable failure, which
1082      * leaves us with a zero MAC address. Try to get the MAC
1083      * address again here.
1084      */
1085     if (is_zero_ether_addr(bnad->perm_addr)) {
1086         bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087         bnad_set_netdev_perm_addr(bnad);
1088     }
1089 }
1090 
1091 /*
1092  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1093  */
1094 static void
1095 bnad_tx_cleanup(struct delayed_work *work)
1096 {
1097     struct bnad_tx_info *tx_info =
1098         container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099     struct bnad *bnad = NULL;
1100     struct bna_tcb *tcb;
1101     unsigned long flags;
1102     u32 i, pending = 0;
1103 
1104     for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105         tcb = tx_info->tcb[i];
1106         if (!tcb)
1107             continue;
1108 
1109         bnad = tcb->bnad;
1110 
1111         if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1112             pending++;
1113             continue;
1114         }
1115 
1116         bnad_txq_cleanup(bnad, tcb);
1117 
1118         smp_mb__before_atomic();
1119         clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1120     }
1121 
1122     if (pending) {
1123         queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124             msecs_to_jiffies(1));
1125         return;
1126     }
1127 
1128     spin_lock_irqsave(&bnad->bna_lock, flags);
1129     bna_tx_cleanup_complete(tx_info->tx);
1130     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131 }
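/*
 * Interlock sketch: BNAD_TXQ_FREE_SENT acts as a lock-free ownership bit
 * shared with bnad_tx_complete().  If the completion path currently owns
 * a TCB, the cleanup above counts it as pending and re-queues itself
 * after about a millisecond instead of racing for the unmap queue; only
 * once every TCB has been drained is bna_tx_cleanup_complete() reported
 * to the Tx state machine.
 */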
1132 
1133 static void
1134 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1135 {
1136     struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137     struct bna_tcb *tcb;
1138     int i;
1139 
1140     for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141         tcb = tx_info->tcb[i];
1142         if (!tcb)
1143             continue;
1144     }
1145 
1146     queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1147 }
1148 
1149 static void
1150 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1151 {
1152     struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153     struct bna_ccb *ccb;
1154     struct bnad_rx_ctrl *rx_ctrl;
1155     int i;
1156 
1157     for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158         rx_ctrl = &rx_info->rx_ctrl[i];
1159         ccb = rx_ctrl->ccb;
1160         if (!ccb)
1161             continue;
1162 
1163         clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1164 
1165         if (ccb->rcb[1])
1166             clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1167     }
1168 }
1169 
1170 /*
1171  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1172  */
1173 static void
1174 bnad_rx_cleanup(void *work)
1175 {
1176     struct bnad_rx_info *rx_info =
1177         container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178     struct bnad_rx_ctrl *rx_ctrl;
1179     struct bnad *bnad = NULL;
1180     unsigned long flags;
1181     u32 i;
1182 
1183     for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184         rx_ctrl = &rx_info->rx_ctrl[i];
1185 
1186         if (!rx_ctrl->ccb)
1187             continue;
1188 
1189         bnad = rx_ctrl->ccb->bnad;
1190 
1191         /*
1192          * Wait till the poll handler has exited
1193          * and nothing can be scheduled anymore
1194          */
1195         napi_disable(&rx_ctrl->napi);
1196 
1197         bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198         bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199         if (rx_ctrl->ccb->rcb[1])
1200             bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1201     }
1202 
1203     spin_lock_irqsave(&bnad->bna_lock, flags);
1204     bna_rx_cleanup_complete(rx_info->rx);
1205     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1206 }
1207 
1208 static void
1209 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1210 {
1211     struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212     struct bna_ccb *ccb;
1213     struct bnad_rx_ctrl *rx_ctrl;
1214     int i;
1215 
1216     for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217         rx_ctrl = &rx_info->rx_ctrl[i];
1218         ccb = rx_ctrl->ccb;
1219         if (!ccb)
1220             continue;
1221 
1222         clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1223 
1224         if (ccb->rcb[1])
1225             clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1226     }
1227 
1228     queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1229 }
1230 
1231 static void
1232 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1233 {
1234     struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235     struct bna_ccb *ccb;
1236     struct bna_rcb *rcb;
1237     struct bnad_rx_ctrl *rx_ctrl;
1238     int i, j;
1239 
1240     for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241         rx_ctrl = &rx_info->rx_ctrl[i];
1242         ccb = rx_ctrl->ccb;
1243         if (!ccb)
1244             continue;
1245 
1246         napi_enable(&rx_ctrl->napi);
1247 
1248         for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1249             rcb = ccb->rcb[j];
1250             if (!rcb)
1251                 continue;
1252 
1253             bnad_rxq_alloc_init(bnad, rcb);
1254             set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255             set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256             bnad_rxq_post(bnad, rcb);
1257         }
1258     }
1259 }
1260 
1261 static void
1262 bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1263 {
1264     struct bnad *bnad = (struct bnad *)arg;
1265 
1266     complete(&bnad->bnad_completions.rx_comp);
1267 }
1268 
1269 static void
1270 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1271 {
1272     bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273     complete(&bnad->bnad_completions.mcast_comp);
1274 }
1275 
1276 void
1277 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278                struct bna_stats *stats)
1279 {
1280     if (status == BNA_CB_SUCCESS)
1281         BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1282 
1283     if (!netif_running(bnad->netdev) ||
1284         !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1285         return;
1286 
1287     mod_timer(&bnad->stats_timer,
1288           jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1289 }
1290 
1291 static void
1292 bnad_cb_enet_mtu_set(struct bnad *bnad)
1293 {
1294     bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295     complete(&bnad->bnad_completions.mtu_comp);
1296 }
1297 
1298 void
1299 bnad_cb_completion(void *arg, enum bfa_status status)
1300 {
1301     struct bnad_iocmd_comp *iocmd_comp =
1302             (struct bnad_iocmd_comp *)arg;
1303 
1304     iocmd_comp->comp_status = (u32) status;
1305     complete(&iocmd_comp->comp);
1306 }
1307 
1308 /* Resource allocation, free functions */
1309 
1310 static void
1311 bnad_mem_free(struct bnad *bnad,
1312           struct bna_mem_info *mem_info)
1313 {
1314     int i;
1315     dma_addr_t dma_pa;
1316 
1317     if (mem_info->mdl == NULL)
1318         return;
1319 
1320     for (i = 0; i < mem_info->num; i++) {
1321         if (mem_info->mdl[i].kva != NULL) {
1322             if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323                 BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1324                         dma_pa);
1325                 dma_free_coherent(&bnad->pcidev->dev,
1326                           mem_info->mdl[i].len,
1327                           mem_info->mdl[i].kva, dma_pa);
1328             } else
1329                 kfree(mem_info->mdl[i].kva);
1330         }
1331     }
1332     kfree(mem_info->mdl);
1333     mem_info->mdl = NULL;
1334 }
1335 
1336 static int
1337 bnad_mem_alloc(struct bnad *bnad,
1338            struct bna_mem_info *mem_info)
1339 {
1340     int i;
1341     dma_addr_t dma_pa;
1342 
1343     if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344         mem_info->mdl = NULL;
1345         return 0;
1346     }
1347 
1348     mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1349                 GFP_KERNEL);
1350     if (mem_info->mdl == NULL)
1351         return -ENOMEM;
1352 
1353     if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354         for (i = 0; i < mem_info->num; i++) {
1355             mem_info->mdl[i].len = mem_info->len;
1356             mem_info->mdl[i].kva =
1357                 dma_alloc_coherent(&bnad->pcidev->dev,
1358                            mem_info->len, &dma_pa,
1359                            GFP_KERNEL);
1360             if (mem_info->mdl[i].kva == NULL)
1361                 goto err_return;
1362 
1363             BNA_SET_DMA_ADDR(dma_pa,
1364                      &(mem_info->mdl[i].dma));
1365         }
1366     } else {
1367         for (i = 0; i < mem_info->num; i++) {
1368             mem_info->mdl[i].len = mem_info->len;
1369             mem_info->mdl[i].kva = kzalloc(mem_info->len,
1370                             GFP_KERNEL);
1371             if (mem_info->mdl[i].kva == NULL)
1372                 goto err_return;
1373         }
1374     }
1375 
1376     return 0;
1377 
1378 err_return:
1379     bnad_mem_free(bnad, mem_info);
1380     return -ENOMEM;
1381 }
1382 
1383 /* Free IRQ for Mailbox */
1384 static void
1385 bnad_mbox_irq_free(struct bnad *bnad)
1386 {
1387     int irq;
1388     unsigned long flags;
1389 
1390     spin_lock_irqsave(&bnad->bna_lock, flags);
1391     bnad_disable_mbox_irq(bnad);
1392     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393 
1394     irq = BNAD_GET_MBOX_IRQ(bnad);
1395     free_irq(irq, bnad);
1396 }
1397 
1398 /*
1399  * Allocates IRQ for Mailbox, but keep it disabled
1400  * This will be enabled once we get the mbox enable callback
1401  * from bna
1402  */
1403 static int
1404 bnad_mbox_irq_alloc(struct bnad *bnad)
1405 {
1406     int     err = 0;
1407     unsigned long   irq_flags, flags;
1408     u32 irq;
1409     irq_handler_t   irq_handler;
1410 
1411     spin_lock_irqsave(&bnad->bna_lock, flags);
1412     if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413         irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414         irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1415         irq_flags = 0;
1416     } else {
1417         irq_handler = (irq_handler_t)bnad_isr;
1418         irq = bnad->pcidev->irq;
1419         irq_flags = IRQF_SHARED;
1420     }
1421 
1422     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423     sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1424 
1425     /*
1426      * Set the Mbox IRQ disable flag, so that the IRQ handler
1427      * called from request_irq() for SHARED IRQs do not execute
1428      */
1429     set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1430 
1431     BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1432 
1433     err = request_irq(irq, irq_handler, irq_flags,
1434               bnad->mbox_irq_name, bnad);
1435 
1436     return err;
1437 }
1438 
1439 static void
1440 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1441 {
1442     kfree(intr_info->idl);
1443     intr_info->idl = NULL;
1444 }
1445 
1446 /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1447 static int
1448 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449             u32 txrx_id, struct bna_intr_info *intr_info)
1450 {
1451     int i, vector_start = 0;
1452     u32 cfg_flags;
1453     unsigned long flags;
1454 
1455     spin_lock_irqsave(&bnad->bna_lock, flags);
1456     cfg_flags = bnad->cfg_flags;
1457     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1458 
1459     if (cfg_flags & BNAD_CF_MSIX) {
1460         intr_info->intr_type = BNA_INTR_T_MSIX;
1461         intr_info->idl = kcalloc(intr_info->num,
1462                     sizeof(struct bna_intr_descr),
1463                     GFP_KERNEL);
1464         if (!intr_info->idl)
1465             return -ENOMEM;
1466 
1467         switch (src) {
1468         case BNAD_INTR_TX:
1469             vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1470             break;
1471 
1472         case BNAD_INTR_RX:
1473             vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474                     (bnad->num_tx * bnad->num_txq_per_tx) +
1475                     txrx_id;
1476             break;
1477 
1478         default:
1479             BUG();
1480         }
1481 
1482         for (i = 0; i < intr_info->num; i++)
1483             intr_info->idl[i].vector = vector_start + i;
1484     } else {
1485         intr_info->intr_type = BNA_INTR_T_INTX;
1486         intr_info->num = 1;
1487         intr_info->idl = kcalloc(intr_info->num,
1488                     sizeof(struct bna_intr_descr),
1489                     GFP_KERNEL);
1490         if (!intr_info->idl)
1491             return -ENOMEM;
1492 
1493         switch (src) {
1494         case BNAD_INTR_TX:
1495             intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1496             break;
1497 
1498         case BNAD_INTR_RX:
1499             intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1500             break;
1501         }
1502     }
1503     return 0;
1504 }
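/*
 * MSIX vector-layout example (hypothetical sizes, assuming a single
 * mailbox vector): with num_tx * num_txq_per_tx = 8 and txrx_id = 0, the
 * Tx queues take MSIX table entries 1..8 and the first Rx path starts at
 * entry 9, matching the two vector_start computations above.  In INTx
 * mode there is a single descriptor whose 'vector' field is an IB
 * bitmask rather than an MSIX table index.
 */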
1505 
1506 /* NOTE: Should be called for MSIX only
1507  * Unregisters Tx MSIX vector(s) from the kernel
1508  */
1509 static void
1510 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1511             int num_txqs)
1512 {
1513     int i;
1514     int vector_num;
1515 
1516     for (i = 0; i < num_txqs; i++) {
1517         if (tx_info->tcb[i] == NULL)
1518             continue;
1519 
1520         vector_num = tx_info->tcb[i]->intr_vector;
1521         free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1522     }
1523 }
1524 
1525 /* NOTE: Should be called for MSIX only
1526  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1527  */
1528 static int
1529 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530             u32 tx_id, int num_txqs)
1531 {
1532     int i;
1533     int err;
1534     int vector_num;
1535 
1536     for (i = 0; i < num_txqs; i++) {
1537         vector_num = tx_info->tcb[i]->intr_vector;
1538         sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539                 tx_id + tx_info->tcb[i]->id);
1540         err = request_irq(bnad->msix_table[vector_num].vector,
1541                   (irq_handler_t)bnad_msix_tx, 0,
1542                   tx_info->tcb[i]->name,
1543                   tx_info->tcb[i]);
1544         if (err)
1545             goto err_return;
1546     }
1547 
1548     return 0;
1549 
1550 err_return:
1551     if (i > 0)
1552         bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1553     return -1;
1554 }
1555 
1556 /* NOTE: Should be called for MSIX only
1557  * Unregisters Rx MSIX vector(s) from the kernel
1558  */
1559 static void
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561             int num_rxps)
1562 {
1563     int i;
1564     int vector_num;
1565 
1566     for (i = 0; i < num_rxps; i++) {
1567         if (rx_info->rx_ctrl[i].ccb == NULL)
1568             continue;
1569 
1570         vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571         free_irq(bnad->msix_table[vector_num].vector,
1572              rx_info->rx_ctrl[i].ccb);
1573     }
1574 }
1575 
1576 /* NOTE: Should be called for MSIX only
1577  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1578  */
1579 static int
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581             u32 rx_id, int num_rxps)
1582 {
1583     int i;
1584     int err;
1585     int vector_num;
1586 
1587     for (i = 0; i < num_rxps; i++) {
1588         vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589         sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1590             bnad->netdev->name,
1591             rx_id + rx_info->rx_ctrl[i].ccb->id);
1592         err = request_irq(bnad->msix_table[vector_num].vector,
1593                   (irq_handler_t)bnad_msix_rx, 0,
1594                   rx_info->rx_ctrl[i].ccb->name,
1595                   rx_info->rx_ctrl[i].ccb);
1596         if (err)
1597             goto err_return;
1598     }
1599 
1600     return 0;
1601 
1602 err_return:
1603     if (i > 0)
1604         bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1605     return -1;
1606 }
1607 
1608 /* Free Tx object Resources */
1609 static void
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1611 {
1612     int i;
1613 
1614     for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615         if (res_info[i].res_type == BNA_RES_T_MEM)
1616             bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617         else if (res_info[i].res_type == BNA_RES_T_INTR)
1618             bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1619     }
1620 }
1621 
1622 /* Allocates memory and interrupt resources for Tx object */
1623 static int
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625           u32 tx_id)
1626 {
1627     int i, err = 0;
1628 
1629     for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630         if (res_info[i].res_type == BNA_RES_T_MEM)
1631             err = bnad_mem_alloc(bnad,
1632                     &res_info[i].res_u.mem_info);
1633         else if (res_info[i].res_type == BNA_RES_T_INTR)
1634             err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635                     &res_info[i].res_u.intr_info);
1636         if (err)
1637             goto err_return;
1638     }
1639     return 0;
1640 
1641 err_return:
1642     bnad_tx_res_free(bnad, res_info);
1643     return err;
1644 }
1645 
1646 /* Free Rx object Resources */
1647 static void
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1649 {
1650     int i;
1651 
1652     for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653         if (res_info[i].res_type == BNA_RES_T_MEM)
1654             bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655         else if (res_info[i].res_type == BNA_RES_T_INTR)
1656             bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1657     }
1658 }
1659 
1660 /* Allocates memory and interrupt resources for Rx object */
1661 static int
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663           uint rx_id)
1664 {
1665     int i, err = 0;
1666 
1667     /* All memory needs to be allocated before setup_ccbs */
1668     for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669         if (res_info[i].res_type == BNA_RES_T_MEM)
1670             err = bnad_mem_alloc(bnad,
1671                     &res_info[i].res_u.mem_info);
1672         else if (res_info[i].res_type == BNA_RES_T_INTR)
1673             err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674                     &res_info[i].res_u.intr_info);
1675         if (err)
1676             goto err_return;
1677     }
1678     return 0;
1679 
1680 err_return:
1681     bnad_rx_res_free(bnad, res_info);
1682     return err;
1683 }
1684 
1685 /* Timer callbacks */
1686 /* a) IOC timer */
1687 static void
1688 bnad_ioc_timeout(struct timer_list *t)
1689 {
1690     struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691     unsigned long flags;
1692 
1693     spin_lock_irqsave(&bnad->bna_lock, flags);
1694     bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1696 }
1697 
1698 static void
1699 bnad_ioc_hb_check(struct timer_list *t)
1700 {
1701     struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702     unsigned long flags;
1703 
1704     spin_lock_irqsave(&bnad->bna_lock, flags);
1705     bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707 }
1708 
1709 static void
1710 bnad_iocpf_timeout(struct timer_list *t)
1711 {
1712     struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713     unsigned long flags;
1714 
1715     spin_lock_irqsave(&bnad->bna_lock, flags);
1716     bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718 }
1719 
1720 static void
1721 bnad_iocpf_sem_timeout(struct timer_list *t)
1722 {
1723     struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724     unsigned long flags;
1725 
1726     spin_lock_irqsave(&bnad->bna_lock, flags);
1727     bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729 }
1730 
1731 /*
1732  * All timer routines use bnad->bna_lock to protect against
1733  * the following race, which may occur in case of no locking:
1734  *  Time    CPU m           CPU n
1735  *  0       1 = test_bit
1736  *  1                       clear_bit
1737  *  2                       del_timer_sync
1738  *  3       mod_timer
1739  */
1740 
1741 /* b) Dynamic Interrupt Moderation Timer */
1742 static void
1743 bnad_dim_timeout(struct timer_list *t)
1744 {
1745     struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746     struct bnad_rx_info *rx_info;
1747     struct bnad_rx_ctrl *rx_ctrl;
1748     int i, j;
1749     unsigned long flags;
1750 
1751     if (!netif_carrier_ok(bnad->netdev))
1752         return;
1753 
1754     spin_lock_irqsave(&bnad->bna_lock, flags);
1755     for (i = 0; i < bnad->num_rx; i++) {
1756         rx_info = &bnad->rx_info[i];
1757         if (!rx_info->rx)
1758             continue;
1759         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760             rx_ctrl = &rx_info->rx_ctrl[j];
1761             if (!rx_ctrl->ccb)
1762                 continue;
1763             bna_rx_dim_update(rx_ctrl->ccb);
1764         }
1765     }
1766 
1767     /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
1768     if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769         mod_timer(&bnad->dim_timer,
1770               jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772 }
1773 
1774 /* c)  Statistics Timer */
1775 static void
1776 bnad_stats_timeout(struct timer_list *t)
1777 {
1778     struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779     unsigned long flags;
1780 
1781     if (!netif_running(bnad->netdev) ||
1782         !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783         return;
1784 
1785     spin_lock_irqsave(&bnad->bna_lock, flags);
1786     bna_hw_stats_get(&bnad->bna);
1787     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788 }
1789 
1790 /*
1791  * Set up timer for DIM
1792  * Called with bnad->bna_lock held
1793  */
1794 void
1795 bnad_dim_timer_start(struct bnad *bnad)
1796 {
1797     if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798         !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799         timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800         set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801         mod_timer(&bnad->dim_timer,
1802               jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1803     }
1804 }
1805 
1806 /*
1807  * Set up timer for statistics
1808  * Called with mutex_lock(&bnad->conf_mutex) held
1809  */
1810 static void
1811 bnad_stats_timer_start(struct bnad *bnad)
1812 {
1813     unsigned long flags;
1814 
1815     spin_lock_irqsave(&bnad->bna_lock, flags);
1816     if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817         timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818         mod_timer(&bnad->stats_timer,
1819               jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1820     }
1821     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1822 }
1823 
1824 /*
1825  * Stops the stats timer
1826  * Called with mutex_lock(&bnad->conf_mutex) held
1827  */
1828 static void
1829 bnad_stats_timer_stop(struct bnad *bnad)
1830 {
1831     int to_del = 0;
1832     unsigned long flags;
1833 
1834     spin_lock_irqsave(&bnad->bna_lock, flags);
1835     if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836         to_del = 1;
1837     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838     if (to_del)
1839         del_timer_sync(&bnad->stats_timer);
1840 }
1841 
1842 /* Utilities */
1843 
1844 static void
1845 bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1846 {
1847     int i = 1; /* Index 0 has broadcast address */
1848     struct netdev_hw_addr *mc_addr;
1849 
1850     netdev_for_each_mc_addr(mc_addr, netdev) {
1851         ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852         i++;
1853     }
1854 }
1855 
1856 static int
1857 bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1858 {
1859     struct bnad_rx_ctrl *rx_ctrl =
1860         container_of(napi, struct bnad_rx_ctrl, napi);
1861     struct bnad *bnad = rx_ctrl->bnad;
1862     int rcvd = 0;
1863 
1864     rx_ctrl->rx_poll_ctr++;
1865 
1866     if (!netif_carrier_ok(bnad->netdev))
1867         goto poll_exit;
1868 
1869     rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870     if (rcvd >= budget)
1871         return rcvd;
1872 
1873 poll_exit:
1874     napi_complete_done(napi, rcvd);
1875 
1876     rx_ctrl->rx_complete++;
1877 
1878     if (rx_ctrl->ccb)
1879         bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1880 
1881     return rcvd;
1882 }
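/*
 * NAPI contract as used above: if the whole budget was consumed the
 * poll routine returns without completing NAPI so the core re-polls;
 * otherwise it calls napi_complete_done() and re-enables the Rx
 * interrupt via bnad_enable_rx_irq_unsafe().
 */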
1883 
1884 static void
1885 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1886 {
1887     struct bnad_rx_ctrl *rx_ctrl;
1888     int i;
1889 
1890     /* Initialize & enable NAPI */
1891     for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1892         rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1893         netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1894                    bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
1895     }
1896 }
1897 
1898 static void
1899 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1900 {
1901     int i;
1902 
1903     /* First disable and then clean up */
1904     for (i = 0; i < bnad->num_rxp_per_rx; i++)
1905         netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1906 }
1907 
1908 /* Should be called with conf_lock held */
1909 void
1910 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1911 {
1912     struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1913     struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1914     unsigned long flags;
1915 
1916     if (!tx_info->tx)
1917         return;
1918 
1919     init_completion(&bnad->bnad_completions.tx_comp);
1920     spin_lock_irqsave(&bnad->bna_lock, flags);
1921     bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1922     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1923     wait_for_completion(&bnad->bnad_completions.tx_comp);
1924 
1925     if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1926         bnad_tx_msix_unregister(bnad, tx_info,
1927             bnad->num_txq_per_tx);
1928 
1929     spin_lock_irqsave(&bnad->bna_lock, flags);
1930     bna_tx_destroy(tx_info->tx);
1931     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1932 
1933     tx_info->tx = NULL;
1934     tx_info->tx_id = 0;
1935 
1936     bnad_tx_res_free(bnad, res_info);
1937 }
1938 
1939 /* Should be called with conf_lock held */
1940 int
1941 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1942 {
1943     int err;
1944     struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1945     struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1946     struct bna_intr_info *intr_info =
1947             &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1948     struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1949     static const struct bna_tx_event_cbfn tx_cbfn = {
1950         .tcb_setup_cbfn = bnad_cb_tcb_setup,
1951         .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1952         .tx_stall_cbfn = bnad_cb_tx_stall,
1953         .tx_resume_cbfn = bnad_cb_tx_resume,
1954         .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1955     };
1956 
1957     struct bna_tx *tx;
1958     unsigned long flags;
1959 
1960     tx_info->tx_id = tx_id;
1961 
1962     /* Initialize the Tx object configuration */
1963     tx_config->num_txq = bnad->num_txq_per_tx;
1964     tx_config->txq_depth = bnad->txq_depth;
1965     tx_config->tx_type = BNA_TX_T_REGULAR;
1966     tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1967 
1968     /* Get BNA's resource requirement for one tx object */
1969     spin_lock_irqsave(&bnad->bna_lock, flags);
1970     bna_tx_res_req(bnad->num_txq_per_tx,
1971         bnad->txq_depth, res_info);
1972     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1973 
1974     /* Fill Unmap Q memory requirements */
1975     BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1976             bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1977             bnad->txq_depth));
1978 
1979     /* Allocate resources */
1980     err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1981     if (err)
1982         return err;
1983 
1984     /* Ask BNA to create one Tx object, supplying required resources */
1985     spin_lock_irqsave(&bnad->bna_lock, flags);
1986     tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1987             tx_info);
1988     spin_unlock_irqrestore(&bnad->bna_lock, flags);
1989     if (!tx) {
1990         err = -ENOMEM;
1991         goto err_return;
1992     }
1993     tx_info->tx = tx;
1994 
1995     INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1996             (work_func_t)bnad_tx_cleanup);
1997 
1998     /* Register ISR for the Tx object */
1999     if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2000         err = bnad_tx_msix_register(bnad, tx_info,
2001             tx_id, bnad->num_txq_per_tx);
2002         if (err)
2003             goto cleanup_tx;
2004     }
2005 
2006     spin_lock_irqsave(&bnad->bna_lock, flags);
2007     bna_tx_enable(tx);
2008     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2009 
2010     return 0;
2011 
2012 cleanup_tx:
2013     spin_lock_irqsave(&bnad->bna_lock, flags);
2014     bna_tx_destroy(tx_info->tx);
2015     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2016     tx_info->tx = NULL;
2017     tx_info->tx_id = 0;
2018 err_return:
2019     bnad_tx_res_free(bnad, res_info);
2020     return err;
2021 }
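/*
 * Rough ordering followed by bnad_setup_tx()/bnad_destroy_tx():
 * resources -> bna_tx_create() -> MSI-X ISR registration ->
 * bna_tx_enable(), torn down in reverse.  The error labels above
 * unwind only the steps that already succeeded.
 */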
2022 
2023 /* Setup the rx config for bna_rx_create */
2024 /* bnad decides the configuration */
2025 static void
2026 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2027 {
2028     memset(rx_config, 0, sizeof(*rx_config));
2029     rx_config->rx_type = BNA_RX_T_REGULAR;
2030     rx_config->num_paths = bnad->num_rxp_per_rx;
2031     rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2032 
2033     if (bnad->num_rxp_per_rx > 1) {
2034         rx_config->rss_status = BNA_STATUS_T_ENABLED;
2035         rx_config->rss_config.hash_type =
2036                 (BFI_ENET_RSS_IPV6 |
2037                  BFI_ENET_RSS_IPV6_TCP |
2038                  BFI_ENET_RSS_IPV4 |
2039                  BFI_ENET_RSS_IPV4_TCP);
2040         rx_config->rss_config.hash_mask =
2041                 bnad->num_rxp_per_rx - 1;
2042         netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2043             sizeof(rx_config->rss_config.toeplitz_hash_key));
2044     } else {
2045         rx_config->rss_status = BNA_STATUS_T_DISABLED;
2046         memset(&rx_config->rss_config, 0,
2047                sizeof(rx_config->rss_config));
2048     }
2049 
2050     rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2051     rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2052 
2053     /* BNA_RXP_SINGLE - one data-buffer queue
2054      * BNA_RXP_SLR - one small-buffer and one large-buffer queue
2055      * BNA_RXP_HDS - one header-buffer and one data-buffer queue
2056      */
2057     /* TODO: configurable param for queue type */
2058     rx_config->rxp_type = BNA_RXP_SLR;
2059 
2060     if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2061         rx_config->frame_size > 4096) {
2062         /* Though size_routing_enable is set in SLR mode,
2063          * small packets may still get routed to the same rxq,
2064          * so set buf_size to 2048 instead of PAGE_SIZE.
2065          */
2066         rx_config->q0_buf_size = 2048;
2067         /* this should be a multiple of 2 */
2068         rx_config->q0_num_vecs = 4;
2069         rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2070         rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2071     } else {
2072         rx_config->q0_buf_size = rx_config->frame_size;
2073         rx_config->q0_num_vecs = 1;
2074         rx_config->q0_depth = bnad->rxq_depth;
2075     }
2076 
2077     /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2078     if (rx_config->rxp_type == BNA_RXP_SLR) {
2079         rx_config->q1_depth = bnad->rxq_depth;
2080         rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2081     }
2082 
2083     rx_config->vlan_strip_status =
2084         (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2085         BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2086 }
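/*
 * Note on the RSS setup above: hash_mask is num_rxp_per_rx - 1;
 * assuming the hardware selects a path as (hash & hash_mask), this
 * covers every Rx path only when num_rxp_per_rx is a power of two
 * (e.g. 8 paths -> mask 0x7).
 */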
2087 
2088 static void
2089 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2090 {
2091     struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2092     int i;
2093 
2094     for (i = 0; i < bnad->num_rxp_per_rx; i++)
2095         rx_info->rx_ctrl[i].bnad = bnad;
2096 }
2097 
2098 /* Called with mutex_lock(&bnad->conf_mutex) held */
2099 static u32
2100 bnad_reinit_rx(struct bnad *bnad)
2101 {
2102     struct net_device *netdev = bnad->netdev;
2103     u32 err = 0, current_err = 0;
2104     u32 rx_id = 0, count = 0;
2105     unsigned long flags;
2106 
2107     /* destroy and create new rx objects */
2108     for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2109         if (!bnad->rx_info[rx_id].rx)
2110             continue;
2111         bnad_destroy_rx(bnad, rx_id);
2112     }
2113 
2114     spin_lock_irqsave(&bnad->bna_lock, flags);
2115     bna_enet_mtu_set(&bnad->bna.enet,
2116              BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2117     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2118 
2119     for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2120         count++;
2121         current_err = bnad_setup_rx(bnad, rx_id);
2122         if (current_err && !err) {
2123             err = current_err;
2124             netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2125         }
2126     }
2127 
2128     /* restore rx configuration */
2129     if (bnad->rx_info[0].rx && !err) {
2130         bnad_restore_vlans(bnad, 0);
2131         bnad_enable_default_bcast(bnad);
2132         spin_lock_irqsave(&bnad->bna_lock, flags);
2133         bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2134         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2135         bnad_set_rx_mode(netdev);
2136     }
2137 
2138     return count;
2139 }
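/*
 * Note: bnad_reinit_rx() returns the number of Rx objects it attempted
 * to set up, not an error code; individual setup failures are only
 * reported via netdev_err() above.
 */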
2140 
2141 /* Called with bnad_conf_lock() held */
2142 void
2143 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2144 {
2145     struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2146     struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2147     struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2148     unsigned long flags;
2149     int to_del = 0;
2150 
2151     if (!rx_info->rx)
2152         return;
2153 
2154     if (0 == rx_id) {
2155         spin_lock_irqsave(&bnad->bna_lock, flags);
2156         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2157             test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2158             clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2159             to_del = 1;
2160         }
2161         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2162         if (to_del)
2163             del_timer_sync(&bnad->dim_timer);
2164     }
2165 
2166     init_completion(&bnad->bnad_completions.rx_comp);
2167     spin_lock_irqsave(&bnad->bna_lock, flags);
2168     bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2169     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2170     wait_for_completion(&bnad->bnad_completions.rx_comp);
2171 
2172     if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2173         bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2174 
2175     bnad_napi_delete(bnad, rx_id);
2176 
2177     spin_lock_irqsave(&bnad->bna_lock, flags);
2178     bna_rx_destroy(rx_info->rx);
2179 
2180     rx_info->rx = NULL;
2181     rx_info->rx_id = 0;
2182     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2183 
2184     bnad_rx_res_free(bnad, res_info);
2185 }
2186 
2187 /* Called with mutex_lock(&bnad->conf_mutex) held */
2188 int
2189 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2190 {
2191     int err;
2192     struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2193     struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2194     struct bna_intr_info *intr_info =
2195             &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2196     struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2197     static const struct bna_rx_event_cbfn rx_cbfn = {
2198         .rcb_setup_cbfn = NULL,
2199         .rcb_destroy_cbfn = NULL,
2200         .ccb_setup_cbfn = bnad_cb_ccb_setup,
2201         .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2202         .rx_stall_cbfn = bnad_cb_rx_stall,
2203         .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2204         .rx_post_cbfn = bnad_cb_rx_post,
2205     };
2206     struct bna_rx *rx;
2207     unsigned long flags;
2208 
2209     rx_info->rx_id = rx_id;
2210 
2211     /* Initialize the Rx object configuration */
2212     bnad_init_rx_config(bnad, rx_config);
2213 
2214     /* Get BNA's resource requirement for one Rx object */
2215     spin_lock_irqsave(&bnad->bna_lock, flags);
2216     bna_rx_res_req(rx_config, res_info);
2217     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2218 
2219     /* Fill Unmap Q memory requirements */
2220     BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2221                  rx_config->num_paths,
2222             (rx_config->q0_depth *
2223              sizeof(struct bnad_rx_unmap)) +
2224              sizeof(struct bnad_rx_unmap_q));
2225 
2226     if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2227         BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2228                      rx_config->num_paths,
2229                 (rx_config->q1_depth *
2230                  sizeof(struct bnad_rx_unmap) +
2231                  sizeof(struct bnad_rx_unmap_q)));
2232     }
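    /*
     * Unmap-queue sizing used above, per Rx path:
     *   q_depth * sizeof(struct bnad_rx_unmap) + sizeof(struct bnad_rx_unmap_q)
     * i.e. one bnad_rx_unmap element per queue entry plus the queue
     * header, for the data queue (q0) and, with SLR/HDS, for q1 as well.
     */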
2233     /* Allocate resource */
2234     err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2235     if (err)
2236         return err;
2237 
2238     bnad_rx_ctrl_init(bnad, rx_id);
2239 
2240     /* Ask BNA to create one Rx object, supplying required resources */
2241     spin_lock_irqsave(&bnad->bna_lock, flags);
2242     rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2243             rx_info);
2244     if (!rx) {
2245         err = -ENOMEM;
2246         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2247         goto err_return;
2248     }
2249     rx_info->rx = rx;
2250     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2251 
2252     INIT_WORK(&rx_info->rx_cleanup_work,
2253             (work_func_t)(bnad_rx_cleanup));
2254 
2255     /*
2256      * Init NAPI, so that state is set to NAPI_STATE_SCHED and the
2257      * IRQ handler cannot schedule NAPI at this point.
2258      */
2259     bnad_napi_add(bnad, rx_id);
2260 
2261     /* Register ISR for the Rx object */
2262     if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2263         err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2264                         rx_config->num_paths);
2265         if (err)
2266             goto err_return;
2267     }
2268 
2269     spin_lock_irqsave(&bnad->bna_lock, flags);
2270     if (0 == rx_id) {
2271         /* Set up Dynamic Interrupt Moderation Vector */
2272         if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2273             bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2274 
2275         /* Enable VLAN filtering only on the default Rx */
2276         bna_rx_vlanfilter_enable(rx);
2277 
2278         /* Start the DIM timer */
2279         bnad_dim_timer_start(bnad);
2280     }
2281 
2282     bna_rx_enable(rx);
2283     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2284 
2285     return 0;
2286 
2287 err_return:
2288     bnad_destroy_rx(bnad, rx_id);
2289     return err;
2290 }
2291 
2292 /* Called with conf_lock & bnad->bna_lock held */
2293 void
2294 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2295 {
2296     struct bnad_tx_info *tx_info;
2297 
2298     tx_info = &bnad->tx_info[0];
2299     if (!tx_info->tx)
2300         return;
2301 
2302     bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2303 }
2304 
2305 /* Called with conf_lock & bnad->bna_lock held */
2306 void
2307 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2308 {
2309     struct bnad_rx_info *rx_info;
2310     int i;
2311 
2312     for (i = 0; i < bnad->num_rx; i++) {
2313         rx_info = &bnad->rx_info[i];
2314         if (!rx_info->rx)
2315             continue;
2316         bna_rx_coalescing_timeo_set(rx_info->rx,
2317                 bnad->rx_coalescing_timeo);
2318     }
2319 }
2320 
2321 /*
2322  * Called with bnad->bna_lock held
2323  */
2324 int
2325 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2326 {
2327     int ret;
2328 
2329     if (!is_valid_ether_addr(mac_addr))
2330         return -EADDRNOTAVAIL;
2331 
2332     /* If datapath is down, pretend everything went through */
2333     if (!bnad->rx_info[0].rx)
2334         return 0;
2335 
2336     ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2337     if (ret != BNA_CB_SUCCESS)
2338         return -EADDRNOTAVAIL;
2339 
2340     return 0;
2341 }
2342 
2343 /* Should be called with conf_lock held */
2344 int
2345 bnad_enable_default_bcast(struct bnad *bnad)
2346 {
2347     struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2348     int ret;
2349     unsigned long flags;
2350 
2351     init_completion(&bnad->bnad_completions.mcast_comp);
2352 
2353     spin_lock_irqsave(&bnad->bna_lock, flags);
2354     ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2355                    bnad_cb_rx_mcast_add);
2356     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2357 
2358     if (ret == BNA_CB_SUCCESS)
2359         wait_for_completion(&bnad->bnad_completions.mcast_comp);
2360     else
2361         return -ENODEV;
2362 
2363     if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2364         return -ENODEV;
2365 
2366     return 0;
2367 }
2368 
2369 /* Called with mutex_lock(&bnad->conf_mutex) held */
2370 void
2371 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2372 {
2373     u16 vid;
2374     unsigned long flags;
2375 
2376     for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2377         spin_lock_irqsave(&bnad->bna_lock, flags);
2378         bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2379         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2380     }
2381 }
2382 
2383 /* Statistics utilities */
2384 void
2385 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2386 {
2387     int i, j;
2388 
2389     for (i = 0; i < bnad->num_rx; i++) {
2390         for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2391             if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2392                 stats->rx_packets += bnad->rx_info[i].
2393                 rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2394                 stats->rx_bytes += bnad->rx_info[i].
2395                     rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2396                 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2397                     bnad->rx_info[i].rx_ctrl[j].ccb->
2398                     rcb[1]->rxq) {
2399                     stats->rx_packets +=
2400                         bnad->rx_info[i].rx_ctrl[j].
2401                         ccb->rcb[1]->rxq->rx_packets;
2402                     stats->rx_bytes +=
2403                         bnad->rx_info[i].rx_ctrl[j].
2404                         ccb->rcb[1]->rxq->rx_bytes;
2405                 }
2406             }
2407         }
2408     }
2409     for (i = 0; i < bnad->num_tx; i++) {
2410         for (j = 0; j < bnad->num_txq_per_tx; j++) {
2411             if (bnad->tx_info[i].tcb[j]) {
2412                 stats->tx_packets +=
2413                 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2414                 stats->tx_bytes +=
2415                     bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2416             }
2417         }
2418     }
2419 }
2420 
2421 /*
2422  * Must be called with the bna_lock held.
2423  */
2424 void
2425 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2426 {
2427     struct bfi_enet_stats_mac *mac_stats;
2428     u32 bmap;
2429     int i;
2430 
2431     mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2432     stats->rx_errors =
2433         mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2434         mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2435         mac_stats->rx_undersize;
2436     stats->tx_errors = mac_stats->tx_fcs_error +
2437                     mac_stats->tx_undersize;
2438     stats->rx_dropped = mac_stats->rx_drop;
2439     stats->tx_dropped = mac_stats->tx_drop;
2440     stats->multicast = mac_stats->rx_multicast;
2441     stats->collisions = mac_stats->tx_total_collision;
2442 
2443     stats->rx_length_errors = mac_stats->rx_frame_length_error;
2444 
2445     /* receive ring buffer overflow? */
2446 
2447     stats->rx_crc_errors = mac_stats->rx_fcs_error;
2448     stats->rx_frame_errors = mac_stats->rx_alignment_error;
2449     /* receiver FIFO overrun */
2450     bmap = bna_rx_rid_mask(&bnad->bna);
2451     for (i = 0; bmap; i++) {
2452         if (bmap & 1) {
2453             stats->rx_fifo_errors +=
2454                 bnad->stats.bna_stats->
2455                     hw_stats.rxf_stats[i].frame_drops;
2456             break;
2457         }
2458         bmap >>= 1;
2459     }
2460 }
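/*
 * Note: the rid-mask walk above breaks after the first set bit, so
 * rx_fifo_errors reflects the frame_drops of the first active RxF only.
 */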
2461 
2462 static void
2463 bnad_mbox_irq_sync(struct bnad *bnad)
2464 {
2465     u32 irq;
2466     unsigned long flags;
2467 
2468     spin_lock_irqsave(&bnad->bna_lock, flags);
2469     if (bnad->cfg_flags & BNAD_CF_MSIX)
2470         irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2471     else
2472         irq = bnad->pcidev->irq;
2473     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2474 
2475     synchronize_irq(irq);
2476 }
2477 
2478 /* Utility used by bnad_start_xmit to prepare a TSO skb */
2479 static int
2480 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2481 {
2482     int err;
2483 
2484     err = skb_cow_head(skb, 0);
2485     if (err < 0) {
2486         BNAD_UPDATE_CTR(bnad, tso_err);
2487         return err;
2488     }
2489 
2490     /*
2491      * For TSO, the TCP checksum field is seeded with the pseudo-header
2492      * checksum, excluding the length field.
2493      */
2494     if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2495         struct iphdr *iph = ip_hdr(skb);
2496 
2497         /* Do we really need these? */
2498         iph->tot_len = 0;
2499         iph->check = 0;
2500 
2501         tcp_hdr(skb)->check =
2502             ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2503                        IPPROTO_TCP, 0);
2504         BNAD_UPDATE_CTR(bnad, tso4);
2505     } else {
2506         tcp_v6_gso_csum_prep(skb);
2507         BNAD_UPDATE_CTR(bnad, tso6);
2508     }
2509 
2510     return 0;
2511 }
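/*
 * For reference, the seeding above follows the usual TSO convention:
 * the TCP checksum field is primed with the negated pseudo-header sum
 * computed over a zero length, and the hardware folds in the real
 * per-segment length and payload checksum while segmenting.
 */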
2512 
2513 /*
2514  * Initialize Q numbers depending on Rx Paths
2515  * Called with bnad->bna_lock held, because of cfg_flags
2516  * access.
2517  */
2518 static void
2519 bnad_q_num_init(struct bnad *bnad)
2520 {
2521     int rxps;
2522 
2523     rxps = min((uint)num_online_cpus(),
2524             (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2525 
2526     if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2527         rxps = 1;   /* INTx */
2528 
2529     bnad->num_rx = 1;
2530     bnad->num_tx = 1;
2531     bnad->num_rxp_per_rx = rxps;
2532     bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2533 }
2534 
2535 /*
2536  * Adjusts the Q numbers, given the number of MSI-X vectors.
2537  * Preference is given to RSS over Tx priority queues; in that
2538  * case just one Tx Q is used.
2539  * Called with bnad->bna_lock held because of cfg_flags access.
2540  */
2541 static void
2542 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2543 {
2544     bnad->num_txq_per_tx = 1;
2545     if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2546          bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2547         (bnad->cfg_flags & BNAD_CF_MSIX)) {
2548         bnad->num_rxp_per_rx = msix_vectors -
2549             (bnad->num_tx * bnad->num_txq_per_tx) -
2550             BNAD_MAILBOX_MSIX_VECTORS;
2551     } else
2552         bnad->num_rxp_per_rx = 1;
2553 }
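/*
 * Worked example for the check above, assuming BNAD_MAILBOX_MSIX_VECTORS
 * is 1 and bnad_rxqs_per_cq is 2: with num_tx = 1 and 8 granted MSI-X
 * vectors, 8 >= 1 + 2 + 1 holds and num_rxp_per_rx becomes 8 - 1 - 1 = 6;
 * with only 3 vectors the driver falls back to a single Rx path.
 */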
2554 
2555 /* Enable / disable ioceth */
2556 static int
2557 bnad_ioceth_disable(struct bnad *bnad)
2558 {
2559     unsigned long flags;
2560     int err = 0;
2561 
2562     spin_lock_irqsave(&bnad->bna_lock, flags);
2563     init_completion(&bnad->bnad_completions.ioc_comp);
2564     bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2565     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2566 
2567     wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2568         msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2569 
2570     err = bnad->bnad_completions.ioc_comp_status;
2571     return err;
2572 }
2573 
2574 static int
2575 bnad_ioceth_enable(struct bnad *bnad)
2576 {
2577     int err = 0;
2578     unsigned long flags;
2579 
2580     spin_lock_irqsave(&bnad->bna_lock, flags);
2581     init_completion(&bnad->bnad_completions.ioc_comp);
2582     bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2583     bna_ioceth_enable(&bnad->bna.ioceth);
2584     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2585 
2586     wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2587         msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2588 
2589     err = bnad->bnad_completions.ioc_comp_status;
2590 
2591     return err;
2592 }
2593 
2594 /* Free BNA resources */
2595 static void
2596 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2597         u32 res_val_max)
2598 {
2599     int i;
2600 
2601     for (i = 0; i < res_val_max; i++)
2602         bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2603 }
2604 
2605 /* Allocates memory and interrupt resources for BNA */
2606 static int
2607 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2608         u32 res_val_max)
2609 {
2610     int i, err;
2611 
2612     for (i = 0; i < res_val_max; i++) {
2613         err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2614         if (err)
2615             goto err_return;
2616     }
2617     return 0;
2618 
2619 err_return:
2620     bnad_res_free(bnad, res_info, res_val_max);
2621     return err;
2622 }
2623 
2624 /* Interrupt enable / disable */
2625 static void
2626 bnad_enable_msix(struct bnad *bnad)
2627 {
2628     int i, ret;
2629     unsigned long flags;
2630 
2631     spin_lock_irqsave(&bnad->bna_lock, flags);
2632     if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2633         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2634         return;
2635     }
2636     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2637 
2638     if (bnad->msix_table)
2639         return;
2640 
2641     bnad->msix_table =
2642         kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2643 
2644     if (!bnad->msix_table)
2645         goto intx_mode;
2646 
2647     for (i = 0; i < bnad->msix_num; i++)
2648         bnad->msix_table[i].entry = i;
2649 
2650     ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2651                     1, bnad->msix_num);
2652     if (ret < 0) {
2653         goto intx_mode;
2654     } else if (ret < bnad->msix_num) {
2655         dev_warn(&bnad->pcidev->dev,
2656              "%d MSI-X vectors allocated < %d requested\n",
2657              ret, bnad->msix_num);
2658 
2659         spin_lock_irqsave(&bnad->bna_lock, flags);
2660         /* ret = #of vectors that we got */
2661         bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2662             (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2663         spin_unlock_irqrestore(&bnad->bna_lock, flags);
2664 
2665         bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2666              BNAD_MAILBOX_MSIX_VECTORS;
2667 
2668         if (bnad->msix_num > ret) {
2669             pci_disable_msix(bnad->pcidev);
2670             goto intx_mode;
2671         }
2672     }
2673 
2674     pci_intx(bnad->pcidev, 0);
2675 
2676     return;
2677 
2678 intx_mode:
2679     dev_warn(&bnad->pcidev->dev,
2680          "MSI-X enable failed - operating in INTx mode\n");
2681 
2682     kfree(bnad->msix_table);
2683     bnad->msix_table = NULL;
2684     bnad->msix_num = 0;
2685     spin_lock_irqsave(&bnad->bna_lock, flags);
2686     bnad->cfg_flags &= ~BNAD_CF_MSIX;
2687     bnad_q_num_init(bnad);
2688     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2689 }
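/*
 * Summary of the fallback chain above: if fewer vectors than requested
 * are granted, the queue counts are re-derived from the grant and the
 * vector budget recomputed; if even that cannot be satisfied, or the
 * table allocation/enable fails outright, MSI-X is abandoned and the
 * driver reverts to INTx with single-queue defaults via bnad_q_num_init().
 */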
2690 
2691 static void
2692 bnad_disable_msix(struct bnad *bnad)
2693 {
2694     u32 cfg_flags;
2695     unsigned long flags;
2696 
2697     spin_lock_irqsave(&bnad->bna_lock, flags);
2698     cfg_flags = bnad->cfg_flags;
2699     if (bnad->cfg_flags & BNAD_CF_MSIX)
2700         bnad->cfg_flags &= ~BNAD_CF_MSIX;
2701     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2702 
2703     if (cfg_flags & BNAD_CF_MSIX) {
2704         pci_disable_msix(bnad->pcidev);
2705         kfree(bnad->msix_table);
2706         bnad->msix_table = NULL;
2707     }
2708 }
2709 
2710 /* Netdev entry points */
2711 static int
2712 bnad_open(struct net_device *netdev)
2713 {
2714     int err;
2715     struct bnad *bnad = netdev_priv(netdev);
2716     struct bna_pause_config pause_config;
2717     unsigned long flags;
2718 
2719     mutex_lock(&bnad->conf_mutex);
2720 
2721     /* Tx */
2722     err = bnad_setup_tx(bnad, 0);
2723     if (err)
2724         goto err_return;
2725 
2726     /* Rx */
2727     err = bnad_setup_rx(bnad, 0);
2728     if (err)
2729         goto cleanup_tx;
2730 
2731     /* Port */
2732     pause_config.tx_pause = 0;
2733     pause_config.rx_pause = 0;
2734 
2735     spin_lock_irqsave(&bnad->bna_lock, flags);
2736     bna_enet_mtu_set(&bnad->bna.enet,
2737              BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2738     bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2739     bna_enet_enable(&bnad->bna.enet);
2740     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2741 
2742     /* Enable broadcast */
2743     bnad_enable_default_bcast(bnad);
2744 
2745     /* Restore VLANs, if any */
2746     bnad_restore_vlans(bnad, 0);
2747 
2748     /* Set the UCAST address */
2749     spin_lock_irqsave(&bnad->bna_lock, flags);
2750     bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2751     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2752 
2753     /* Start the stats timer */
2754     bnad_stats_timer_start(bnad);
2755 
2756     mutex_unlock(&bnad->conf_mutex);
2757 
2758     return 0;
2759 
2760 cleanup_tx:
2761     bnad_destroy_tx(bnad, 0);
2762 
2763 err_return:
2764     mutex_unlock(&bnad->conf_mutex);
2765     return err;
2766 }
2767 
2768 static int
2769 bnad_stop(struct net_device *netdev)
2770 {
2771     struct bnad *bnad = netdev_priv(netdev);
2772     unsigned long flags;
2773 
2774     mutex_lock(&bnad->conf_mutex);
2775 
2776     /* Stop the stats timer */
2777     bnad_stats_timer_stop(bnad);
2778 
2779     init_completion(&bnad->bnad_completions.enet_comp);
2780 
2781     spin_lock_irqsave(&bnad->bna_lock, flags);
2782     bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2783             bnad_cb_enet_disabled);
2784     spin_unlock_irqrestore(&bnad->bna_lock, flags);
2785 
2786     wait_for_completion(&bnad->bnad_completions.enet_comp);
2787 
2788     bnad_destroy_tx(bnad, 0);
2789     bnad_destroy_rx(bnad, 0);
2790 
2791     /* Synchronize mailbox IRQ */
2792     bnad_mbox_irq_sync(bnad);
2793 
2794     mutex_unlock(&bnad->conf_mutex);
2795 
2796     return 0;
2797 }
2798 
2799 /* TX */
2800 /* Returns 0 for success */
2801 static int
2802 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2803             struct sk_buff *skb, struct bna_txq_entry *txqent)
2804 {
2805     u16 flags = 0;
2806     u32 gso_size;
2807     u16 vlan_tag = 0;
2808 
2809     if (skb_vlan_tag_present(skb)) {
2810         vlan_tag = (u16)skb_vlan_tag_get(skb);
2811         flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2812     }
2813     if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2814         vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2815                 | (vlan_tag & 0x1fff);
2816         flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2817     }
2818     txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2819 
2820     if (skb_is_gso(skb)) {
2821         gso_size = skb_shinfo(skb)->gso_size;
2822         if (unlikely(gso_size > bnad->netdev->mtu)) {
2823             BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2824             return -EINVAL;
2825         }
2826         if (unlikely((gso_size + skb_tcp_all_headers(skb)) >= skb->len)) {
2827             txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2828             txqent->hdr.wi.lso_mss = 0;
2829             BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2830         } else {
2831             txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2832             txqent->hdr.wi.lso_mss = htons(gso_size);
2833         }
2834 
2835         if (bnad_tso_prepare(bnad, skb)) {
2836             BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2837             return -EINVAL;
2838         }
2839 
2840         flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2841         txqent->hdr.wi.l4_hdr_size_n_offset =
2842             htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2843             tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2844     } else  {
2845         txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2846         txqent->hdr.wi.lso_mss = 0;
2847 
2848         if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2849             BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2850             return -EINVAL;
2851         }
2852 
2853         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2854             __be16 net_proto = vlan_get_protocol(skb);
2855             u8 proto = 0;
2856 
2857             if (net_proto == htons(ETH_P_IP))
2858                 proto = ip_hdr(skb)->protocol;
2859 #ifdef NETIF_F_IPV6_CSUM
2860             else if (net_proto == htons(ETH_P_IPV6)) {
2861                 /* nexthdr may not be TCP immediately. */
2862                 proto = ipv6_hdr(skb)->nexthdr;
2863             }
2864 #endif
2865             if (proto == IPPROTO_TCP) {
2866                 flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2867                 txqent->hdr.wi.l4_hdr_size_n_offset =
2868                     htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2869                           (0, skb_transport_offset(skb)));
2870 
2871                 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2872 
2873                 if (unlikely(skb_headlen(skb) <
2874                         skb_tcp_all_headers(skb))) {
2875                     BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2876                     return -EINVAL;
2877                 }
2878             } else if (proto == IPPROTO_UDP) {
2879                 flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2880                 txqent->hdr.wi.l4_hdr_size_n_offset =
2881                     htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2882                           (0, skb_transport_offset(skb)));
2883 
2884                 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2885                 if (unlikely(skb_headlen(skb) <
2886                         skb_transport_offset(skb) +
2887                     sizeof(struct udphdr))) {
2888                     BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2889                     return -EINVAL;
2890                 }
2891             } else {
2892 
2893                 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2894                 return -EINVAL;
2895             }
2896         } else
2897             txqent->hdr.wi.l4_hdr_size_n_offset = 0;
2898     }
2899 
2900     txqent->hdr.wi.flags = htons(flags);
2901     txqent->hdr.wi.frame_length = htonl(skb->len);
2902 
2903     return 0;
2904 }
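/*
 * Note on the VLAN handling above: when CEE is running, the PCP bits
 * are overridden with the Tx object's priority (tcb->priority << 13)
 * while the lower 13 bits (DEI + VID) of any existing tag are kept,
 * and tag insertion is requested even for otherwise untagged frames.
 */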
2905 
2906 /*
2907  * bnad_start_xmit: Netdev entry point for Transmit
2908  *           Called under lock held by net_device
2909  */
2910 static netdev_tx_t
2911 bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2912 {
2913     struct bnad *bnad = netdev_priv(netdev);
2914     u32 txq_id = 0;
2915     struct bna_tcb *tcb = NULL;
2916     struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2917     u32     prod, q_depth, vect_id;
2918     u32     wis, vectors, len;
2919     int     i;
2920     dma_addr_t      dma_addr;
2921     struct bna_txq_entry *txqent;
2922 
2923     len = skb_headlen(skb);
2924 
2925     /* Sanity checks for the skb */
2926 
2927     if (unlikely(skb->len <= ETH_HLEN)) {
2928         dev_kfree_skb_any(skb);
2929         BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2930         return NETDEV_TX_OK;
2931     }
2932     if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2933         dev_kfree_skb_any(skb);
2934         BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2935         return NETDEV_TX_OK;
2936     }
2937     if (unlikely(len == 0)) {
2938         dev_kfree_skb_any(skb);
2939         BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2940         return NETDEV_TX_OK;
2941     }
2942 
2943     tcb = bnad->tx_info[0].tcb[txq_id];
2944 
2945     /*
2946      * Takes care of the Tx that is scheduled between clearing the flag
2947      * and the netif_tx_stop_all_queues() call.
2948      */
2949     if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2950         dev_kfree_skb_any(skb);
2951         BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2952         return NETDEV_TX_OK;
2953     }
2954 
2955     q_depth = tcb->q_depth;
2956     prod = tcb->producer_index;
2957     unmap_q = tcb->unmap_q;
2958 
2959     vectors = 1 + skb_shinfo(skb)->nr_frags;
2960     wis = BNA_TXQ_WI_NEEDED(vectors);   /* 4 vectors per work item */
2961 
2962     if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2963         dev_kfree_skb_any(skb);
2964         BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2965         return NETDEV_TX_OK;
2966     }
2967 
2968     /* Check for available TxQ resources */
2969     if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2970         if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2971             !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2972             u32 sent;
2973             sent = bnad_txcmpl_process(bnad, tcb);
2974             if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2975                 bna_ib_ack(tcb->i_dbell, sent);
2976             smp_mb__before_atomic();
2977             clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2978         } else {
2979             netif_stop_queue(netdev);
2980             BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2981         }
2982 
2983         smp_mb();
2984         /*
2985          * Check again to deal with the race between
2986          * netif_stop_queue() here and netif_wake_queue() in the
2987          * interrupt handler, which runs outside the netif tx lock.
2988          */
2989         if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2990             BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2991             return NETDEV_TX_BUSY;
2992         } else {
2993             netif_wake_queue(netdev);
2994             BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2995         }
2996     }
2997 
2998     txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
2999     head_unmap = &unmap_q[prod];
3000 
3001     /* Program the opcode, flags, frame_len, num_vectors in WI */
3002     if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3003         dev_kfree_skb_any(skb);
3004         return NETDEV_TX_OK;
3005     }
3006     txqent->hdr.wi.reserved = 0;
3007     txqent->hdr.wi.num_vectors = vectors;
3008 
3009     head_unmap->skb = skb;
3010     head_unmap->nvecs = 0;
3011 
3012     /* Program the vectors */
3013     unmap = head_unmap;
3014     dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3015                   len, DMA_TO_DEVICE);
3016     if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3017         dev_kfree_skb_any(skb);
3018         BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3019         return NETDEV_TX_OK;
3020     }
3021     BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3022     txqent->vector[0].length = htons(len);
3023     dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3024     head_unmap->nvecs++;
3025 
3026     for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3027         const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3028         u32     size = skb_frag_size(frag);
3029 
3030         if (unlikely(size == 0)) {
3031             /* Undo the changes starting at tcb->producer_index */
3032             bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3033                 tcb->producer_index);
3034             dev_kfree_skb_any(skb);
3035             BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3036             return NETDEV_TX_OK;
3037         }
3038 
3039         len += size;
3040 
3041         vect_id++;
3042         if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3043             vect_id = 0;
3044             BNA_QE_INDX_INC(prod, q_depth);
3045             txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3046             txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3047             unmap = &unmap_q[prod];
3048         }
3049 
3050         dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3051                         0, size, DMA_TO_DEVICE);
3052         if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3053             /* Undo the changes starting at tcb->producer_index */
3054             bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3055                        tcb->producer_index);
3056             dev_kfree_skb_any(skb);
3057             BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3058             return NETDEV_TX_OK;
3059         }
3060 
3061         dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3062         BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3063         txqent->vector[vect_id].length = htons(size);
3064         dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3065                    dma_addr);
3066         head_unmap->nvecs++;
3067     }
3068 
3069     if (unlikely(len != skb->len)) {
3070         /* Undo the changes starting at tcb->producer_index */
3071         bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3072         dev_kfree_skb_any(skb);
3073         BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3074         return NETDEV_TX_OK;
3075     }
3076 
3077     BNA_QE_INDX_INC(prod, q_depth);
3078     tcb->producer_index = prod;
3079 
3080     wmb();
3081 
3082     if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3083         return NETDEV_TX_OK;
3084 
3085     skb_tx_timestamp(skb);
3086 
3087     bna_txq_prod_indx_doorbell(tcb);
3088 
3089     return NETDEV_TX_OK;
3090 }
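/*
 * Work-item layout used above: each bna_txq_entry carries up to
 * BFI_TX_MAX_VECTORS_PER_WI (4) scatter vectors, and
 * BNA_TXQ_WI_NEEDED(vectors) gives the number of entries consumed.
 * For example, an skb with a linear head plus 6 frags needs 7 vectors,
 * i.e. one primary WI and one BNA_TXQ_WI_EXTENSION entry.
 */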
3091 
3092 /*
3093  * Uses a spin_lock to synchronize reading of the stats structures,
3094  * which are written by BNA under the same lock.
3095  */
3096 static void
3097 bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3098 {
3099     struct bnad *bnad = netdev_priv(netdev);
3100     unsigned long flags;
3101 
3102     spin_lock_irqsave(&bnad->bna_lock, flags);
3103 
3104     bnad_netdev_qstats_fill(bnad, stats);
3105     bnad_netdev_hwstats_fill(bnad, stats);
3106 
3107     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3108 }
3109 
3110 static void
3111 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3112 {
3113     struct net_device *netdev = bnad->netdev;
3114     int uc_count = netdev_uc_count(netdev);
3115     enum bna_cb_status ret;
3116     u8 *mac_list;
3117     struct netdev_hw_addr *ha;
3118     int entry;
3119 
3120     if (netdev_uc_empty(bnad->netdev)) {
3121         bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3122         return;
3123     }
3124 
3125     if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3126         goto mode_default;
3127 
3128     mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
3129     if (mac_list == NULL)
3130         goto mode_default;
3131 
3132     entry = 0;
3133     netdev_for_each_uc_addr(ha, netdev) {
3134         ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3135         entry++;
3136     }
3137 
3138     ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3139     kfree(mac_list);
3140 
3141     if (ret != BNA_CB_SUCCESS)
3142         goto mode_default;
3143 
3144     return;
3145 
3146     /* ucast packets not in UCAM are routed to default function */
3147 mode_default:
3148     bnad->cfg_flags |= BNAD_CF_DEFAULT;
3149     bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3150 }
3151 
3152 static void
3153 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3154 {
3155     struct net_device *netdev = bnad->netdev;
3156     int mc_count = netdev_mc_count(netdev);
3157     enum bna_cb_status ret;
3158     u8 *mac_list;
3159 
3160     if (netdev->flags & IFF_ALLMULTI)
3161         goto mode_allmulti;
3162 
3163     if (netdev_mc_empty(netdev))
3164         return;
3165 
3166     if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3167         goto mode_allmulti;
3168 
3169     mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3170 
3171     if (mac_list == NULL)
3172         goto mode_allmulti;
3173 
3174     ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3175 
3176     /* copy rest of the MCAST addresses */
3177     bnad_netdev_mc_list_get(netdev, mac_list);
3178     ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3179     kfree(mac_list);
3180 
3181     if (ret != BNA_CB_SUCCESS)
3182         goto mode_allmulti;
3183 
3184     return;
3185 
3186 mode_allmulti:
3187     bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3188     bna_rx_mcast_delall(bnad->rx_info[0].rx);
3189 }
3190 
3191 void
3192 bnad_set_rx_mode(struct net_device *netdev)
3193 {
3194     struct bnad *bnad = netdev_priv(netdev);
3195     enum bna_rxmode new_mode, mode_mask;
3196     unsigned long flags;
3197 
3198     spin_lock_irqsave(&bnad->bna_lock, flags);
3199 
3200     if (bnad->rx_info[0].rx == NULL) {
3201         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3202         return;
3203     }
3204 
3205     /* clear bnad flags to update it with new settings */
3206     bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3207             BNAD_CF_ALLMULTI);
3208 
3209     new_mode = 0;
3210     if (netdev->flags & IFF_PROMISC) {
3211         new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3212         bnad->cfg_flags |= BNAD_CF_PROMISC;
3213     } else {
3214         bnad_set_rx_mcast_fltr(bnad);
3215 
3216         if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3217             new_mode |= BNA_RXMODE_ALLMULTI;
3218 
3219         bnad_set_rx_ucast_fltr(bnad);
3220 
3221         if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3222             new_mode |= BNA_RXMODE_DEFAULT;
3223     }
3224 
3225     mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3226             BNA_RXMODE_ALLMULTI;
3227     bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3228 
3229     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3230 }
3231 
3232 /*
3233  * bna_lock is used to sync writes to netdev->addr
3234  * conf_lock cannot be used since this call may be made
3235  * in a non-blocking context.
3236  */
3237 static int
3238 bnad_set_mac_address(struct net_device *netdev, void *addr)
3239 {
3240     int err;
3241     struct bnad *bnad = netdev_priv(netdev);
3242     struct sockaddr *sa = (struct sockaddr *)addr;
3243     unsigned long flags;
3244 
3245     spin_lock_irqsave(&bnad->bna_lock, flags);
3246 
3247     err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3248     if (!err)
3249         eth_hw_addr_set(netdev, sa->sa_data);
3250 
3251     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3252 
3253     return err;
3254 }
3255 
3256 static int
3257 bnad_mtu_set(struct bnad *bnad, int frame_size)
3258 {
3259     unsigned long flags;
3260 
3261     init_completion(&bnad->bnad_completions.mtu_comp);
3262 
3263     spin_lock_irqsave(&bnad->bna_lock, flags);
3264     bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3265     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3266 
3267     wait_for_completion(&bnad->bnad_completions.mtu_comp);
3268 
3269     return bnad->bnad_completions.mtu_comp_status;
3270 }
3271 
3272 static int
3273 bnad_change_mtu(struct net_device *netdev, int new_mtu)
3274 {
3275     int err, mtu;
3276     struct bnad *bnad = netdev_priv(netdev);
3277     u32 frame, new_frame;
3278 
3279     mutex_lock(&bnad->conf_mutex);
3280 
3281     mtu = netdev->mtu;
3282     netdev->mtu = new_mtu;
3283 
3284     frame = BNAD_FRAME_SIZE(mtu);
3285     new_frame = BNAD_FRAME_SIZE(new_mtu);
3286 
3287     /* check if multi-buffer needs to be enabled */
3288     if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3289         netif_running(bnad->netdev)) {
3290         /* only when transition is over 4K */
3291         if ((frame <= 4096 && new_frame > 4096) ||
3292             (frame > 4096 && new_frame <= 4096))
3293             bnad_reinit_rx(bnad);
3294     }
3295 
3296     err = bnad_mtu_set(bnad, new_frame);
3297     if (err)
3298         err = -EBUSY;
3299 
3300     mutex_unlock(&bnad->conf_mutex);
3301     return err;
3302 }
3303 
3304 static int
3305 bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3306 {
3307     struct bnad *bnad = netdev_priv(netdev);
3308     unsigned long flags;
3309 
3310     if (!bnad->rx_info[0].rx)
3311         return 0;
3312 
3313     mutex_lock(&bnad->conf_mutex);
3314 
3315     spin_lock_irqsave(&bnad->bna_lock, flags);
3316     bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3317     set_bit(vid, bnad->active_vlans);
3318     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3319 
3320     mutex_unlock(&bnad->conf_mutex);
3321 
3322     return 0;
3323 }
3324 
3325 static int
3326 bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3327 {
3328     struct bnad *bnad = netdev_priv(netdev);
3329     unsigned long flags;
3330 
3331     if (!bnad->rx_info[0].rx)
3332         return 0;
3333 
3334     mutex_lock(&bnad->conf_mutex);
3335 
3336     spin_lock_irqsave(&bnad->bna_lock, flags);
3337     clear_bit(vid, bnad->active_vlans);
3338     bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3339     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3340 
3341     mutex_unlock(&bnad->conf_mutex);
3342 
3343     return 0;
3344 }
3345 
3346 static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3347 {
3348     struct bnad *bnad = netdev_priv(dev);
3349     netdev_features_t changed = features ^ dev->features;
3350 
3351     if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3352         unsigned long flags;
3353 
3354         spin_lock_irqsave(&bnad->bna_lock, flags);
3355 
3356         if (features & NETIF_F_HW_VLAN_CTAG_RX)
3357             bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3358         else
3359             bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3360 
3361         spin_unlock_irqrestore(&bnad->bna_lock, flags);
3362     }
3363 
3364     return 0;
3365 }
3366 
3367 #ifdef CONFIG_NET_POLL_CONTROLLER
3368 static void
3369 bnad_netpoll(struct net_device *netdev)
3370 {
3371     struct bnad *bnad = netdev_priv(netdev);
3372     struct bnad_rx_info *rx_info;
3373     struct bnad_rx_ctrl *rx_ctrl;
3374     u32 curr_mask;
3375     int i, j;
3376 
3377     if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3378         bna_intx_disable(&bnad->bna, curr_mask);
3379         bnad_isr(bnad->pcidev->irq, netdev);
3380         bna_intx_enable(&bnad->bna, curr_mask);
3381     } else {
3382         /*
3383          * Tx processing may happen in sending context, so no need
3384          * to explicitly process completions here
3385          */
3386 
3387         /* Rx processing */
3388         for (i = 0; i < bnad->num_rx; i++) {
3389             rx_info = &bnad->rx_info[i];
3390             if (!rx_info->rx)
3391                 continue;
3392             for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3393                 rx_ctrl = &rx_info->rx_ctrl[j];
3394                 if (rx_ctrl->ccb)
3395                     bnad_netif_rx_schedule_poll(bnad,
3396                                 rx_ctrl->ccb);
3397             }
3398         }
3399     }
3400 }
3401 #endif
3402 
3403 static const struct net_device_ops bnad_netdev_ops = {
3404     .ndo_open       = bnad_open,
3405     .ndo_stop       = bnad_stop,
3406     .ndo_start_xmit     = bnad_start_xmit,
3407     .ndo_get_stats64    = bnad_get_stats64,
3408     .ndo_set_rx_mode    = bnad_set_rx_mode,
3409     .ndo_validate_addr      = eth_validate_addr,
3410     .ndo_set_mac_address    = bnad_set_mac_address,
3411     .ndo_change_mtu     = bnad_change_mtu,
3412     .ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3413     .ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3414     .ndo_set_features   = bnad_set_features,
3415 #ifdef CONFIG_NET_POLL_CONTROLLER
3416     .ndo_poll_controller    = bnad_netpoll
3417 #endif
3418 };
3419 
3420 static void
3421 bnad_netdev_init(struct bnad *bnad)
3422 {
3423     struct net_device *netdev = bnad->netdev;
3424 
3425     netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3426         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3427         NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3428         NETIF_F_HW_VLAN_CTAG_RX;
3429 
3430     netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3431         NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3432         NETIF_F_TSO | NETIF_F_TSO6;
3433 
3434     netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER |
3435                 NETIF_F_HIGHDMA;
3436 
3437     netdev->mem_start = bnad->mmio_start;
3438     netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3439 
3440     /* MTU range: 46 - 9000 */
3441     netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3442     netdev->max_mtu = BNAD_JUMBO_MTU;
3443 
3444     netdev->netdev_ops = &bnad_netdev_ops;
3445     bnad_set_ethtool_ops(netdev);
3446 }
3447 
3448 /*
3449  * 1. Initialize the bnad structure
3450  * 2. Set up the netdev pointer in pci_dev
3451  * 3. Initialize the number of TxQs, CQs and MSI-X vectors
3452  * 4. Initialize the work queue.
3453  */
3454 static int
3455 bnad_init(struct bnad *bnad,
3456       struct pci_dev *pdev, struct net_device *netdev)
3457 {
3458     unsigned long flags;
3459 
3460     SET_NETDEV_DEV(netdev, &pdev->dev);
3461     pci_set_drvdata(pdev, netdev);
3462 
3463     bnad->netdev = netdev;
3464     bnad->pcidev = pdev;
3465     bnad->mmio_start = pci_resource_start(pdev, 0);
3466     bnad->mmio_len = pci_resource_len(pdev, 0);
3467     bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3468     if (!bnad->bar0) {
3469         dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3470         return -ENOMEM;
3471     }
3472     dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3473          (unsigned long long) bnad->mmio_len);
3474 
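    /*
     * Default to MSI-X unless it was disabled through the
     * bnad_msix_disable module parameter; dynamic interrupt
     * moderation (DIM) is enabled unconditionally.
     */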
3475     spin_lock_irqsave(&bnad->bna_lock, flags);
3476     if (!bnad_msix_disable)
3477         bnad->cfg_flags = BNAD_CF_MSIX;
3478 
3479     bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3480 
3481     bnad_q_num_init(bnad);
3482     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3483 
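    /* One MSI-X vector per TxQ and per RxP, plus the mailbox vector(s) */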
3484     bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3485         (bnad->num_rx * bnad->num_rxp_per_rx) +
3486              BNAD_MAILBOX_MSIX_VECTORS;
3487 
3488     bnad->txq_depth = BNAD_TXQ_DEPTH;
3489     bnad->rxq_depth = BNAD_RXQ_DEPTH;
3490 
3491     bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3492     bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3493 
3494     sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3495     bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3496     if (!bnad->work_q) {
3497         iounmap(bnad->bar0);
3498         return -ENOMEM;
3499     }
3500 
3501     return 0;
3502 }
3503 
3504 /*
3505  * Must be called after bnad_pci_uninit()
3506  * so that iounmap() happens only after
3507  * PCI uninitialization.
3508  */
3509 static void
3510 bnad_uninit(struct bnad *bnad)
3511 {
3512     if (bnad->work_q) {
3513         destroy_workqueue(bnad->work_q);
3514         bnad->work_q = NULL;
3515     }
3516 
3517     if (bnad->bar0)
3518         iounmap(bnad->bar0);
3519 }
3520 
3521 /*
3522  * Initialize locks
3523  *    a) Per-ioceth mutex used for serializing configuration
3524  *       changes from the OS interface
3525  *    b) Spin lock used to protect the bna state machine
3526  */
3527 static void
3528 bnad_lock_init(struct bnad *bnad)
3529 {
3530     spin_lock_init(&bnad->bna_lock);
3531     mutex_init(&bnad->conf_mutex);
3532 }
3533 
3534 static void
3535 bnad_lock_uninit(struct bnad *bnad)
3536 {
3537     mutex_destroy(&bnad->conf_mutex);
3538 }
3539 
3540 /* PCI Initialization */
3541 static int
3542 bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
3543 {
3544     int err;
3545 
3546     err = pci_enable_device(pdev);
3547     if (err)
3548         return err;
3549     err = pci_request_regions(pdev, BNAD_NAME);
3550     if (err)
3551         goto disable_device;
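    /* The device is driven with 64-bit DMA only; there is no 32-bit fallback */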
3552     err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3553     if (err)
3554         goto release_regions;
3555     pci_set_master(pdev);
3556     return 0;
3557 
3558 release_regions:
3559     pci_release_regions(pdev);
3560 disable_device:
3561     pci_disable_device(pdev);
3562 
3563     return err;
3564 }
3565 
3566 static void
3567 bnad_pci_uninit(struct pci_dev *pdev)
3568 {
3569     pci_release_regions(pdev);
3570     pci_disable_device(pdev);
3571 }
3572 
3573 static int
3574 bnad_pci_probe(struct pci_dev *pdev,
3575         const struct pci_device_id *pcidev_id)
3576 {
3577     int err;
3578     struct bnad *bnad;
3579     struct bna *bna;
3580     struct net_device *netdev;
3581     struct bfa_pcidev pcidev_info;
3582     unsigned long flags;
3583 
3584     mutex_lock(&bnad_fwimg_mutex);
3585     if (!cna_get_firmware_buf(pdev)) {
3586         mutex_unlock(&bnad_fwimg_mutex);
3587         dev_err(&pdev->dev, "failed to load firmware image!\n");
3588         return -ENODEV;
3589     }
3590     mutex_unlock(&bnad_fwimg_mutex);
3591 
3592     /*
3593      * Allocate the net_device along with sizeof(struct bnad)
3594      * of private space; bnad = netdev_priv(netdev)
3595      */
3596     netdev = alloc_etherdev(sizeof(struct bnad));
3597     if (!netdev) {
3598         err = -ENOMEM;
3599         return err;
3600     }
3601     bnad = netdev_priv(netdev);
3602     bnad_lock_init(bnad);
3603     bnad->id = atomic_inc_return(&bna_id) - 1;
3604 
3605     mutex_lock(&bnad->conf_mutex);
3606     /* PCI initialization */
3607     err = bnad_pci_init(bnad, pdev);
3608     if (err)
3609         goto unlock_mutex;
3610 
3611     /*
3612      * Initialize bnad structure
3613      * Set up the relation between pci_dev & netdev
3614      */
3615     err = bnad_init(bnad, pdev, netdev);
3616     if (err)
3617         goto pci_uninit;
3618 
3619     /* Initialize netdev structure, set up ethtool ops */
3620     bnad_netdev_init(bnad);
3621 
3622     /* Set link to down state */
3623     netif_carrier_off(netdev);
3624 
3625     /* Set up the debugfs node for this bnad */
3626     if (bna_debugfs_enable)
3627         bnad_debugfs_init(bnad);
3628 
3629     /* Get resource requirements from bna */
3630     spin_lock_irqsave(&bnad->bna_lock, flags);
3631     bna_res_req(&bnad->res_info[0]);
3632     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3633 
3634     /* Allocate resources from bna */
3635     err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3636     if (err)
3637         goto drv_uninit;
3638 
3639     bna = &bnad->bna;
3640 
3641     /* Set up pcidev_info for bna_init() */
3642     pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3643     pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3644     pcidev_info.device_id = bnad->pcidev->device;
3645     pcidev_info.pci_bar_kva = bnad->bar0;
3646 
3647     spin_lock_irqsave(&bnad->bna_lock, flags);
3648     bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3649     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3650 
3651     bnad->stats.bna_stats = &bna->stats;
3652 
3653     bnad_enable_msix(bnad);
3654     err = bnad_mbox_irq_alloc(bnad);
3655     if (err)
3656         goto res_free;
3657 
3658     /* Set up timers */
3659     timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3660     timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3661     timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3662     timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3663             0);
3664 
3665     /*
3666      * Start the chip.
3667      * If the enable callback comes back with an error, we bail
3668      * out: this is a catastrophic error and no netdev is registered.
3669      */
3670     err = bnad_ioceth_enable(bnad);
3671     if (err) {
3672         dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3673         goto probe_success;
3674     }
3675 
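    /*
     * Negotiate queue counts with the IOC: if the requested TxQ/RxP
     * numbers are not supported, scale them down to what the hardware
     * advertises and retry once before giving up.
     */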
3676     spin_lock_irqsave(&bnad->bna_lock, flags);
3677     if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3678         bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3679         bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3680             bna_attr(bna)->num_rxp - 1);
3681         if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3682             bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3683             err = -EIO;
3684     }
3685     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3686     if (err)
3687         goto disable_ioceth;
3688 
3689     spin_lock_irqsave(&bnad->bna_lock, flags);
3690     bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3691     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3692 
3693     err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3694     if (err) {
3695         err = -EIO;
3696         goto disable_ioceth;
3697     }
3698 
3699     spin_lock_irqsave(&bnad->bna_lock, flags);
3700     bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3701     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3702 
3703     /* Get the burnt-in MAC address */
3704     spin_lock_irqsave(&bnad->bna_lock, flags);
3705     bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3706     bnad_set_netdev_perm_addr(bnad);
3707     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3708 
3709     mutex_unlock(&bnad->conf_mutex);
3710 
3711     /* Finally, register with the net_device layer */
3712     err = register_netdev(netdev);
3713     if (err) {
3714         dev_err(&pdev->dev, "registering net device failed\n");
3715         goto probe_uninit;
3716     }
3717     set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3718 
3719     return 0;
3720 
3721 probe_success:
3722     mutex_unlock(&bnad->conf_mutex);
3723     return 0;
3724 
3725 probe_uninit:
3726     mutex_lock(&bnad->conf_mutex);
3727     bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3728 disable_ioceth:
3729     bnad_ioceth_disable(bnad);
3730     del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3731     del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3732     del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3733     spin_lock_irqsave(&bnad->bna_lock, flags);
3734     bna_uninit(bna);
3735     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3736     bnad_mbox_irq_free(bnad);
3737     bnad_disable_msix(bnad);
3738 res_free:
3739     bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3740 drv_uninit:
3741     /* Remove the debugfs node for this bnad */
3742     kfree(bnad->regdata);
3743     bnad_debugfs_uninit(bnad);
3744     bnad_uninit(bnad);
3745 pci_uninit:
3746     bnad_pci_uninit(pdev);
3747 unlock_mutex:
3748     mutex_unlock(&bnad->conf_mutex);
3749     bnad_lock_uninit(bnad);
3750     free_netdev(netdev);
3751     return err;
3752 }
3753 
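/*
 * Undo probe in reverse order: unregister the netdev, disable the IOC,
 * stop its timers, free bna resources, then release MSI-X, PCI and
 * finally the netdev itself.
 */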
3754 static void
3755 bnad_pci_remove(struct pci_dev *pdev)
3756 {
3757     struct net_device *netdev = pci_get_drvdata(pdev);
3758     struct bnad *bnad;
3759     struct bna *bna;
3760     unsigned long flags;
3761 
3762     if (!netdev)
3763         return;
3764 
3765     bnad = netdev_priv(netdev);
3766     bna = &bnad->bna;
3767 
3768     if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3769         unregister_netdev(netdev);
3770 
3771     mutex_lock(&bnad->conf_mutex);
3772     bnad_ioceth_disable(bnad);
3773     del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3774     del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3775     del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3776     spin_lock_irqsave(&bnad->bna_lock, flags);
3777     bna_uninit(bna);
3778     spin_unlock_irqrestore(&bnad->bna_lock, flags);
3779 
3780     bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3781     bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3782     bnad_mbox_irq_free(bnad);
3783     bnad_disable_msix(bnad);
3784     bnad_pci_uninit(pdev);
3785     mutex_unlock(&bnad->conf_mutex);
3786     bnad_lock_uninit(bnad);
3787     /* Remove the debugfs node for this bnad */
3788     kfree(bnad->regdata);
3789     bnad_debugfs_uninit(bnad);
3790     bnad_uninit(bnad);
3791     free_netdev(netdev);
3792 }
3793 
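/*
 * The class/class_mask entries restrict the match to the Ethernet-class
 * PCI functions of these converged (CT/CT2) adapters.
 */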
3794 static const struct pci_device_id bnad_pci_id_table[] = {
3795     {
3796         PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3797             PCI_DEVICE_ID_BROCADE_CT),
3798         .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3799         .class_mask =  0xffff00
3800     },
3801     {
3802         PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3803             BFA_PCI_DEVICE_ID_CT2),
3804         .class = PCI_CLASS_NETWORK_ETHERNET << 8,
3805         .class_mask =  0xffff00
3806     },
3807     {0,  },
3808 };
3809 
3810 MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3811 
3812 static struct pci_driver bnad_pci_driver = {
3813     .name = BNAD_NAME,
3814     .id_table = bnad_pci_id_table,
3815     .probe = bnad_pci_probe,
3816     .remove = bnad_pci_remove,
3817 };
3818 
3819 static int __init
3820 bnad_module_init(void)
3821 {
3822     int err;
3823 
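    /* Propagate the auto-recovery module parameter to the IOC layer */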
3824     bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3825 
3826     err = pci_register_driver(&bnad_pci_driver);
3827     if (err < 0) {
3828         pr_err("bna: PCI driver registration failed err=%d\n", err);
3829         return err;
3830     }
3831 
3832     return 0;
3833 }
3834 
3835 static void __exit
3836 bnad_module_exit(void)
3837 {
3838     pci_unregister_driver(&bnad_pci_driver);
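    /* Release the firmware image cached at probe time (bfi_fw) */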
3839     release_firmware(bfi_fw);
3840 }
3841 
3842 module_init(bnad_module_init);
3843 module_exit(bnad_module_exit);
3844 
3845 MODULE_AUTHOR("Brocade");
3846 MODULE_LICENSE("GPL");
3847 MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3848 MODULE_FIRMWARE(CNA_FW_FILE_CT);
3849 MODULE_FIRMWARE(CNA_FW_FILE_CT2);