0001 // SPDX-License-Identifier: GPL-2.0-only
0002 /*
0003  * Linux network driver for QLogic BR-series Converged Network Adapter.
0004  */
0005 /*
0006  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
0007  * Copyright (c) 2014-2015 QLogic Corporation
0008  * All rights reserved
0009  * www.qlogic.com
0010  */
0011 #include "bna.h"
0012 #include "bfi.h"
0013 
0014 /* IB */
0015 static void
0016 bna_ib_coalescing_timeo_set(struct bna_ib *ib, u8 coalescing_timeo)
0017 {
0018     ib->coalescing_timeo = coalescing_timeo;
0019     ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
0020                 (u32)ib->coalescing_timeo, 0);
0021 }
0022 
0023 /* RXF */
0024 
0025 #define bna_rxf_vlan_cfg_soft_reset(rxf)                \
0026 do {                                    \
0027     (rxf)->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;       \
0028     (rxf)->vlan_strip_pending = true;               \
0029 } while (0)
0030 
0031 #define bna_rxf_rss_cfg_soft_reset(rxf)                 \
0032 do {                                    \
0033     if ((rxf)->rss_status == BNA_STATUS_T_ENABLED)          \
0034         (rxf)->rss_pending = (BNA_RSS_F_RIT_PENDING |       \
0035                 BNA_RSS_F_CFG_PENDING |         \
0036                 BNA_RSS_F_STATUS_PENDING);      \
0037 } while (0)
0038 
0039 static int bna_rxf_cfg_apply(struct bna_rxf *rxf);
0040 static void bna_rxf_cfg_reset(struct bna_rxf *rxf);
0041 static int bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf);
0042 static int bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf);
0043 static int bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf);
0044 static int bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf);
0045 static int bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf,
0046                     enum bna_cleanup_type cleanup);
0047 static int bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf,
0048                     enum bna_cleanup_type cleanup);
0049 static int bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf,
0050                     enum bna_cleanup_type cleanup);
0051 
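     /*
      * RXF configuration state machine: stopped -> cfg_wait -> started.
      * cfg_wait is held while a firmware request is outstanding; a stop
      * received in cfg_wait drains the final reply in last_resp_wait.
      */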
0052 bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
0053             enum bna_rxf_event);
0054 bfa_fsm_state_decl(bna_rxf, cfg_wait, struct bna_rxf,
0055             enum bna_rxf_event);
0056 bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
0057             enum bna_rxf_event);
0058 bfa_fsm_state_decl(bna_rxf, last_resp_wait, struct bna_rxf,
0059             enum bna_rxf_event);
0060 
0061 static void
0062 bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
0063 {
0064     call_rxf_stop_cbfn(rxf);
0065 }
0066 
0067 static void
0068 bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
0069 {
0070     switch (event) {
0071     case RXF_E_START:
0072         bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
0073         break;
0074 
0075     case RXF_E_STOP:
0076         call_rxf_stop_cbfn(rxf);
0077         break;
0078 
0079     case RXF_E_FAIL:
0080         /* No-op */
0081         break;
0082 
0083     case RXF_E_CONFIG:
0084         call_rxf_cam_fltr_cbfn(rxf);
0085         break;
0086 
0087     default:
0088         bfa_sm_fault(event);
0089     }
0090 }
0091 
0092 static void
0093 bna_rxf_sm_cfg_wait_entry(struct bna_rxf *rxf)
0094 {
0095     if (!bna_rxf_cfg_apply(rxf)) {
0096         /* No more pending config updates */
0097         bfa_fsm_set_state(rxf, bna_rxf_sm_started);
0098     }
0099 }
0100 
0101 static void
0102 bna_rxf_sm_cfg_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
0103 {
0104     switch (event) {
0105     case RXF_E_STOP:
0106         bfa_fsm_set_state(rxf, bna_rxf_sm_last_resp_wait);
0107         break;
0108 
0109     case RXF_E_FAIL:
0110         bna_rxf_cfg_reset(rxf);
0111         call_rxf_start_cbfn(rxf);
0112         call_rxf_cam_fltr_cbfn(rxf);
0113         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
0114         break;
0115 
0116     case RXF_E_CONFIG:
0117         /* No-op */
0118         break;
0119 
0120     case RXF_E_FW_RESP:
0121         if (!bna_rxf_cfg_apply(rxf)) {
0122             /* No more pending config updates */
0123             bfa_fsm_set_state(rxf, bna_rxf_sm_started);
0124         }
0125         break;
0126 
0127     default:
0128         bfa_sm_fault(event);
0129     }
0130 }
0131 
0132 static void
0133 bna_rxf_sm_started_entry(struct bna_rxf *rxf)
0134 {
0135     call_rxf_start_cbfn(rxf);
0136     call_rxf_cam_fltr_cbfn(rxf);
0137 }
0138 
0139 static void
0140 bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
0141 {
0142     switch (event) {
0143     case RXF_E_STOP:
0144     case RXF_E_FAIL:
0145         bna_rxf_cfg_reset(rxf);
0146         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
0147         break;
0148 
0149     case RXF_E_CONFIG:
0150         bfa_fsm_set_state(rxf, bna_rxf_sm_cfg_wait);
0151         break;
0152 
0153     default:
0154         bfa_sm_fault(event);
0155     }
0156 }
0157 
0158 static void
0159 bna_rxf_sm_last_resp_wait_entry(struct bna_rxf *rxf)
0160 {
0161 }
0162 
0163 static void
0164 bna_rxf_sm_last_resp_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
0165 {
0166     switch (event) {
0167     case RXF_E_FAIL:
0168     case RXF_E_FW_RESP:
0169         bna_rxf_cfg_reset(rxf);
0170         bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
0171         break;
0172 
0173     default:
0174         bfa_sm_fault(event);
0175     }
0176 }
0177 
0178 static void
0179 bna_bfi_ucast_req(struct bna_rxf *rxf, struct bna_mac *mac,
0180         enum bfi_enet_h2i_msgs req_type)
0181 {
0182     struct bfi_enet_ucast_req *req = &rxf->bfi_enet_cmd.ucast_req;
0183 
0184     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, req_type, 0, rxf->rx->rid);
0185     req->mh.num_entries = htons(
0186     bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_ucast_req)));
0187     ether_addr_copy(req->mac_addr, mac->addr);
0188     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0189         sizeof(struct bfi_enet_ucast_req), &req->mh);
0190     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0191 }
0192 
0193 static void
0194 bna_bfi_mcast_add_req(struct bna_rxf *rxf, struct bna_mac *mac)
0195 {
0196     struct bfi_enet_mcast_add_req *req =
0197         &rxf->bfi_enet_cmd.mcast_add_req;
0198 
0199     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_ADD_REQ,
0200         0, rxf->rx->rid);
0201     req->mh.num_entries = htons(
0202     bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_add_req)));
0203     ether_addr_copy(req->mac_addr, mac->addr);
0204     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0205         sizeof(struct bfi_enet_mcast_add_req), &req->mh);
0206     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0207 }
0208 
0209 static void
0210 bna_bfi_mcast_del_req(struct bna_rxf *rxf, u16 handle)
0211 {
0212     struct bfi_enet_mcast_del_req *req =
0213         &rxf->bfi_enet_cmd.mcast_del_req;
0214 
0215     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET, BFI_ENET_H2I_MAC_MCAST_DEL_REQ,
0216         0, rxf->rx->rid);
0217     req->mh.num_entries = htons(
0218     bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_mcast_del_req)));
0219     req->handle = htons(handle);
0220     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0221         sizeof(struct bfi_enet_mcast_del_req), &req->mh);
0222     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0223 }
0224 
0225 static void
0226 bna_bfi_mcast_filter_req(struct bna_rxf *rxf, enum bna_status status)
0227 {
0228     struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
0229 
0230     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0231         BFI_ENET_H2I_MAC_MCAST_FILTER_REQ, 0, rxf->rx->rid);
0232     req->mh.num_entries = htons(
0233         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0234     req->enable = status;
0235     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0236         sizeof(struct bfi_enet_enable_req), &req->mh);
0237     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0238 }
0239 
0240 static void
0241 bna_bfi_rx_promisc_req(struct bna_rxf *rxf, enum bna_status status)
0242 {
0243     struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
0244 
0245     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0246         BFI_ENET_H2I_RX_PROMISCUOUS_REQ, 0, rxf->rx->rid);
0247     req->mh.num_entries = htons(
0248         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0249     req->enable = status;
0250     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0251         sizeof(struct bfi_enet_enable_req), &req->mh);
0252     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0253 }
0254 
0255 static void
0256 bna_bfi_rx_vlan_filter_set(struct bna_rxf *rxf, u8 block_idx)
0257 {
0258     struct bfi_enet_rx_vlan_req *req = &rxf->bfi_enet_cmd.vlan_req;
0259     int i;
0260     int j;
0261 
0262     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0263         BFI_ENET_H2I_RX_VLAN_SET_REQ, 0, rxf->rx->rid);
0264     req->mh.num_entries = htons(
0265         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_vlan_req)));
0266     req->block_idx = block_idx;
0267     for (i = 0; i < (BFI_ENET_VLAN_BLOCK_SIZE / 32); i++) {
0268         j = (block_idx * (BFI_ENET_VLAN_BLOCK_SIZE / 32)) + i;
0269         if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED)
0270             req->bit_mask[i] =
0271                 htonl(rxf->vlan_filter_table[j]);
0272         else
0273             req->bit_mask[i] = 0xFFFFFFFF;
0274     }
0275     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0276         sizeof(struct bfi_enet_rx_vlan_req), &req->mh);
0277     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0278 }
0279 
0280 static void
0281 bna_bfi_vlan_strip_enable(struct bna_rxf *rxf)
0282 {
0283     struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
0284 
0285     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0286         BFI_ENET_H2I_RX_VLAN_STRIP_ENABLE_REQ, 0, rxf->rx->rid);
0287     req->mh.num_entries = htons(
0288         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0289     req->enable = rxf->vlan_strip_status;
0290     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0291         sizeof(struct bfi_enet_enable_req), &req->mh);
0292     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0293 }
0294 
0295 static void
0296 bna_bfi_rit_cfg(struct bna_rxf *rxf)
0297 {
0298     struct bfi_enet_rit_req *req = &rxf->bfi_enet_cmd.rit_req;
0299 
0300     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0301         BFI_ENET_H2I_RIT_CFG_REQ, 0, rxf->rx->rid);
0302     req->mh.num_entries = htons(
0303         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rit_req)));
0304     req->size = htons(rxf->rit_size);
0305     memcpy(&req->table[0], rxf->rit, rxf->rit_size);
0306     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0307         sizeof(struct bfi_enet_rit_req), &req->mh);
0308     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0309 }
0310 
0311 static void
0312 bna_bfi_rss_cfg(struct bna_rxf *rxf)
0313 {
0314     struct bfi_enet_rss_cfg_req *req = &rxf->bfi_enet_cmd.rss_req;
0315     int i;
0316 
0317     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0318         BFI_ENET_H2I_RSS_CFG_REQ, 0, rxf->rx->rid);
0319     req->mh.num_entries = htons(
0320         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rss_cfg_req)));
0321     req->cfg.type = rxf->rss_cfg.hash_type;
0322     req->cfg.mask = rxf->rss_cfg.hash_mask;
0323     for (i = 0; i < BFI_ENET_RSS_KEY_LEN; i++)
0324         req->cfg.key[i] =
0325             htonl(rxf->rss_cfg.toeplitz_hash_key[i]);
0326     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0327         sizeof(struct bfi_enet_rss_cfg_req), &req->mh);
0328     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0329 }
0330 
0331 static void
0332 bna_bfi_rss_enable(struct bna_rxf *rxf)
0333 {
0334     struct bfi_enet_enable_req *req = &rxf->bfi_enet_cmd.req;
0335 
0336     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
0337         BFI_ENET_H2I_RSS_ENABLE_REQ, 0, rxf->rx->rid);
0338     req->mh.num_entries = htons(
0339         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0340     req->enable = rxf->rss_status;
0341     bfa_msgq_cmd_set(&rxf->msgq_cmd, NULL, NULL,
0342         sizeof(struct bfi_enet_enable_req), &req->mh);
0343     bfa_msgq_cmd_post(&rxf->rx->bna->msgq, &rxf->msgq_cmd);
0344 }
0345 
0346 /* This function gets the multicast MAC that has already been added to CAM */
0347 static struct bna_mac *
0348 bna_rxf_mcmac_get(struct bna_rxf *rxf, const u8 *mac_addr)
0349 {
0350     struct bna_mac *mac;
0351 
0352     list_for_each_entry(mac, &rxf->mcast_active_q, qe)
0353         if (ether_addr_equal(mac->addr, mac_addr))
0354             return mac;
0355 
0356     list_for_each_entry(mac, &rxf->mcast_pending_del_q, qe)
0357         if (ether_addr_equal(mac->addr, mac_addr))
0358             return mac;
0359 
0360     return NULL;
0361 }
0362 
0363 static struct bna_mcam_handle *
0364 bna_rxf_mchandle_get(struct bna_rxf *rxf, int handle)
0365 {
0366     struct bna_mcam_handle *mchandle;
0367 
0368     list_for_each_entry(mchandle, &rxf->mcast_handle_q, qe)
0369         if (mchandle->handle == handle)
0370             return mchandle;
0371 
0372     return NULL;
0373 }
0374 
0375 static void
0376 bna_rxf_mchandle_attach(struct bna_rxf *rxf, u8 *mac_addr, int handle)
0377 {
0378     struct bna_mac *mcmac;
0379     struct bna_mcam_handle *mchandle;
0380 
0381     mcmac = bna_rxf_mcmac_get(rxf, mac_addr);
0382     mchandle = bna_rxf_mchandle_get(rxf, handle);
0383     if (mchandle == NULL) {
0384         mchandle = bna_mcam_mod_handle_get(&rxf->rx->bna->mcam_mod);
0385         mchandle->handle = handle;
0386         mchandle->refcnt = 0;
0387         list_add_tail(&mchandle->qe, &rxf->mcast_handle_q);
0388     }
0389     mchandle->refcnt++;
0390     mcmac->handle = mchandle;
0391 }
0392 
0393 static int
0394 bna_rxf_mcast_del(struct bna_rxf *rxf, struct bna_mac *mac,
0395         enum bna_cleanup_type cleanup)
0396 {
0397     struct bna_mcam_handle *mchandle;
0398     int ret = 0;
0399 
0400     mchandle = mac->handle;
0401     if (mchandle == NULL)
0402         return ret;
0403 
0404     mchandle->refcnt--;
0405     if (mchandle->refcnt == 0) {
0406         if (cleanup == BNA_HARD_CLEANUP) {
0407             bna_bfi_mcast_del_req(rxf, mchandle->handle);
0408             ret = 1;
0409         }
0410         list_del(&mchandle->qe);
0411         bna_mcam_mod_handle_put(&rxf->rx->bna->mcam_mod, mchandle);
0412     }
0413     mac->handle = NULL;
0414 
0415     return ret;
0416 }
0417 
0418 static int
0419 bna_rxf_mcast_cfg_apply(struct bna_rxf *rxf)
0420 {
0421     struct bna_mac *mac = NULL;
0422     int ret;
0423 
0424     /* First delete multicast entries to maintain the count */
0425     while (!list_empty(&rxf->mcast_pending_del_q)) {
0426         mac = list_first_entry(&rxf->mcast_pending_del_q,
0427                        struct bna_mac, qe);
0428         ret = bna_rxf_mcast_del(rxf, mac, BNA_HARD_CLEANUP);
0429         list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
0430         if (ret)
0431             return ret;
0432     }
0433 
0434     /* Add multicast entries */
0435     if (!list_empty(&rxf->mcast_pending_add_q)) {
0436         mac = list_first_entry(&rxf->mcast_pending_add_q,
0437                        struct bna_mac, qe);
0438         list_move_tail(&mac->qe, &rxf->mcast_active_q);
0439         bna_bfi_mcast_add_req(rxf, mac);
0440         return 1;
0441     }
0442 
0443     return 0;
0444 }
0445 
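     /*
      * Push one pending VLAN filter block to firmware per call: the lowest
      * set bit in vlan_pending_bitmask selects the block index.
      */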
0446 static int
0447 bna_rxf_vlan_cfg_apply(struct bna_rxf *rxf)
0448 {
0449     u8 vlan_pending_bitmask;
0450     int block_idx = 0;
0451 
0452     if (rxf->vlan_pending_bitmask) {
0453         vlan_pending_bitmask = rxf->vlan_pending_bitmask;
0454         while (!(vlan_pending_bitmask & 0x1)) {
0455             block_idx++;
0456             vlan_pending_bitmask >>= 1;
0457         }
0458         rxf->vlan_pending_bitmask &= ~BIT(block_idx);
0459         bna_bfi_rx_vlan_filter_set(rxf, block_idx);
0460         return 1;
0461     }
0462 
0463     return 0;
0464 }
0465 
0466 static int
0467 bna_rxf_mcast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
0468 {
0469     struct bna_mac *mac;
0470     int ret;
0471 
0472     /* Throw away pending-delete mcast entries */
0473     while (!list_empty(&rxf->mcast_pending_del_q)) {
0474         mac = list_first_entry(&rxf->mcast_pending_del_q,
0475                        struct bna_mac, qe);
0476         ret = bna_rxf_mcast_del(rxf, mac, cleanup);
0477         list_move_tail(&mac->qe, bna_mcam_mod_del_q(rxf->rx->bna));
0478         if (ret)
0479             return ret;
0480     }
0481 
0482     /* Move active mcast entries to pending_add_q */
0483     while (!list_empty(&rxf->mcast_active_q)) {
0484         mac = list_first_entry(&rxf->mcast_active_q,
0485                        struct bna_mac, qe);
0486         list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
0487         if (bna_rxf_mcast_del(rxf, mac, cleanup))
0488             return 1;
0489     }
0490 
0491     return 0;
0492 }
0493 
0494 static int
0495 bna_rxf_rss_cfg_apply(struct bna_rxf *rxf)
0496 {
0497     if (rxf->rss_pending) {
0498         if (rxf->rss_pending & BNA_RSS_F_RIT_PENDING) {
0499             rxf->rss_pending &= ~BNA_RSS_F_RIT_PENDING;
0500             bna_bfi_rit_cfg(rxf);
0501             return 1;
0502         }
0503 
0504         if (rxf->rss_pending & BNA_RSS_F_CFG_PENDING) {
0505             rxf->rss_pending &= ~BNA_RSS_F_CFG_PENDING;
0506             bna_bfi_rss_cfg(rxf);
0507             return 1;
0508         }
0509 
0510         if (rxf->rss_pending & BNA_RSS_F_STATUS_PENDING) {
0511             rxf->rss_pending &= ~BNA_RSS_F_STATUS_PENDING;
0512             bna_bfi_rss_enable(rxf);
0513             return 1;
0514         }
0515     }
0516 
0517     return 0;
0518 }
0519 
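     /*
      * Apply pending configuration in a fixed order (ucast, mcast, promisc,
      * allmulti, VLAN filter, VLAN strip, RSS). A return of 1 means a
      * firmware request was posted; the state machine waits for
      * RXF_E_FW_RESP before applying the next item.
      */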
0520 static int
0521 bna_rxf_cfg_apply(struct bna_rxf *rxf)
0522 {
0523     if (bna_rxf_ucast_cfg_apply(rxf))
0524         return 1;
0525 
0526     if (bna_rxf_mcast_cfg_apply(rxf))
0527         return 1;
0528 
0529     if (bna_rxf_promisc_cfg_apply(rxf))
0530         return 1;
0531 
0532     if (bna_rxf_allmulti_cfg_apply(rxf))
0533         return 1;
0534 
0535     if (bna_rxf_vlan_cfg_apply(rxf))
0536         return 1;
0537 
0538     if (bna_rxf_vlan_strip_cfg_apply(rxf))
0539         return 1;
0540 
0541     if (bna_rxf_rss_cfg_apply(rxf))
0542         return 1;
0543 
0544     return 0;
0545 }
0546 
0547 static void
0548 bna_rxf_cfg_reset(struct bna_rxf *rxf)
0549 {
0550     bna_rxf_ucast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
0551     bna_rxf_mcast_cfg_reset(rxf, BNA_SOFT_CLEANUP);
0552     bna_rxf_promisc_cfg_reset(rxf, BNA_SOFT_CLEANUP);
0553     bna_rxf_allmulti_cfg_reset(rxf, BNA_SOFT_CLEANUP);
0554     bna_rxf_vlan_cfg_soft_reset(rxf);
0555     bna_rxf_rss_cfg_soft_reset(rxf);
0556 }
0557 
0558 static void
0559 bna_rit_init(struct bna_rxf *rxf, int rit_size)
0560 {
0561     struct bna_rx *rx = rxf->rx;
0562     struct bna_rxp *rxp;
0563     int offset = 0;
0564 
0565     rxf->rit_size = rit_size;
0566     list_for_each_entry(rxp, &rx->rxp_q, qe) {
0567         rxf->rit[offset] = rxp->cq.ccb->id;
0568         offset++;
0569     }
0570 }
0571 
0572 void
0573 bna_bfi_rxf_cfg_rsp(struct bna_rxf *rxf, struct bfi_msgq_mhdr *msghdr)
0574 {
0575     bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
0576 }
0577 
0578 void
0579 bna_bfi_rxf_ucast_set_rsp(struct bna_rxf *rxf,
0580             struct bfi_msgq_mhdr *msghdr)
0581 {
0582     struct bfi_enet_rsp *rsp =
0583         container_of(msghdr, struct bfi_enet_rsp, mh);
0584 
0585     if (rsp->error) {
0586         /* Clear ucast from cache */
0587         rxf->ucast_active_set = 0;
0588     }
0589 
0590     bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
0591 }
0592 
0593 void
0594 bna_bfi_rxf_mcast_add_rsp(struct bna_rxf *rxf,
0595             struct bfi_msgq_mhdr *msghdr)
0596 {
0597     struct bfi_enet_mcast_add_req *req =
0598         &rxf->bfi_enet_cmd.mcast_add_req;
0599     struct bfi_enet_mcast_add_rsp *rsp =
0600         container_of(msghdr, struct bfi_enet_mcast_add_rsp, mh);
0601 
0602     bna_rxf_mchandle_attach(rxf, (u8 *)&req->mac_addr,
0603         ntohs(rsp->handle));
0604     bfa_fsm_send_event(rxf, RXF_E_FW_RESP);
0605 }
0606 
0607 static void
0608 bna_rxf_init(struct bna_rxf *rxf,
0609         struct bna_rx *rx,
0610         struct bna_rx_config *q_config,
0611         struct bna_res_info *res_info)
0612 {
0613     rxf->rx = rx;
0614 
0615     INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
0616     INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
0617     rxf->ucast_pending_set = 0;
0618     rxf->ucast_active_set = 0;
0619     INIT_LIST_HEAD(&rxf->ucast_active_q);
0620     rxf->ucast_pending_mac = NULL;
0621 
0622     INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
0623     INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
0624     INIT_LIST_HEAD(&rxf->mcast_active_q);
0625     INIT_LIST_HEAD(&rxf->mcast_handle_q);
0626 
0627     rxf->rit = (u8 *)
0628         res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info.mdl[0].kva;
0629     bna_rit_init(rxf, q_config->num_paths);
0630 
0631     rxf->rss_status = q_config->rss_status;
0632     if (rxf->rss_status == BNA_STATUS_T_ENABLED) {
0633         rxf->rss_cfg = q_config->rss_config;
0634         rxf->rss_pending |= BNA_RSS_F_CFG_PENDING;
0635         rxf->rss_pending |= BNA_RSS_F_RIT_PENDING;
0636         rxf->rss_pending |= BNA_RSS_F_STATUS_PENDING;
0637     }
0638 
0639     rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
0640     memset(rxf->vlan_filter_table, 0,
0641             (sizeof(u32) * (BFI_ENET_VLAN_ID_MAX / 32)));
0642     rxf->vlan_filter_table[0] |= 1; /* for pure priority tagged frames */
0643     rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
0644 
0645     rxf->vlan_strip_status = q_config->vlan_strip_status;
0646 
0647     bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
0648 }
0649 
0650 static void
0651 bna_rxf_uninit(struct bna_rxf *rxf)
0652 {
0653     struct bna_mac *mac;
0654 
0655     rxf->ucast_pending_set = 0;
0656     rxf->ucast_active_set = 0;
0657 
0658     while (!list_empty(&rxf->ucast_pending_add_q)) {
0659         mac = list_first_entry(&rxf->ucast_pending_add_q,
0660                        struct bna_mac, qe);
0661         list_move_tail(&mac->qe, bna_ucam_mod_free_q(rxf->rx->bna));
0662     }
0663 
0664     if (rxf->ucast_pending_mac) {
0665         list_add_tail(&rxf->ucast_pending_mac->qe,
0666                   bna_ucam_mod_free_q(rxf->rx->bna));
0667         rxf->ucast_pending_mac = NULL;
0668     }
0669 
0670     while (!list_empty(&rxf->mcast_pending_add_q)) {
0671         mac = list_first_entry(&rxf->mcast_pending_add_q,
0672                        struct bna_mac, qe);
0673         list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
0674     }
0675 
0676     rxf->rxmode_pending = 0;
0677     rxf->rxmode_pending_bitmask = 0;
0678     if (rxf->rx->bna->promisc_rid == rxf->rx->rid)
0679         rxf->rx->bna->promisc_rid = BFI_INVALID_RID;
0680     if (rxf->rx->bna->default_mode_rid == rxf->rx->rid)
0681         rxf->rx->bna->default_mode_rid = BFI_INVALID_RID;
0682 
0683     rxf->rss_pending = 0;
0684     rxf->vlan_strip_pending = false;
0685 
0686     rxf->rx = NULL;
0687 }
0688 
0689 static void
0690 bna_rx_cb_rxf_started(struct bna_rx *rx)
0691 {
0692     bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
0693 }
0694 
0695 static void
0696 bna_rxf_start(struct bna_rxf *rxf)
0697 {
0698     rxf->start_cbfn = bna_rx_cb_rxf_started;
0699     rxf->start_cbarg = rxf->rx;
0700     bfa_fsm_send_event(rxf, RXF_E_START);
0701 }
0702 
0703 static void
0704 bna_rx_cb_rxf_stopped(struct bna_rx *rx)
0705 {
0706     bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
0707 }
0708 
0709 static void
0710 bna_rxf_stop(struct bna_rxf *rxf)
0711 {
0712     rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
0713     rxf->stop_cbarg = rxf->rx;
0714     bfa_fsm_send_event(rxf, RXF_E_STOP);
0715 }
0716 
0717 static void
0718 bna_rxf_fail(struct bna_rxf *rxf)
0719 {
0720     bfa_fsm_send_event(rxf, RXF_E_FAIL);
0721 }
0722 
0723 enum bna_cb_status
0724 bna_rx_ucast_set(struct bna_rx *rx, const u8 *ucmac)
0725 {
0726     struct bna_rxf *rxf = &rx->rxf;
0727 
0728     if (rxf->ucast_pending_mac == NULL) {
0729         rxf->ucast_pending_mac =
0730             bna_cam_mod_mac_get(bna_ucam_mod_free_q(rxf->rx->bna));
0731         if (rxf->ucast_pending_mac == NULL)
0732             return BNA_CB_UCAST_CAM_FULL;
0733     }
0734 
0735     ether_addr_copy(rxf->ucast_pending_mac->addr, ucmac);
0736     rxf->ucast_pending_set = 1;
0737     rxf->cam_fltr_cbfn = NULL;
0738     rxf->cam_fltr_cbarg = rx->bna->bnad;
0739 
0740     bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0741 
0742     return BNA_CB_SUCCESS;
0743 }
0744 
0745 enum bna_cb_status
0746 bna_rx_mcast_add(struct bna_rx *rx, const u8 *addr,
0747          void (*cbfn)(struct bnad *, struct bna_rx *))
0748 {
0749     struct bna_rxf *rxf = &rx->rxf;
0750     struct bna_mac *mac;
0751 
0752     /* Check if already added or pending addition */
0753     if (bna_mac_find(&rxf->mcast_active_q, addr) ||
0754         bna_mac_find(&rxf->mcast_pending_add_q, addr)) {
0755         if (cbfn)
0756             cbfn(rx->bna->bnad, rx);
0757         return BNA_CB_SUCCESS;
0758     }
0759 
0760     mac = bna_cam_mod_mac_get(bna_mcam_mod_free_q(rxf->rx->bna));
0761     if (mac == NULL)
0762         return BNA_CB_MCAST_LIST_FULL;
0763     ether_addr_copy(mac->addr, addr);
0764     list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
0765 
0766     rxf->cam_fltr_cbfn = cbfn;
0767     rxf->cam_fltr_cbarg = rx->bna->bnad;
0768 
0769     bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0770 
0771     return BNA_CB_SUCCESS;
0772 }
0773 
0774 enum bna_cb_status
0775 bna_rx_ucast_listset(struct bna_rx *rx, int count, const u8 *uclist)
0776 {
0777     struct bna_ucam_mod *ucam_mod = &rx->bna->ucam_mod;
0778     struct bna_rxf *rxf = &rx->rxf;
0779     struct list_head list_head;
0780     const u8 *mcaddr;
0781     struct bna_mac *mac, *del_mac;
0782     int i;
0783 
0784     /* Purge the pending_add_q */
0785     while (!list_empty(&rxf->ucast_pending_add_q)) {
0786         mac = list_first_entry(&rxf->ucast_pending_add_q,
0787                        struct bna_mac, qe);
0788         list_move_tail(&mac->qe, &ucam_mod->free_q);
0789     }
0790 
0791     /* Schedule active_q entries for deletion */
0792     while (!list_empty(&rxf->ucast_active_q)) {
0793         mac = list_first_entry(&rxf->ucast_active_q,
0794                        struct bna_mac, qe);
0795         del_mac = bna_cam_mod_mac_get(&ucam_mod->del_q);
0796         ether_addr_copy(del_mac->addr, mac->addr);
0797         del_mac->handle = mac->handle;
0798         list_add_tail(&del_mac->qe, &rxf->ucast_pending_del_q);
0799         list_move_tail(&mac->qe, &ucam_mod->free_q);
0800     }
0801 
0802     /* Allocate nodes */
0803     INIT_LIST_HEAD(&list_head);
0804     for (i = 0, mcaddr = uclist; i < count; i++) {
0805         mac = bna_cam_mod_mac_get(&ucam_mod->free_q);
0806         if (mac == NULL)
0807             goto err_return;
0808         ether_addr_copy(mac->addr, mcaddr);
0809         list_add_tail(&mac->qe, &list_head);
0810         mcaddr += ETH_ALEN;
0811     }
0812 
0813     /* Add the new entries */
0814     while (!list_empty(&list_head)) {
0815         mac = list_first_entry(&list_head, struct bna_mac, qe);
0816         list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
0817     }
0818 
0819     bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0820 
0821     return BNA_CB_SUCCESS;
0822 
0823 err_return:
0824     while (!list_empty(&list_head)) {
0825         mac = list_first_entry(&list_head, struct bna_mac, qe);
0826         list_move_tail(&mac->qe, &ucam_mod->free_q);
0827     }
0828 
0829     return BNA_CB_UCAST_CAM_FULL;
0830 }
0831 
0832 enum bna_cb_status
0833 bna_rx_mcast_listset(struct bna_rx *rx, int count, const u8 *mclist)
0834 {
0835     struct bna_mcam_mod *mcam_mod = &rx->bna->mcam_mod;
0836     struct bna_rxf *rxf = &rx->rxf;
0837     struct list_head list_head;
0838     const u8 *mcaddr;
0839     struct bna_mac *mac, *del_mac;
0840     int i;
0841 
0842     /* Purge the pending_add_q */
0843     while (!list_empty(&rxf->mcast_pending_add_q)) {
0844         mac = list_first_entry(&rxf->mcast_pending_add_q,
0845                        struct bna_mac, qe);
0846         list_move_tail(&mac->qe, &mcam_mod->free_q);
0847     }
0848 
0849     /* Schedule active_q entries for deletion */
0850     while (!list_empty(&rxf->mcast_active_q)) {
0851         mac = list_first_entry(&rxf->mcast_active_q,
0852                        struct bna_mac, qe);
0853         del_mac = bna_cam_mod_mac_get(&mcam_mod->del_q);
0854         ether_addr_copy(del_mac->addr, mac->addr);
0855         del_mac->handle = mac->handle;
0856         list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
0857         mac->handle = NULL;
0858         list_move_tail(&mac->qe, &mcam_mod->free_q);
0859     }
0860 
0861     /* Allocate nodes */
0862     INIT_LIST_HEAD(&list_head);
0863     for (i = 0, mcaddr = mclist; i < count; i++) {
0864         mac = bna_cam_mod_mac_get(&mcam_mod->free_q);
0865         if (mac == NULL)
0866             goto err_return;
0867         ether_addr_copy(mac->addr, mcaddr);
0868         list_add_tail(&mac->qe, &list_head);
0869 
0870         mcaddr += ETH_ALEN;
0871     }
0872 
0873     /* Add the new entries */
0874     while (!list_empty(&list_head)) {
0875         mac = list_first_entry(&list_head, struct bna_mac, qe);
0876         list_move_tail(&mac->qe, &rxf->mcast_pending_add_q);
0877     }
0878 
0879     bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0880 
0881     return BNA_CB_SUCCESS;
0882 
0883 err_return:
0884     while (!list_empty(&list_head)) {
0885         mac = list_first_entry(&list_head, struct bna_mac, qe);
0886         list_move_tail(&mac->qe, &mcam_mod->free_q);
0887     }
0888 
0889     return BNA_CB_MCAST_LIST_FULL;
0890 }
0891 
0892 void
0893 bna_rx_mcast_delall(struct bna_rx *rx)
0894 {
0895     struct bna_rxf *rxf = &rx->rxf;
0896     struct bna_mac *mac, *del_mac;
0897     int need_hw_config = 0;
0898 
0899     /* Purge all entries from pending_add_q */
0900     while (!list_empty(&rxf->mcast_pending_add_q)) {
0901         mac = list_first_entry(&rxf->mcast_pending_add_q,
0902                        struct bna_mac, qe);
0903         list_move_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
0904     }
0905 
0906     /* Schedule all entries in active_q for deletion */
0907     while (!list_empty(&rxf->mcast_active_q)) {
0908         mac = list_first_entry(&rxf->mcast_active_q,
0909                        struct bna_mac, qe);
0910         list_del(&mac->qe);
0911         del_mac = bna_cam_mod_mac_get(bna_mcam_mod_del_q(rxf->rx->bna));
0912         memcpy(del_mac, mac, sizeof(*del_mac));
0913         list_add_tail(&del_mac->qe, &rxf->mcast_pending_del_q);
0914         mac->handle = NULL;
0915         list_add_tail(&mac->qe, bna_mcam_mod_free_q(rxf->rx->bna));
0916         need_hw_config = 1;
0917     }
0918 
0919     if (need_hw_config)
0920         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0921 }
0922 
0923 void
0924 bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
0925 {
0926     struct bna_rxf *rxf = &rx->rxf;
0927     int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
0928     int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
0929     int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
0930 
0931     rxf->vlan_filter_table[index] |= bit;
0932     if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
0933         rxf->vlan_pending_bitmask |= BIT(group_id);
0934         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0935     }
0936 }
0937 
0938 void
0939 bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
0940 {
0941     struct bna_rxf *rxf = &rx->rxf;
0942     int index = (vlan_id >> BFI_VLAN_WORD_SHIFT);
0943     int bit = BIT(vlan_id & BFI_VLAN_WORD_MASK);
0944     int group_id = (vlan_id >> BFI_VLAN_BLOCK_SHIFT);
0945 
0946     rxf->vlan_filter_table[index] &= ~bit;
0947     if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
0948         rxf->vlan_pending_bitmask |= BIT(group_id);
0949         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
0950     }
0951 }
0952 
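     /*
      * Process at most one pending unicast operation per call: a pending
      * delete, then the default MAC set, then a pending add. Returns 1
      * when a firmware request was posted.
      */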
0953 static int
0954 bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
0955 {
0956     struct bna_mac *mac = NULL;
0957 
0958     /* Delete MAC addresses previously added */
0959     if (!list_empty(&rxf->ucast_pending_del_q)) {
0960         mac = list_first_entry(&rxf->ucast_pending_del_q,
0961                        struct bna_mac, qe);
0962         bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
0963         list_move_tail(&mac->qe, bna_ucam_mod_del_q(rxf->rx->bna));
0964         return 1;
0965     }
0966 
0967     /* Set default unicast MAC */
0968     if (rxf->ucast_pending_set) {
0969         rxf->ucast_pending_set = 0;
0970         ether_addr_copy(rxf->ucast_active_mac.addr,
0971                 rxf->ucast_pending_mac->addr);
0972         rxf->ucast_active_set = 1;
0973         bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
0974             BFI_ENET_H2I_MAC_UCAST_SET_REQ);
0975         return 1;
0976     }
0977 
0978     /* Add additional MAC entries */
0979     if (!list_empty(&rxf->ucast_pending_add_q)) {
0980         mac = list_first_entry(&rxf->ucast_pending_add_q,
0981                        struct bna_mac, qe);
0982         list_move_tail(&mac->qe, &rxf->ucast_active_q);
0983         bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
0984         return 1;
0985     }
0986 
0987     return 0;
0988 }
0989 
0990 static int
0991 bna_rxf_ucast_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
0992 {
0993     struct bna_mac *mac;
0994 
0995     /* Throw away pending-delete ucast entries */
0996     while (!list_empty(&rxf->ucast_pending_del_q)) {
0997         mac = list_first_entry(&rxf->ucast_pending_del_q,
0998                        struct bna_mac, qe);
0999         if (cleanup == BNA_SOFT_CLEANUP)
1000             list_move_tail(&mac->qe,
1001                        bna_ucam_mod_del_q(rxf->rx->bna));
1002         else {
1003             bna_bfi_ucast_req(rxf, mac,
1004                       BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1005             list_move_tail(&mac->qe,
1006                        bna_ucam_mod_del_q(rxf->rx->bna));
1007             return 1;
1008         }
1009     }
1010 
1011     /* Move active ucast entries to pending_add_q */
1012     while (!list_empty(&rxf->ucast_active_q)) {
1013         mac = list_first_entry(&rxf->ucast_active_q,
1014                        struct bna_mac, qe);
1015         list_move_tail(&mac->qe, &rxf->ucast_pending_add_q);
1016         if (cleanup == BNA_HARD_CLEANUP) {
1017             bna_bfi_ucast_req(rxf, mac,
1018                 BFI_ENET_H2I_MAC_UCAST_DEL_REQ);
1019             return 1;
1020         }
1021     }
1022 
1023     if (rxf->ucast_active_set) {
1024         rxf->ucast_pending_set = 1;
1025         rxf->ucast_active_set = 0;
1026         if (cleanup == BNA_HARD_CLEANUP) {
1027             bna_bfi_ucast_req(rxf, &rxf->ucast_active_mac,
1028                 BFI_ENET_H2I_MAC_UCAST_CLR_REQ);
1029             return 1;
1030         }
1031     }
1032 
1033     return 0;
1034 }
1035 
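     /*
      * Promiscuous and allmulti changes are staged in rxmode_pending and
      * promoted to rxmode_active when the corresponding firmware request
      * is posted.
      */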
1036 static int
1037 bna_rxf_promisc_cfg_apply(struct bna_rxf *rxf)
1038 {
1039     struct bna *bna = rxf->rx->bna;
1040 
1041     /* Enable/disable promiscuous mode */
1042     if (is_promisc_enable(rxf->rxmode_pending,
1043                 rxf->rxmode_pending_bitmask)) {
1044         /* move promisc configuration from pending -> active */
1045         promisc_inactive(rxf->rxmode_pending,
1046                 rxf->rxmode_pending_bitmask);
1047         rxf->rxmode_active |= BNA_RXMODE_PROMISC;
1048         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_ENABLED);
1049         return 1;
1050     } else if (is_promisc_disable(rxf->rxmode_pending,
1051                 rxf->rxmode_pending_bitmask)) {
1052         /* move promisc configuration from pending -> active */
1053         promisc_inactive(rxf->rxmode_pending,
1054                 rxf->rxmode_pending_bitmask);
1055         rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1056         bna->promisc_rid = BFI_INVALID_RID;
1057         bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1058         return 1;
1059     }
1060 
1061     return 0;
1062 }
1063 
1064 static int
1065 bna_rxf_promisc_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1066 {
1067     struct bna *bna = rxf->rx->bna;
1068 
1069     /* Clear pending promisc mode disable */
1070     if (is_promisc_disable(rxf->rxmode_pending,
1071                 rxf->rxmode_pending_bitmask)) {
1072         promisc_inactive(rxf->rxmode_pending,
1073                 rxf->rxmode_pending_bitmask);
1074         rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1075         bna->promisc_rid = BFI_INVALID_RID;
1076         if (cleanup == BNA_HARD_CLEANUP) {
1077             bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1078             return 1;
1079         }
1080     }
1081 
1082     /* Move promisc mode config from active -> pending */
1083     if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1084         promisc_enable(rxf->rxmode_pending,
1085                 rxf->rxmode_pending_bitmask);
1086         rxf->rxmode_active &= ~BNA_RXMODE_PROMISC;
1087         if (cleanup == BNA_HARD_CLEANUP) {
1088             bna_bfi_rx_promisc_req(rxf, BNA_STATUS_T_DISABLED);
1089             return 1;
1090         }
1091     }
1092 
1093     return 0;
1094 }
1095 
1096 static int
1097 bna_rxf_allmulti_cfg_apply(struct bna_rxf *rxf)
1098 {
1099     /* Enable/disable allmulti mode */
1100     if (is_allmulti_enable(rxf->rxmode_pending,
1101                 rxf->rxmode_pending_bitmask)) {
1102         /* move allmulti configuration from pending -> active */
1103         allmulti_inactive(rxf->rxmode_pending,
1104                 rxf->rxmode_pending_bitmask);
1105         rxf->rxmode_active |= BNA_RXMODE_ALLMULTI;
1106         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_DISABLED);
1107         return 1;
1108     } else if (is_allmulti_disable(rxf->rxmode_pending,
1109                     rxf->rxmode_pending_bitmask)) {
1110         /* move allmulti configuration from pending -> active */
1111         allmulti_inactive(rxf->rxmode_pending,
1112                 rxf->rxmode_pending_bitmask);
1113         rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1114         bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1115         return 1;
1116     }
1117 
1118     return 0;
1119 }
1120 
1121 static int
1122 bna_rxf_allmulti_cfg_reset(struct bna_rxf *rxf, enum bna_cleanup_type cleanup)
1123 {
1124     /* Clear pending allmulti mode disable */
1125     if (is_allmulti_disable(rxf->rxmode_pending,
1126                 rxf->rxmode_pending_bitmask)) {
1127         allmulti_inactive(rxf->rxmode_pending,
1128                 rxf->rxmode_pending_bitmask);
1129         rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1130         if (cleanup == BNA_HARD_CLEANUP) {
1131             bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1132             return 1;
1133         }
1134     }
1135 
1136     /* Move allmulti mode config from active -> pending */
1137     if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1138         allmulti_enable(rxf->rxmode_pending,
1139                 rxf->rxmode_pending_bitmask);
1140         rxf->rxmode_active &= ~BNA_RXMODE_ALLMULTI;
1141         if (cleanup == BNA_HARD_CLEANUP) {
1142             bna_bfi_mcast_filter_req(rxf, BNA_STATUS_T_ENABLED);
1143             return 1;
1144         }
1145     }
1146 
1147     return 0;
1148 }
1149 
1150 static int
1151 bna_rxf_promisc_enable(struct bna_rxf *rxf)
1152 {
1153     struct bna *bna = rxf->rx->bna;
1154     int ret = 0;
1155 
1156     if (is_promisc_enable(rxf->rxmode_pending,
1157                 rxf->rxmode_pending_bitmask) ||
1158         (rxf->rxmode_active & BNA_RXMODE_PROMISC)) {
1159         /* Do nothing if pending enable or already enabled */
1160     } else if (is_promisc_disable(rxf->rxmode_pending,
1161                     rxf->rxmode_pending_bitmask)) {
1162         /* Turn off pending disable command */
1163         promisc_inactive(rxf->rxmode_pending,
1164             rxf->rxmode_pending_bitmask);
1165     } else {
1166         /* Schedule enable */
1167         promisc_enable(rxf->rxmode_pending,
1168                 rxf->rxmode_pending_bitmask);
1169         bna->promisc_rid = rxf->rx->rid;
1170         ret = 1;
1171     }
1172 
1173     return ret;
1174 }
1175 
1176 static int
1177 bna_rxf_promisc_disable(struct bna_rxf *rxf)
1178 {
1179     struct bna *bna = rxf->rx->bna;
1180     int ret = 0;
1181 
1182     if (is_promisc_disable(rxf->rxmode_pending,
1183                 rxf->rxmode_pending_bitmask) ||
1184         (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))) {
1185         /* Do nothing if pending disable or already disabled */
1186     } else if (is_promisc_enable(rxf->rxmode_pending,
1187                     rxf->rxmode_pending_bitmask)) {
1188         /* Turn off pending enable command */
1189         promisc_inactive(rxf->rxmode_pending,
1190                 rxf->rxmode_pending_bitmask);
1191         bna->promisc_rid = BFI_INVALID_RID;
1192     } else if (rxf->rxmode_active & BNA_RXMODE_PROMISC) {
1193         /* Schedule disable */
1194         promisc_disable(rxf->rxmode_pending,
1195                 rxf->rxmode_pending_bitmask);
1196         ret = 1;
1197     }
1198 
1199     return ret;
1200 }
1201 
1202 static int
1203 bna_rxf_allmulti_enable(struct bna_rxf *rxf)
1204 {
1205     int ret = 0;
1206 
1207     if (is_allmulti_enable(rxf->rxmode_pending,
1208             rxf->rxmode_pending_bitmask) ||
1209             (rxf->rxmode_active & BNA_RXMODE_ALLMULTI)) {
1210         /* Do nothing if pending enable or already enabled */
1211     } else if (is_allmulti_disable(rxf->rxmode_pending,
1212                     rxf->rxmode_pending_bitmask)) {
1213         /* Turn off pending disable command */
1214         allmulti_inactive(rxf->rxmode_pending,
1215             rxf->rxmode_pending_bitmask);
1216     } else {
1217         /* Schedule enable */
1218         allmulti_enable(rxf->rxmode_pending,
1219                 rxf->rxmode_pending_bitmask);
1220         ret = 1;
1221     }
1222 
1223     return ret;
1224 }
1225 
1226 static int
1227 bna_rxf_allmulti_disable(struct bna_rxf *rxf)
1228 {
1229     int ret = 0;
1230 
1231     if (is_allmulti_disable(rxf->rxmode_pending,
1232                 rxf->rxmode_pending_bitmask) ||
1233         (!(rxf->rxmode_active & BNA_RXMODE_ALLMULTI))) {
1234         /* Do nothing if pending disable or already disabled */
1235     } else if (is_allmulti_enable(rxf->rxmode_pending,
1236                     rxf->rxmode_pending_bitmask)) {
1237         /* Turn off pending enable command */
1238         allmulti_inactive(rxf->rxmode_pending,
1239                 rxf->rxmode_pending_bitmask);
1240     } else if (rxf->rxmode_active & BNA_RXMODE_ALLMULTI) {
1241         /* Schedule disable */
1242         allmulti_disable(rxf->rxmode_pending,
1243                 rxf->rxmode_pending_bitmask);
1244         ret = 1;
1245     }
1246 
1247     return ret;
1248 }
1249 
1250 static int
1251 bna_rxf_vlan_strip_cfg_apply(struct bna_rxf *rxf)
1252 {
1253     if (rxf->vlan_strip_pending) {
1254         rxf->vlan_strip_pending = false;
1255         bna_bfi_vlan_strip_enable(rxf);
1256         return 1;
1257     }
1258 
1259     return 0;
1260 }
1261 
1262 /* RX */
1263 
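     /*
      * BNA_GET_RXQS: number of RxQs needed for a configuration - one per
      * path for BNA_RXP_SINGLE, two per path otherwise.
      */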
1264 #define BNA_GET_RXQS(qcfg)  (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
1265     (qcfg)->num_paths : ((qcfg)->num_paths * 2))
1266 
1267 #define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
1268     (PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
1269 
1270 #define call_rx_stop_cbfn(rx)                       \
1271 do {                                    \
1272     if ((rx)->stop_cbfn) {                      \
1273         void (*cbfn)(void *, struct bna_rx *);    \
1274         void *cbarg;                        \
1275         cbfn = (rx)->stop_cbfn;              \
1276         cbarg = (rx)->stop_cbarg;                  \
1277         (rx)->stop_cbfn = NULL;                 \
1278         (rx)->stop_cbarg = NULL;                \
1279         cbfn(cbarg, rx);                    \
1280     }                                  \
1281 } while (0)
1282 
1283 #define call_rx_stall_cbfn(rx)                      \
1284 do {                                    \
1285     if ((rx)->rx_stall_cbfn)                    \
1286         (rx)->rx_stall_cbfn((rx)->bna->bnad, (rx));     \
1287 } while (0)
1288 
1289 #define bfi_enet_datapath_q_init(bfi_q, bna_qpt)            \
1290 do {                                    \
1291     struct bna_dma_addr cur_q_addr =                \
1292         *((struct bna_dma_addr *)((bna_qpt)->kv_qpt_ptr));  \
1293     (bfi_q)->pg_tbl.a32.addr_lo = (bna_qpt)->hw_qpt_ptr.lsb;    \
1294     (bfi_q)->pg_tbl.a32.addr_hi = (bna_qpt)->hw_qpt_ptr.msb;    \
1295     (bfi_q)->first_entry.a32.addr_lo = cur_q_addr.lsb;      \
1296     (bfi_q)->first_entry.a32.addr_hi = cur_q_addr.msb;      \
1297     (bfi_q)->pages = htons((u16)(bna_qpt)->page_count); \
1298     (bfi_q)->page_sz = htons((u16)(bna_qpt)->page_size);\
1299 } while (0)
1300 
1301 static void bna_bfi_rx_enet_start(struct bna_rx *rx);
1302 static void bna_rx_enet_stop(struct bna_rx *rx);
1303 static void bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx);
1304 
1305 bfa_fsm_state_decl(bna_rx, stopped,
1306     struct bna_rx, enum bna_rx_event);
1307 bfa_fsm_state_decl(bna_rx, start_wait,
1308     struct bna_rx, enum bna_rx_event);
1309 bfa_fsm_state_decl(bna_rx, start_stop_wait,
1310     struct bna_rx, enum bna_rx_event);
1311 bfa_fsm_state_decl(bna_rx, rxf_start_wait,
1312     struct bna_rx, enum bna_rx_event);
1313 bfa_fsm_state_decl(bna_rx, started,
1314     struct bna_rx, enum bna_rx_event);
1315 bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
1316     struct bna_rx, enum bna_rx_event);
1317 bfa_fsm_state_decl(bna_rx, stop_wait,
1318     struct bna_rx, enum bna_rx_event);
1319 bfa_fsm_state_decl(bna_rx, cleanup_wait,
1320     struct bna_rx, enum bna_rx_event);
1321 bfa_fsm_state_decl(bna_rx, failed,
1322     struct bna_rx, enum bna_rx_event);
1323 bfa_fsm_state_decl(bna_rx, quiesce_wait,
1324     struct bna_rx, enum bna_rx_event);
1325 
1326 static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
1327 {
1328     call_rx_stop_cbfn(rx);
1329 }
1330 
1331 static void bna_rx_sm_stopped(struct bna_rx *rx,
1332                 enum bna_rx_event event)
1333 {
1334     switch (event) {
1335     case RX_E_START:
1336         bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1337         break;
1338 
1339     case RX_E_STOP:
1340         call_rx_stop_cbfn(rx);
1341         break;
1342 
1343     case RX_E_FAIL:
1344         /* no-op */
1345         break;
1346 
1347     default:
1348         bfa_sm_fault(event);
1349         break;
1350     }
1351 }
1352 
1353 static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
1354 {
1355     bna_bfi_rx_enet_start(rx);
1356 }
1357 
1358 static void
1359 bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
1360 {
1361 }
1362 
1363 static void
1364 bna_rx_sm_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1365 {
1366     switch (event) {
1367     case RX_E_FAIL:
1368     case RX_E_STOPPED:
1369         bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1370         rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1371         break;
1372 
1373     case RX_E_STARTED:
1374         bna_rx_enet_stop(rx);
1375         break;
1376 
1377     default:
1378         bfa_sm_fault(event);
1379         break;
1380     }
1381 }
1382 
1383 static void bna_rx_sm_start_wait(struct bna_rx *rx,
1384                 enum bna_rx_event event)
1385 {
1386     switch (event) {
1387     case RX_E_STOP:
1388         bfa_fsm_set_state(rx, bna_rx_sm_start_stop_wait);
1389         break;
1390 
1391     case RX_E_FAIL:
1392         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1393         break;
1394 
1395     case RX_E_STARTED:
1396         bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
1397         break;
1398 
1399     default:
1400         bfa_sm_fault(event);
1401         break;
1402     }
1403 }
1404 
1405 static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
1406 {
1407     rx->rx_post_cbfn(rx->bna->bnad, rx);
1408     bna_rxf_start(&rx->rxf);
1409 }
1410 
1411 static void
1412 bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
1413 {
1414 }
1415 
1416 static void
1417 bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1418 {
1419     switch (event) {
1420     case RX_E_FAIL:
1421         bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1422         bna_rxf_fail(&rx->rxf);
1423         call_rx_stall_cbfn(rx);
1424         rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1425         break;
1426 
1427     case RX_E_RXF_STARTED:
1428         bna_rxf_stop(&rx->rxf);
1429         break;
1430 
1431     case RX_E_RXF_STOPPED:
1432         bfa_fsm_set_state(rx, bna_rx_sm_stop_wait);
1433         call_rx_stall_cbfn(rx);
1434         bna_rx_enet_stop(rx);
1435         break;
1436 
1437     default:
1438         bfa_sm_fault(event);
1439         break;
1440     }
1441 
1442 }
1443 
1444 static void
1445 bna_rx_sm_start_stop_wait_entry(struct bna_rx *rx)
1446 {
1447 }
1448 
1449 static void
1450 bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
1451 {
1452     switch (event) {
1453     case RX_E_FAIL:
1454     case RX_E_STOPPED:
1455         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1456         break;
1457 
1458     case RX_E_STARTED:
1459         bna_rx_enet_stop(rx);
1460         break;
1461 
1462     default:
1463         bfa_sm_fault(event);
1464     }
1465 }
1466 
1467 static void
1468 bna_rx_sm_started_entry(struct bna_rx *rx)
1469 {
1470     struct bna_rxp *rxp;
1471     int is_regular = (rx->type == BNA_RX_T_REGULAR);
1472 
1473     /* Start IB */
1474     list_for_each_entry(rxp, &rx->rxp_q, qe)
1475         bna_ib_start(rx->bna, &rxp->cq.ib, is_regular);
1476 
1477     bna_ethport_cb_rx_started(&rx->bna->ethport);
1478 }
1479 
1480 static void
1481 bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1482 {
1483     switch (event) {
1484     case RX_E_STOP:
1485         bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1486         bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1487         bna_rxf_stop(&rx->rxf);
1488         break;
1489 
1490     case RX_E_FAIL:
1491         bfa_fsm_set_state(rx, bna_rx_sm_failed);
1492         bna_ethport_cb_rx_stopped(&rx->bna->ethport);
1493         bna_rxf_fail(&rx->rxf);
1494         call_rx_stall_cbfn(rx);
1495         rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1496         break;
1497 
1498     default:
1499         bfa_sm_fault(event);
1500         break;
1501     }
1502 }
1503 
1504 static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
1505                 enum bna_rx_event event)
1506 {
1507     switch (event) {
1508     case RX_E_STOP:
1509         bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1510         break;
1511 
1512     case RX_E_FAIL:
1513         bfa_fsm_set_state(rx, bna_rx_sm_failed);
1514         bna_rxf_fail(&rx->rxf);
1515         call_rx_stall_cbfn(rx);
1516         rx->rx_cleanup_cbfn(rx->bna->bnad, rx);
1517         break;
1518 
1519     case RX_E_RXF_STARTED:
1520         bfa_fsm_set_state(rx, bna_rx_sm_started);
1521         break;
1522 
1523     default:
1524         bfa_sm_fault(event);
1525         break;
1526     }
1527 }
1528 
1529 static void
1530 bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
1531 {
1532 }
1533 
1534 static void
1535 bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
1536 {
1537     switch (event) {
1538     case RX_E_FAIL:
1539     case RX_E_RXF_STOPPED:
1540         /* No-op */
1541         break;
1542 
1543     case RX_E_CLEANUP_DONE:
1544         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1545         break;
1546 
1547     default:
1548         bfa_sm_fault(event);
1549         break;
1550     }
1551 }
1552 
1553 static void
1554 bna_rx_sm_failed_entry(struct bna_rx *rx)
1555 {
1556 }
1557 
1558 static void
1559 bna_rx_sm_failed(struct bna_rx *rx, enum bna_rx_event event)
1560 {
1561     switch (event) {
1562     case RX_E_START:
1563         bfa_fsm_set_state(rx, bna_rx_sm_quiesce_wait);
1564         break;
1565 
1566     case RX_E_STOP:
1567         bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1568         break;
1569 
1570     case RX_E_FAIL:
1571     case RX_E_RXF_STARTED:
1572     case RX_E_RXF_STOPPED:
1573         /* No-op */
1574         break;
1575 
1576     case RX_E_CLEANUP_DONE:
1577         bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1578         break;
1579 
1580     default:
1581         bfa_sm_fault(event);
1582         break;
1583     }
}
1584 
1585 static void
1586 bna_rx_sm_quiesce_wait_entry(struct bna_rx *rx)
1587 {
1588 }
1589 
1590 static void
1591 bna_rx_sm_quiesce_wait(struct bna_rx *rx, enum bna_rx_event event)
1592 {
1593     switch (event) {
1594     case RX_E_STOP:
1595         bfa_fsm_set_state(rx, bna_rx_sm_cleanup_wait);
1596         break;
1597 
1598     case RX_E_FAIL:
1599         bfa_fsm_set_state(rx, bna_rx_sm_failed);
1600         break;
1601 
1602     case RX_E_CLEANUP_DONE:
1603         bfa_fsm_set_state(rx, bna_rx_sm_start_wait);
1604         break;
1605 
1606     default:
1607         bfa_sm_fault(event);
1608         break;
1609     }
1610 }
1611 
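     /*
      * Build and post the BFI_ENET_H2I_RX_CFG_SET_REQ describing every
      * queue set (large/small RxQs and CQ) along with the interrupt block
      * settings for this Rx.
      */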
1612 static void
1613 bna_bfi_rx_enet_start(struct bna_rx *rx)
1614 {
1615     struct bfi_enet_rx_cfg_req *cfg_req = &rx->bfi_enet_cmd.cfg_req;
1616     struct bna_rxp *rxp = NULL;
1617     struct bna_rxq *q0 = NULL, *q1 = NULL;
1618     int i;
1619 
1620     bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
1621         BFI_ENET_H2I_RX_CFG_SET_REQ, 0, rx->rid);
1622     cfg_req->mh.num_entries = htons(
1623         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_rx_cfg_req)));
1624 
1625     cfg_req->rx_cfg.frame_size = bna_enet_mtu_get(&rx->bna->enet);
1626     cfg_req->num_queue_sets = rx->num_paths;
1627     for (i = 0; i < rx->num_paths; i++) {
1628         rxp = rxp ? list_next_entry(rxp, qe)
1629             : list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
1630         GET_RXQS(rxp, q0, q1);
1631         switch (rxp->type) {
1632         case BNA_RXP_SLR:
1633         case BNA_RXP_HDS:
1634             /* Small RxQ */
1635             bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
1636                         &q1->qpt);
1637             cfg_req->q_cfg[i].qs.rx_buffer_size =
1638                 htons((u16)q1->buffer_size);
1639             fallthrough;
1640 
1641         case BNA_RXP_SINGLE:
1642             /* Large/Single RxQ */
1643             bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
1644                         &q0->qpt);
1645             if (q0->multi_buffer)
1646                 /* multi-buffer is enabled by allocating
1647                  * a new rx with a new set of resources.
1648                  * q0->buffer_size should be initialized to
1649                  * fragment size.
1650                  */
1651                 cfg_req->rx_cfg.multi_buffer =
1652                     BNA_STATUS_T_ENABLED;
1653             else
1654                 q0->buffer_size =
1655                     bna_enet_mtu_get(&rx->bna->enet);
1656             cfg_req->q_cfg[i].ql.rx_buffer_size =
1657                 htons((u16)q0->buffer_size);
1658             break;
1659 
1660         default:
1661             BUG_ON(1);
1662         }
1663 
1664         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
1665                     &rxp->cq.qpt);
1666 
1667         cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
1668             rxp->cq.ib.ib_seg_host_addr.lsb;
1669         cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
1670             rxp->cq.ib.ib_seg_host_addr.msb;
1671         cfg_req->q_cfg[i].ib.intr.msix_index =
1672             htons((u16)rxp->cq.ib.intr_vector);
1673     }
1674 
1675     cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_DISABLED;
1676     cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
1677     cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
1678     cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_DISABLED;
1679     cfg_req->ib_cfg.msix = (rxp->cq.ib.intr_type == BNA_INTR_T_MSIX)
1680                 ? BNA_STATUS_T_ENABLED :
1681                 BNA_STATUS_T_DISABLED;
1682     cfg_req->ib_cfg.coalescing_timeout =
1683             htonl((u32)rxp->cq.ib.coalescing_timeo);
1684     cfg_req->ib_cfg.inter_pkt_timeout =
1685             htonl((u32)rxp->cq.ib.interpkt_timeo);
1686     cfg_req->ib_cfg.inter_pkt_count = (u8)rxp->cq.ib.interpkt_count;
1687 
1688     switch (rxp->type) {
1689     case BNA_RXP_SLR:
1690         cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_LARGE_SMALL;
1691         break;
1692 
1693     case BNA_RXP_HDS:
1694         cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_HDS;
1695         cfg_req->rx_cfg.hds.type = rx->hds_cfg.hdr_type;
1696         cfg_req->rx_cfg.hds.force_offset = rx->hds_cfg.forced_offset;
1697         cfg_req->rx_cfg.hds.max_header_size = rx->hds_cfg.forced_offset;
1698         break;
1699 
1700     case BNA_RXP_SINGLE:
1701         cfg_req->rx_cfg.rxq_type = BFI_ENET_RXQ_SINGLE;
1702         break;
1703 
1704     default:
1705         BUG_ON(1);
1706     }
1707     cfg_req->rx_cfg.strip_vlan = rx->rxf.vlan_strip_status;
1708 
1709     bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL,
1710         sizeof(struct bfi_enet_rx_cfg_req), &cfg_req->mh);
1711     bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1712 }
1713 
1714 static void
1715 bna_bfi_rx_enet_stop(struct bna_rx *rx)
1716 {
1717     struct bfi_enet_req *req = &rx->bfi_enet_cmd.req;
1718 
1719     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
1720         BFI_ENET_H2I_RX_CFG_CLR_REQ, 0, rx->rid);
1721     req->mh.num_entries = htons(
1722         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
1723     bfa_msgq_cmd_set(&rx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
1724         &req->mh);
1725     bfa_msgq_cmd_post(&rx->bna->msgq, &rx->msgq_cmd);
1726 }
1727 
1728 static void
1729 bna_rx_enet_stop(struct bna_rx *rx)
1730 {
1731     struct bna_rxp *rxp;
1732 
1733     /* Stop IB */
1734     list_for_each_entry(rxp, &rx->rxp_q, qe)
1735         bna_ib_stop(rx->bna, &rxp->cq.ib);
1736 
1737     bna_bfi_rx_enet_stop(rx);
1738 }
1739 
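/* Annotation: bna_rx_res_check() verifies that enough free RX, RX-path and
 * RXQ objects remain for the requested configuration.  A BNA_RXP_SINGLE path
 * consumes one RXQ per path; SLR/HDS paths consume two RXQs per path.
 * Returns 1 when the request can be satisfied, 0 otherwise.
 */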
1740 static int
1741 bna_rx_res_check(struct bna_rx_mod *rx_mod, struct bna_rx_config *rx_cfg)
1742 {
1743     if ((rx_mod->rx_free_count == 0) ||
1744         (rx_mod->rxp_free_count == 0) ||
1745         (rx_mod->rxq_free_count == 0))
1746         return 0;
1747 
1748     if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
1749         if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1750             (rx_mod->rxq_free_count < rx_cfg->num_paths))
1751                 return 0;
1752     } else {
1753         if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
1754             (rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
1755             return 0;
1756     }
1757 
1758     return 1;
1759 }
1760 
1761 static struct bna_rxq *
1762 bna_rxq_get(struct bna_rx_mod *rx_mod)
1763 {
1764     struct bna_rxq *rxq = NULL;
1765 
1766     rxq = list_first_entry(&rx_mod->rxq_free_q, struct bna_rxq, qe);
1767     list_del(&rxq->qe);
1768     rx_mod->rxq_free_count--;
1769 
1770     return rxq;
1771 }
1772 
1773 static void
1774 bna_rxq_put(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
1775 {
1776     list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
1777     rx_mod->rxq_free_count++;
1778 }
1779 
1780 static struct bna_rxp *
1781 bna_rxp_get(struct bna_rx_mod *rx_mod)
1782 {
1783     struct bna_rxp *rxp = NULL;
1784 
1785     rxp = list_first_entry(&rx_mod->rxp_free_q, struct bna_rxp, qe);
1786     list_del(&rxp->qe);
1787     rx_mod->rxp_free_count--;
1788 
1789     return rxp;
1790 }
1791 
1792 static void
1793 bna_rxp_put(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
1794 {
1795     list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
1796     rx_mod->rxp_free_count++;
1797 }
1798 
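/* Annotation: the rx free list is kept sorted by rid.  bna_rx_get() takes a
 * regular RX from the head (lowest rid) and a loopback RX from the tail, then
 * moves it onto the active list; bna_rx_put() below reinserts a released RX
 * at its rid-ordered position.
 */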
1799 static struct bna_rx *
1800 bna_rx_get(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1801 {
1802     struct bna_rx *rx = NULL;
1803 
1804     BUG_ON(list_empty(&rx_mod->rx_free_q));
1805     if (type == BNA_RX_T_REGULAR)
1806         rx = list_first_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1807     else
1808         rx = list_last_entry(&rx_mod->rx_free_q, struct bna_rx, qe);
1809 
1810     rx_mod->rx_free_count--;
1811     list_move_tail(&rx->qe, &rx_mod->rx_active_q);
1812     rx->type = type;
1813 
1814     return rx;
1815 }
1816 
1817 static void
1818 bna_rx_put(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
1819 {
1820     struct list_head *qe;
1821 
1822     list_for_each_prev(qe, &rx_mod->rx_free_q)
1823         if (((struct bna_rx *)qe)->rid < rx->rid)
1824             break;
1825 
1826     list_add(&rx->qe, qe);
1827     rx_mod->rx_free_count++;
1828 }
1829 
1830 static void
1831 bna_rxp_add_rxqs(struct bna_rxp *rxp, struct bna_rxq *q0,
1832         struct bna_rxq *q1)
1833 {
1834     switch (rxp->type) {
1835     case BNA_RXP_SINGLE:
1836         rxp->rxq.single.only = q0;
1837         rxp->rxq.single.reserved = NULL;
1838         break;
1839     case BNA_RXP_SLR:
1840         rxp->rxq.slr.large = q0;
1841         rxp->rxq.slr.small = q1;
1842         break;
1843     case BNA_RXP_HDS:
1844         rxp->rxq.hds.data = q0;
1845         rxp->rxq.hds.hdr = q1;
1846         break;
1847     default:
1848         break;
1849     }
1850 }
1851 
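/* Annotation: bna_rxq_qpt_setup() populates the RxQ queue page table.  Each
 * hardware QPT entry receives the DMA address of one PAGE_SIZE chunk of the
 * queue memory, while rcb->sw_qpt keeps the matching kernel virtual addresses
 * for driver-side access to the same pages.
 */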
1852 static void
1853 bna_rxq_qpt_setup(struct bna_rxq *rxq,
1854         struct bna_rxp *rxp,
1855         u32 page_count,
1856         u32 page_size,
1857         struct bna_mem_descr *qpt_mem,
1858         struct bna_mem_descr *swqpt_mem,
1859         struct bna_mem_descr *page_mem)
1860 {
1861     u8 *kva;
1862     u64 dma;
1863     struct bna_dma_addr bna_dma;
1864     int i;
1865 
1866     rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1867     rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1868     rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
1869     rxq->qpt.page_count = page_count;
1870     rxq->qpt.page_size = page_size;
1871 
1872     rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
1873     rxq->rcb->sw_q = page_mem->kva;
1874 
1875     kva = page_mem->kva;
1876     BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1877 
1878     for (i = 0; i < rxq->qpt.page_count; i++) {
1879         rxq->rcb->sw_qpt[i] = kva;
1880         kva += PAGE_SIZE;
1881 
1882         BNA_SET_DMA_ADDR(dma, &bna_dma);
1883         ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
1884             bna_dma.lsb;
1885         ((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
1886             bna_dma.msb;
1887         dma += PAGE_SIZE;
1888     }
1889 }
1890 
1891 static void
1892 bna_rxp_cqpt_setup(struct bna_rxp *rxp,
1893         u32 page_count,
1894         u32 page_size,
1895         struct bna_mem_descr *qpt_mem,
1896         struct bna_mem_descr *swqpt_mem,
1897         struct bna_mem_descr *page_mem)
1898 {
1899     u8 *kva;
1900     u64 dma;
1901     struct bna_dma_addr bna_dma;
1902     int i;
1903 
1904     rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
1905     rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
1906     rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
1907     rxp->cq.qpt.page_count = page_count;
1908     rxp->cq.qpt.page_size = page_size;
1909 
1910     rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
1911     rxp->cq.ccb->sw_q = page_mem->kva;
1912 
1913     kva = page_mem->kva;
1914     BNA_GET_DMA_ADDR(&page_mem->dma, dma);
1915 
1916     for (i = 0; i < rxp->cq.qpt.page_count; i++) {
1917         rxp->cq.ccb->sw_qpt[i] = kva;
1918         kva += PAGE_SIZE;
1919 
1920         BNA_SET_DMA_ADDR(dma, &bna_dma);
1921         ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
1922             bna_dma.lsb;
1923         ((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
1924             bna_dma.msb;
1925         dma += PAGE_SIZE;
1926     }
1927 }
1928 
1929 static void
1930 bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx)
1931 {
1932     struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1933 
1934     bfa_wc_down(&rx_mod->rx_stop_wc);
1935 }
1936 
1937 static void
1938 bna_rx_mod_cb_rx_stopped_all(void *arg)
1939 {
1940     struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
1941 
1942     if (rx_mod->stop_cbfn)
1943         rx_mod->stop_cbfn(&rx_mod->bna->enet);
1944     rx_mod->stop_cbfn = NULL;
1945 }
1946 
1947 static void
1948 bna_rx_start(struct bna_rx *rx)
1949 {
1950     rx->rx_flags |= BNA_RX_F_ENET_STARTED;
1951     if (rx->rx_flags & BNA_RX_F_ENABLED)
1952         bfa_fsm_send_event(rx, RX_E_START);
1953 }
1954 
1955 static void
1956 bna_rx_stop(struct bna_rx *rx)
1957 {
1958     rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1959     if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
1960         bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx);
1961     else {
1962         rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
1963         rx->stop_cbarg = &rx->bna->rx_mod;
1964         bfa_fsm_send_event(rx, RX_E_STOP);
1965     }
1966 }
1967 
1968 static void
1969 bna_rx_fail(struct bna_rx *rx)
1970 {
1971     /* Indicate that Enet is no longer started and has failed */
1972     rx->rx_flags &= ~BNA_RX_F_ENET_STARTED;
1973     bfa_fsm_send_event(rx, RX_E_FAIL);
1974 }
1975 
1976 void
1977 bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1978 {
1979     struct bna_rx *rx;
1980 
1981     rx_mod->flags |= BNA_RX_MOD_F_ENET_STARTED;
1982     if (type == BNA_RX_T_LOOPBACK)
1983         rx_mod->flags |= BNA_RX_MOD_F_ENET_LOOPBACK;
1984 
1985     list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
1986         if (rx->type == type)
1987             bna_rx_start(rx);
1988 }
1989 
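/* Annotation: bna_rx_mod_stop() stops every active RX of the given type.  A
 * wait counter (bfa_wc) is raised once per RX being stopped, so
 * bna_rx_mod_cb_rx_stopped_all() - and therefore the enet stop callback -
 * runs only after the last RX has reported that it is stopped.
 */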
1990 void
1991 bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
1992 {
1993     struct bna_rx *rx;
1994 
1995     rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
1996     rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
1997 
1998     rx_mod->stop_cbfn = bna_enet_cb_rx_stopped;
1999 
2000     bfa_wc_init(&rx_mod->rx_stop_wc, bna_rx_mod_cb_rx_stopped_all, rx_mod);
2001 
2002     list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
2003         if (rx->type == type) {
2004             bfa_wc_up(&rx_mod->rx_stop_wc);
2005             bna_rx_stop(rx);
2006         }
2007 
2008     bfa_wc_wait(&rx_mod->rx_stop_wc);
2009 }
2010 
2011 void
2012 bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
2013 {
2014     struct bna_rx *rx;
2015 
2016     rx_mod->flags &= ~BNA_RX_MOD_F_ENET_STARTED;
2017     rx_mod->flags &= ~BNA_RX_MOD_F_ENET_LOOPBACK;
2018 
2019     list_for_each_entry(rx, &rx_mod->rx_active_q, qe)
2020         bna_rx_fail(rx);
2021 }
2022 
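/* Annotation: bna_rx_mod_init() carves the rx, rxp and rxq object arrays out
 * of the pre-allocated module resources and threads them onto their free
 * lists: one rx and one rx-path per supported path, and two RxQs per path
 * (large/data plus small/header).
 */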
2023 void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
2024             struct bna_res_info *res_info)
2025 {
2026     int index;
2027     struct bna_rx *rx_ptr;
2028     struct bna_rxp *rxp_ptr;
2029     struct bna_rxq *rxq_ptr;
2030 
2031     rx_mod->bna = bna;
2032     rx_mod->flags = 0;
2033 
2034     rx_mod->rx = (struct bna_rx *)
2035         res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
2036     rx_mod->rxp = (struct bna_rxp *)
2037         res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
2038     rx_mod->rxq = (struct bna_rxq *)
2039         res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
2040 
2041     /* Initialize the queues */
2042     INIT_LIST_HEAD(&rx_mod->rx_free_q);
2043     rx_mod->rx_free_count = 0;
2044     INIT_LIST_HEAD(&rx_mod->rxq_free_q);
2045     rx_mod->rxq_free_count = 0;
2046     INIT_LIST_HEAD(&rx_mod->rxp_free_q);
2047     rx_mod->rxp_free_count = 0;
2048     INIT_LIST_HEAD(&rx_mod->rx_active_q);
2049 
2050     /* Build RX queues */
2051     for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2052         rx_ptr = &rx_mod->rx[index];
2053 
2054         INIT_LIST_HEAD(&rx_ptr->rxp_q);
2055         rx_ptr->bna = NULL;
2056         rx_ptr->rid = index;
2057         rx_ptr->stop_cbfn = NULL;
2058         rx_ptr->stop_cbarg = NULL;
2059 
2060         list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
2061         rx_mod->rx_free_count++;
2062     }
2063 
2064     /* build RX-path queue */
2065     for (index = 0; index < bna->ioceth.attr.num_rxp; index++) {
2066         rxp_ptr = &rx_mod->rxp[index];
2067         list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
2068         rx_mod->rxp_free_count++;
2069     }
2070 
2071     /* build RXQ queue */
2072     for (index = 0; index < (bna->ioceth.attr.num_rxp * 2); index++) {
2073         rxq_ptr = &rx_mod->rxq[index];
2074         list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
2075         rx_mod->rxq_free_count++;
2076     }
2077 }
2078 
2079 void
2080 bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
2081 {
2082     rx_mod->bna = NULL;
2083 }
2084 
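/* Annotation: bna_bfi_rx_enet_start_rsp() handles the firmware response to
 * the RX config request.  It records the hardware queue ids, derives the CQ
 * and RxQ doorbell addresses from the PCI BAR using the offsets returned in
 * the response, resets the producer/consumer indexes, and advances the RX
 * state machine with RX_E_STARTED.
 */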
2085 void
2086 bna_bfi_rx_enet_start_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2087 {
2088     struct bfi_enet_rx_cfg_rsp *cfg_rsp = &rx->bfi_enet_cmd.cfg_rsp;
2089     struct bna_rxp *rxp = NULL;
2090     struct bna_rxq *q0 = NULL, *q1 = NULL;
2091     int i;
2092 
2093     bfa_msgq_rsp_copy(&rx->bna->msgq, (u8 *)cfg_rsp,
2094         sizeof(struct bfi_enet_rx_cfg_rsp));
2095 
2096     rx->hw_id = cfg_rsp->hw_id;
2097 
2098     for (i = 0, rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2099          i < rx->num_paths; i++, rxp = list_next_entry(rxp, qe)) {
2100         GET_RXQS(rxp, q0, q1);
2101 
2102         /* Setup doorbells */
2103         rxp->cq.ccb->i_dbell->doorbell_addr =
2104             rx->bna->pcidev.pci_bar_kva
2105             + ntohl(cfg_rsp->q_handles[i].i_dbell);
2106         rxp->hw_id = cfg_rsp->q_handles[i].hw_cqid;
2107         q0->rcb->q_dbell =
2108             rx->bna->pcidev.pci_bar_kva
2109             + ntohl(cfg_rsp->q_handles[i].ql_dbell);
2110         q0->hw_id = cfg_rsp->q_handles[i].hw_lqid;
2111         if (q1) {
2112             q1->rcb->q_dbell =
2113             rx->bna->pcidev.pci_bar_kva
2114             + ntohl(cfg_rsp->q_handles[i].qs_dbell);
2115             q1->hw_id = cfg_rsp->q_handles[i].hw_sqid;
2116         }
2117 
2118         /* Initialize producer/consumer indexes */
2119         (*rxp->cq.ccb->hw_producer_index) = 0;
2120         rxp->cq.ccb->producer_index = 0;
2121         q0->rcb->producer_index = q0->rcb->consumer_index = 0;
2122         if (q1)
2123             q1->rcb->producer_index = q1->rcb->consumer_index = 0;
2124     }
2125 
2126     bfa_fsm_send_event(rx, RX_E_STARTED);
2127 }
2128 
2129 void
2130 bna_bfi_rx_enet_stop_rsp(struct bna_rx *rx, struct bfi_msgq_mhdr *msghdr)
2131 {
2132     bfa_fsm_send_event(rx, RX_E_STOPPED);
2133 }
2134 
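/* Annotation: bna_rx_res_req() fills in the memory and interrupt resources
 * needed for an RX.  The CQ depth is the power-of-two rounded sum of the data
 * and header queue depths, each queue size is aligned to PAGE_SIZE to obtain
 * its page count, header-queue resources are requested only for non-SINGLE
 * path types, and one MSI-X vector is requested per path.
 */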
2135 void
2136 bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
2137 {
2138     u32 cq_size, hq_size, dq_size;
2139     u32 cpage_count, hpage_count, dpage_count;
2140     struct bna_mem_info *mem_info;
2141     u32 cq_depth;
2142     u32 hq_depth;
2143     u32 dq_depth;
2144 
2145     dq_depth = q_cfg->q0_depth;
2146     hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q1_depth);
2147     cq_depth = roundup_pow_of_two(dq_depth + hq_depth);
2148 
2149     cq_size = cq_depth * BFI_CQ_WI_SIZE;
2150     cq_size = ALIGN(cq_size, PAGE_SIZE);
2151     cpage_count = SIZE_TO_PAGES(cq_size);
2152 
2153     dq_depth = roundup_pow_of_two(dq_depth);
2154     dq_size = dq_depth * BFI_RXQ_WI_SIZE;
2155     dq_size = ALIGN(dq_size, PAGE_SIZE);
2156     dpage_count = SIZE_TO_PAGES(dq_size);
2157 
2158     if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
2159         hq_depth = roundup_pow_of_two(hq_depth);
2160         hq_size = hq_depth * BFI_RXQ_WI_SIZE;
2161         hq_size = ALIGN(hq_size, PAGE_SIZE);
2162         hpage_count = SIZE_TO_PAGES(hq_size);
2163     } else
2164         hpage_count = 0;
2165 
2166     res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
2167     mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
2168     mem_info->mem_type = BNA_MEM_T_KVA;
2169     mem_info->len = sizeof(struct bna_ccb);
2170     mem_info->num = q_cfg->num_paths;
2171 
2172     res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
2173     mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
2174     mem_info->mem_type = BNA_MEM_T_KVA;
2175     mem_info->len = sizeof(struct bna_rcb);
2176     mem_info->num = BNA_GET_RXQS(q_cfg);
2177 
2178     res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
2179     mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
2180     mem_info->mem_type = BNA_MEM_T_DMA;
2181     mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
2182     mem_info->num = q_cfg->num_paths;
2183 
2184     res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
2185     mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
2186     mem_info->mem_type = BNA_MEM_T_KVA;
2187     mem_info->len = cpage_count * sizeof(void *);
2188     mem_info->num = q_cfg->num_paths;
2189 
2190     res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
2191     mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
2192     mem_info->mem_type = BNA_MEM_T_DMA;
2193     mem_info->len = PAGE_SIZE * cpage_count;
2194     mem_info->num = q_cfg->num_paths;
2195 
2196     res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
2197     mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
2198     mem_info->mem_type = BNA_MEM_T_DMA;
2199     mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
2200     mem_info->num = q_cfg->num_paths;
2201 
2202     res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
2203     mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
2204     mem_info->mem_type = BNA_MEM_T_KVA;
2205     mem_info->len = dpage_count * sizeof(void *);
2206     mem_info->num = q_cfg->num_paths;
2207 
2208     res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
2209     mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
2210     mem_info->mem_type = BNA_MEM_T_DMA;
2211     mem_info->len = PAGE_SIZE * dpage_count;
2212     mem_info->num = q_cfg->num_paths;
2213 
2214     res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
2215     mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
2216     mem_info->mem_type = BNA_MEM_T_DMA;
2217     mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
2218     mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2219 
2220     res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
2221     mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
2222     mem_info->mem_type = BNA_MEM_T_KVA;
2223     mem_info->len = hpage_count * sizeof(void *);
2224     mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2225 
2226     res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
2227     mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
2228     mem_info->mem_type = BNA_MEM_T_DMA;
2229     mem_info->len = PAGE_SIZE * hpage_count;
2230     mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
2231 
2232     res_info[BNA_RX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
2233     mem_info = &res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info;
2234     mem_info->mem_type = BNA_MEM_T_DMA;
2235     mem_info->len = BFI_IBIDX_SIZE;
2236     mem_info->num = q_cfg->num_paths;
2237 
2238     res_info[BNA_RX_RES_MEM_T_RIT].res_type = BNA_RES_T_MEM;
2239     mem_info = &res_info[BNA_RX_RES_MEM_T_RIT].res_u.mem_info;
2240     mem_info->mem_type = BNA_MEM_T_KVA;
2241     mem_info->len = BFI_ENET_RSS_RIT_MAX;
2242     mem_info->num = 1;
2243 
2244     res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
2245     res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
2246     res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
2247 }
2248 
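/* Annotation: bna_rx_create() instantiates an RX from the pre-allocated
 * resources: it takes an rx object from the free list, and for every path
 * grabs an rx-path with its RxQ(s), wires up the interrupt block, RCBs, CCB
 * and queue page tables, initializes the RX filter (rxf), and leaves the RX
 * in the stopped state until bna_rx_enable() is called.
 */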
2249 struct bna_rx *
2250 bna_rx_create(struct bna *bna, struct bnad *bnad,
2251         struct bna_rx_config *rx_cfg,
2252         const struct bna_rx_event_cbfn *rx_cbfn,
2253         struct bna_res_info *res_info,
2254         void *priv)
2255 {
2256     struct bna_rx_mod *rx_mod = &bna->rx_mod;
2257     struct bna_rx *rx;
2258     struct bna_rxp *rxp;
2259     struct bna_rxq *q0;
2260     struct bna_rxq *q1;
2261     struct bna_intr_info *intr_info;
2262     struct bna_mem_descr *hqunmap_mem;
2263     struct bna_mem_descr *dqunmap_mem;
2264     struct bna_mem_descr *ccb_mem;
2265     struct bna_mem_descr *rcb_mem;
2266     struct bna_mem_descr *cqpt_mem;
2267     struct bna_mem_descr *cswqpt_mem;
2268     struct bna_mem_descr *cpage_mem;
2269     struct bna_mem_descr *hqpt_mem;
2270     struct bna_mem_descr *dqpt_mem;
2271     struct bna_mem_descr *hsqpt_mem;
2272     struct bna_mem_descr *dsqpt_mem;
2273     struct bna_mem_descr *hpage_mem;
2274     struct bna_mem_descr *dpage_mem;
2275     u32 dpage_count, hpage_count;
2276     u32 hq_idx, dq_idx, rcb_idx;
2277     u32 cq_depth, i;
2278     u32 page_count;
2279 
2280     if (!bna_rx_res_check(rx_mod, rx_cfg))
2281         return NULL;
2282 
2283     intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2284     ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
2285     rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
2286     dqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPDQ].res_u.mem_info.mdl[0];
2287     hqunmap_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPHQ].res_u.mem_info.mdl[0];
2288     cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
2289     cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
2290     cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
2291     hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
2292     dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
2293     hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
2294     dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
2295     hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
2296     dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
2297 
2298     page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.len /
2299             PAGE_SIZE;
2300 
2301     dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.len /
2302             PAGE_SIZE;
2303 
2304     hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.len /
2305             PAGE_SIZE;
2306 
2307     rx = bna_rx_get(rx_mod, rx_cfg->rx_type);
2308     rx->bna = bna;
2309     rx->rx_flags = 0;
2310     INIT_LIST_HEAD(&rx->rxp_q);
2311     rx->stop_cbfn = NULL;
2312     rx->stop_cbarg = NULL;
2313     rx->priv = priv;
2314 
2315     rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
2316     rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
2317     rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
2318     rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
2319     rx->rx_stall_cbfn = rx_cbfn->rx_stall_cbfn;
2320     /* Following callbacks are mandatory */
2321     rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
2322     rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
2323 
2324     if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_STARTED) {
2325         switch (rx->type) {
2326         case BNA_RX_T_REGULAR:
2327             if (!(rx->bna->rx_mod.flags &
2328                 BNA_RX_MOD_F_ENET_LOOPBACK))
2329                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2330             break;
2331         case BNA_RX_T_LOOPBACK:
2332             if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_ENET_LOOPBACK)
2333                 rx->rx_flags |= BNA_RX_F_ENET_STARTED;
2334             break;
2335         }
2336     }
2337 
2338     rx->num_paths = rx_cfg->num_paths;
2339     for (i = 0, hq_idx = 0, dq_idx = 0, rcb_idx = 0;
2340             i < rx->num_paths; i++) {
2341         rxp = bna_rxp_get(rx_mod);
2342         list_add_tail(&rxp->qe, &rx->rxp_q);
2343         rxp->type = rx_cfg->rxp_type;
2344         rxp->rx = rx;
2345         rxp->cq.rx = rx;
2346 
2347         q0 = bna_rxq_get(rx_mod);
2348         if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
2349             q1 = NULL;
2350         else
2351             q1 = bna_rxq_get(rx_mod);
2352 
2353         if (1 == intr_info->num)
2354             rxp->vector = intr_info->idl[0].vector;
2355         else
2356             rxp->vector = intr_info->idl[i].vector;
2357 
2358         /* Setup IB */
2359 
2360         rxp->cq.ib.ib_seg_host_addr.lsb =
2361         res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
2362         rxp->cq.ib.ib_seg_host_addr.msb =
2363         res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
2364         rxp->cq.ib.ib_seg_host_addr_kva =
2365         res_info[BNA_RX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
2366         rxp->cq.ib.intr_type = intr_info->intr_type;
2367         if (intr_info->intr_type == BNA_INTR_T_MSIX)
2368             rxp->cq.ib.intr_vector = rxp->vector;
2369         else
2370             rxp->cq.ib.intr_vector = BIT(rxp->vector);
2371         rxp->cq.ib.coalescing_timeo = rx_cfg->coalescing_timeo;
2372         rxp->cq.ib.interpkt_count = BFI_RX_INTERPKT_COUNT;
2373         rxp->cq.ib.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
2374 
2375         bna_rxp_add_rxqs(rxp, q0, q1);
2376 
2377         /* Setup large Q */
2378 
2379         q0->rx = rx;
2380         q0->rxp = rxp;
2381 
2382         q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2383         q0->rcb->unmap_q = (void *)dqunmap_mem[dq_idx].kva;
2384         rcb_idx++; dq_idx++;
2385         q0->rcb->q_depth = rx_cfg->q0_depth;
2386         q0->q_depth = rx_cfg->q0_depth;
2387         q0->multi_buffer = rx_cfg->q0_multi_buf;
2388         q0->buffer_size = rx_cfg->q0_buf_size;
2389         q0->num_vecs = rx_cfg->q0_num_vecs;
2390         q0->rcb->rxq = q0;
2391         q0->rcb->bnad = bna->bnad;
2392         q0->rcb->id = 0;
2393         q0->rx_packets = q0->rx_bytes = 0;
2394         q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
2395         q0->rxbuf_map_failed = 0;
2396 
2397         bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
2398             &dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
2399 
2400         if (rx->rcb_setup_cbfn)
2401             rx->rcb_setup_cbfn(bnad, q0->rcb);
2402 
2403         /* Setup small Q */
2404 
2405         if (q1) {
2406             q1->rx = rx;
2407             q1->rxp = rxp;
2408 
2409             q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
2410             q1->rcb->unmap_q = (void *)hqunmap_mem[hq_idx].kva;
2411             rcb_idx++; hq_idx++;
2412             q1->rcb->q_depth = rx_cfg->q1_depth;
2413             q1->q_depth = rx_cfg->q1_depth;
2414             q1->multi_buffer = BNA_STATUS_T_DISABLED;
2415             q1->num_vecs = 1;
2416             q1->rcb->rxq = q1;
2417             q1->rcb->bnad = bna->bnad;
2418             q1->rcb->id = 1;
2419             q1->buffer_size = (rx_cfg->rxp_type == BNA_RXP_HDS) ?
2420                     rx_cfg->hds_config.forced_offset
2421                     : rx_cfg->q1_buf_size;
2422             q1->rx_packets = q1->rx_bytes = 0;
2423             q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
2424             q1->rxbuf_map_failed = 0;
2425 
2426             bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
2427                 &hqpt_mem[i], &hsqpt_mem[i],
2428                 &hpage_mem[i]);
2429 
2430             if (rx->rcb_setup_cbfn)
2431                 rx->rcb_setup_cbfn(bnad, q1->rcb);
2432         }
2433 
2434         /* Setup CQ */
2435 
2436         rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
2437         cq_depth = rx_cfg->q0_depth +
2438             ((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
2439              0 : rx_cfg->q1_depth);
2440         /* If multi-buffer is enabled, the sum of q0_depth
2441          * and q1_depth need not be a power of 2, so round it up here.
2442          */
2443         cq_depth = roundup_pow_of_two(cq_depth);
2444         rxp->cq.ccb->q_depth = cq_depth;
2445         rxp->cq.ccb->cq = &rxp->cq;
2446         rxp->cq.ccb->rcb[0] = q0->rcb;
2447         q0->rcb->ccb = rxp->cq.ccb;
2448         if (q1) {
2449             rxp->cq.ccb->rcb[1] = q1->rcb;
2450             q1->rcb->ccb = rxp->cq.ccb;
2451         }
2452         rxp->cq.ccb->hw_producer_index =
2453             (u32 *)rxp->cq.ib.ib_seg_host_addr_kva;
2454         rxp->cq.ccb->i_dbell = &rxp->cq.ib.door_bell;
2455         rxp->cq.ccb->intr_type = rxp->cq.ib.intr_type;
2456         rxp->cq.ccb->intr_vector = rxp->cq.ib.intr_vector;
2457         rxp->cq.ccb->rx_coalescing_timeo =
2458             rxp->cq.ib.coalescing_timeo;
2459         rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
2460         rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
2461         rxp->cq.ccb->bnad = bna->bnad;
2462         rxp->cq.ccb->id = i;
2463 
2464         bna_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
2465             &cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[i]);
2466 
2467         if (rx->ccb_setup_cbfn)
2468             rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
2469     }
2470 
2471     rx->hds_cfg = rx_cfg->hds_config;
2472 
2473     bna_rxf_init(&rx->rxf, rx, rx_cfg, res_info);
2474 
2475     bfa_fsm_set_state(rx, bna_rx_sm_stopped);
2476 
2477     rx_mod->rid_mask |= BIT(rx->rid);
2478 
2479     return rx;
2480 }
2481 
2482 void
2483 bna_rx_destroy(struct bna_rx *rx)
2484 {
2485     struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
2486     struct bna_rxq *q0 = NULL;
2487     struct bna_rxq *q1 = NULL;
2488     struct bna_rxp *rxp;
2489     struct list_head *qe;
2490 
2491     bna_rxf_uninit(&rx->rxf);
2492 
2493     while (!list_empty(&rx->rxp_q)) {
2494         rxp = list_first_entry(&rx->rxp_q, struct bna_rxp, qe);
2495         list_del(&rxp->qe);
2496         GET_RXQS(rxp, q0, q1);
2497         if (rx->rcb_destroy_cbfn)
2498             rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
2499         q0->rcb = NULL;
2500         q0->rxp = NULL;
2501         q0->rx = NULL;
2502         bna_rxq_put(rx_mod, q0);
2503 
2504         if (q1) {
2505             if (rx->rcb_destroy_cbfn)
2506                 rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
2507             q1->rcb = NULL;
2508             q1->rxp = NULL;
2509             q1->rx = NULL;
2510             bna_rxq_put(rx_mod, q1);
2511         }
2512         rxp->rxq.slr.large = NULL;
2513         rxp->rxq.slr.small = NULL;
2514 
2515         if (rx->ccb_destroy_cbfn)
2516             rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
2517         rxp->cq.ccb = NULL;
2518         rxp->rx = NULL;
2519         bna_rxp_put(rx_mod, rxp);
2520     }
2521 
2522     list_for_each(qe, &rx_mod->rx_active_q)
2523         if (qe == &rx->qe) {
2524             list_del(&rx->qe);
2525             break;
2526         }
2527 
2528     rx_mod->rid_mask &= ~BIT(rx->rid);
2529 
2530     rx->bna = NULL;
2531     rx->priv = NULL;
2532     bna_rx_put(rx_mod, rx);
2533 }
2534 
2535 void
2536 bna_rx_enable(struct bna_rx *rx)
2537 {
2538     if (rx->fsm != (bfa_fsm_t)bna_rx_sm_stopped)
2539         return;
2540 
2541     rx->rx_flags |= BNA_RX_F_ENABLED;
2542     if (rx->rx_flags & BNA_RX_F_ENET_STARTED)
2543         bfa_fsm_send_event(rx, RX_E_START);
2544 }
2545 
2546 void
2547 bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
2548         void (*cbfn)(void *, struct bna_rx *))
2549 {
2550     if (type == BNA_SOFT_CLEANUP) {
2551         /* h/w should not be accessed. Treat it as if we're stopped */
2552         (*cbfn)(rx->bna->bnad, rx);
2553     } else {
2554         rx->stop_cbfn = cbfn;
2555         rx->stop_cbarg = rx->bna->bnad;
2556 
2557         rx->rx_flags &= ~BNA_RX_F_ENABLED;
2558 
2559         bfa_fsm_send_event(rx, RX_E_STOP);
2560     }
2561 }
2562 
2563 void
2564 bna_rx_cleanup_complete(struct bna_rx *rx)
2565 {
2566     bfa_fsm_send_event(rx, RX_E_CLEANUP_DONE);
2567 }
2568 
2569 void
2570 bna_rx_vlan_strip_enable(struct bna_rx *rx)
2571 {
2572     struct bna_rxf *rxf = &rx->rxf;
2573 
2574     if (rxf->vlan_strip_status == BNA_STATUS_T_DISABLED) {
2575         rxf->vlan_strip_status = BNA_STATUS_T_ENABLED;
2576         rxf->vlan_strip_pending = true;
2577         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2578     }
2579 }
2580 
2581 void
2582 bna_rx_vlan_strip_disable(struct bna_rx *rx)
2583 {
2584     struct bna_rxf *rxf = &rx->rxf;
2585 
2586     if (rxf->vlan_strip_status != BNA_STATUS_T_DISABLED) {
2587         rxf->vlan_strip_status = BNA_STATUS_T_DISABLED;
2588         rxf->vlan_strip_pending = true;
2589         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2590     }
2591 }
2592 
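/* Annotation: bna_rx_mode_set() applies a promiscuous / default /
 * all-multicast mode change.  Promiscuous and default modes are mutually
 * exclusive and may be owned by at most one RX function in the system, so the
 * request fails (BNA_CB_FAIL) if another rid already holds the mode.  If any
 * filter state actually changed, the RXF is kicked with RXF_E_CONFIG to
 * reprogram the hardware.
 */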
2593 enum bna_cb_status
2594 bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2595         enum bna_rxmode bitmask)
2596 {
2597     struct bna_rxf *rxf = &rx->rxf;
2598     int need_hw_config = 0;
2599 
2600     /* Error checks */
2601 
2602     if (is_promisc_enable(new_mode, bitmask)) {
2603         /* If promisc mode is already enabled elsewhere in the system */
2604         if ((rx->bna->promisc_rid != BFI_INVALID_RID) &&
2605             (rx->bna->promisc_rid != rxf->rx->rid))
2606             goto err_return;
2607 
2608         /* If default mode is already enabled in the system */
2609         if (rx->bna->default_mode_rid != BFI_INVALID_RID)
2610             goto err_return;
2611 
2612         /* Trying to enable promiscuous and default mode together */
2613         if (is_default_enable(new_mode, bitmask))
2614             goto err_return;
2615     }
2616 
2617     if (is_default_enable(new_mode, bitmask)) {
2618         /* If default mode is already enabled elsewhere in the system */
2619         if ((rx->bna->default_mode_rid != BFI_INVALID_RID) &&
2620             (rx->bna->default_mode_rid != rxf->rx->rid)) {
2621                 goto err_return;
2622         }
2623 
2624         /* If promiscuous mode is already enabled in the system */
2625         if (rx->bna->promisc_rid != BFI_INVALID_RID)
2626             goto err_return;
2627     }
2628 
2629     /* Process the commands */
2630 
2631     if (is_promisc_enable(new_mode, bitmask)) {
2632         if (bna_rxf_promisc_enable(rxf))
2633             need_hw_config = 1;
2634     } else if (is_promisc_disable(new_mode, bitmask)) {
2635         if (bna_rxf_promisc_disable(rxf))
2636             need_hw_config = 1;
2637     }
2638 
2639     if (is_allmulti_enable(new_mode, bitmask)) {
2640         if (bna_rxf_allmulti_enable(rxf))
2641             need_hw_config = 1;
2642     } else if (is_allmulti_disable(new_mode, bitmask)) {
2643         if (bna_rxf_allmulti_disable(rxf))
2644             need_hw_config = 1;
2645     }
2646 
2647     /* Trigger h/w if needed */
2648 
2649     if (need_hw_config) {
2650         rxf->cam_fltr_cbfn = NULL;
2651         rxf->cam_fltr_cbarg = rx->bna->bnad;
2652         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2653     }
2654 
2655     return BNA_CB_SUCCESS;
2656 
2657 err_return:
2658     return BNA_CB_FAIL;
2659 }
2660 
2661 void
2662 bna_rx_vlanfilter_enable(struct bna_rx *rx)
2663 {
2664     struct bna_rxf *rxf = &rx->rxf;
2665 
2666     if (rxf->vlan_filter_status == BNA_STATUS_T_DISABLED) {
2667         rxf->vlan_filter_status = BNA_STATUS_T_ENABLED;
2668         rxf->vlan_pending_bitmask = (u8)BFI_VLAN_BMASK_ALL;
2669         bfa_fsm_send_event(rxf, RXF_E_CONFIG);
2670     }
2671 }
2672 
2673 void
2674 bna_rx_coalescing_timeo_set(struct bna_rx *rx, int coalescing_timeo)
2675 {
2676     struct bna_rxp *rxp;
2677 
2678     list_for_each_entry(rxp, &rx->rxp_q, qe) {
2679         rxp->cq.ccb->rx_coalescing_timeo = coalescing_timeo;
2680         bna_ib_coalescing_timeo_set(&rxp->cq.ib, coalescing_timeo);
2681     }
2682 }
2683 
2684 void
2685 bna_rx_dim_reconfig(struct bna *bna, const u32 vector[][BNA_BIAS_T_MAX])
2686 {
2687     int i, j;
2688 
2689     for (i = 0; i < BNA_LOAD_T_MAX; i++)
2690         for (j = 0; j < BNA_BIAS_T_MAX; j++)
2691             bna->rx_mod.dim_vector[i][j] = vector[i][j];
2692 }
2693 
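/* Annotation: bna_rx_dim_update() implements dynamic interrupt moderation.
 * It classifies the traffic seen since the last update into one of eight load
 * levels by total packet rate, picks a bias from the small-vs-large packet
 * ratio, looks the new coalescing timeout up in the rx_mod dim_vector table,
 * and programs it into the interrupt block.
 */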
2694 void
2695 bna_rx_dim_update(struct bna_ccb *ccb)
2696 {
2697     struct bna *bna = ccb->cq->rx->bna;
2698     u32 load, bias;
2699     u32 pkt_rt, small_rt, large_rt;
2700     u8 coalescing_timeo;
2701 
2702     if ((ccb->pkt_rate.small_pkt_cnt == 0) &&
2703         (ccb->pkt_rate.large_pkt_cnt == 0))
2704         return;
2705 
2706     /* Arrive at preconfigured coalescing timeo value based on pkt rate */
2707 
2708     small_rt = ccb->pkt_rate.small_pkt_cnt;
2709     large_rt = ccb->pkt_rate.large_pkt_cnt;
2710 
2711     pkt_rt = small_rt + large_rt;
2712 
2713     if (pkt_rt < BNA_PKT_RATE_10K)
2714         load = BNA_LOAD_T_LOW_4;
2715     else if (pkt_rt < BNA_PKT_RATE_20K)
2716         load = BNA_LOAD_T_LOW_3;
2717     else if (pkt_rt < BNA_PKT_RATE_30K)
2718         load = BNA_LOAD_T_LOW_2;
2719     else if (pkt_rt < BNA_PKT_RATE_40K)
2720         load = BNA_LOAD_T_LOW_1;
2721     else if (pkt_rt < BNA_PKT_RATE_50K)
2722         load = BNA_LOAD_T_HIGH_1;
2723     else if (pkt_rt < BNA_PKT_RATE_60K)
2724         load = BNA_LOAD_T_HIGH_2;
2725     else if (pkt_rt < BNA_PKT_RATE_80K)
2726         load = BNA_LOAD_T_HIGH_3;
2727     else
2728         load = BNA_LOAD_T_HIGH_4;
2729 
2730     if (small_rt > (large_rt << 1))
2731         bias = 0;
2732     else
2733         bias = 1;
2734 
2735     ccb->pkt_rate.small_pkt_cnt = 0;
2736     ccb->pkt_rate.large_pkt_cnt = 0;
2737 
2738     coalescing_timeo = bna->rx_mod.dim_vector[load][bias];
2739     ccb->rx_coalescing_timeo = coalescing_timeo;
2740 
2741     /* Set it to IB */
2742     bna_ib_coalescing_timeo_set(&ccb->cq->ib, coalescing_timeo);
2743 }
2744 
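/* Annotation: default coalescing-timeout table for dynamic interrupt
 * moderation - one row per bna_load_type level and one column per bias value,
 * presumably installed into rx_mod.dim_vector by the bnad driver via
 * bna_rx_dim_reconfig() and consumed by bna_rx_dim_update() above.
 */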
2745 const u32 bna_napi_dim_vector[BNA_LOAD_T_MAX][BNA_BIAS_T_MAX] = {
2746     {12, 12},
2747     {6, 10},
2748     {5, 10},
2749     {4, 8},
2750     {3, 6},
2751     {3, 6},
2752     {2, 4},
2753     {1, 2},
2754 };
2755 
2756 /* TX */
2757 
2758 #define call_tx_stop_cbfn(tx)                       \
2759 do {                                    \
2760     if ((tx)->stop_cbfn) {                      \
2761         void (*cbfn)(void *, struct bna_tx *);      \
2762         void *cbarg;                        \
2763         cbfn = (tx)->stop_cbfn;                 \
2764         cbarg = (tx)->stop_cbarg;               \
2765         (tx)->stop_cbfn = NULL;                 \
2766         (tx)->stop_cbarg = NULL;                \
2767         cbfn(cbarg, (tx));                  \
2768     }                               \
2769 } while (0)
2770 
2771 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx);
2772 static void bna_bfi_tx_enet_start(struct bna_tx *tx);
2773 static void bna_tx_enet_stop(struct bna_tx *tx);
2774 
2775 enum bna_tx_event {
2776     TX_E_START          = 1,
2777     TX_E_STOP           = 2,
2778     TX_E_FAIL           = 3,
2779     TX_E_STARTED            = 4,
2780     TX_E_STOPPED            = 5,
2781     TX_E_CLEANUP_DONE       = 7,
2782     TX_E_BW_UPDATE          = 8,
2783 };
2784 
2785 bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx, enum bna_tx_event);
2786 bfa_fsm_state_decl(bna_tx, start_wait, struct bna_tx, enum bna_tx_event);
2787 bfa_fsm_state_decl(bna_tx, started, struct bna_tx, enum bna_tx_event);
2788 bfa_fsm_state_decl(bna_tx, stop_wait, struct bna_tx, enum bna_tx_event);
2789 bfa_fsm_state_decl(bna_tx, cleanup_wait, struct bna_tx,
2790             enum bna_tx_event);
2791 bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
2792             enum bna_tx_event);
2793 bfa_fsm_state_decl(bna_tx, prio_cleanup_wait, struct bna_tx,
2794             enum bna_tx_event);
2795 bfa_fsm_state_decl(bna_tx, failed, struct bna_tx, enum bna_tx_event);
2796 bfa_fsm_state_decl(bna_tx, quiesce_wait, struct bna_tx,
2797             enum bna_tx_event);
2798 
2799 static void
2800 bna_tx_sm_stopped_entry(struct bna_tx *tx)
2801 {
2802     call_tx_stop_cbfn(tx);
2803 }
2804 
2805 static void
2806 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
2807 {
2808     switch (event) {
2809     case TX_E_START:
2810         bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
2811         break;
2812 
2813     case TX_E_STOP:
2814         call_tx_stop_cbfn(tx);
2815         break;
2816 
2817     case TX_E_FAIL:
2818         /* No-op */
2819         break;
2820 
2821     case TX_E_BW_UPDATE:
2822         /* No-op */
2823         break;
2824 
2825     default:
2826         bfa_sm_fault(event);
2827     }
2828 }
2829 
2830 static void
2831 bna_tx_sm_start_wait_entry(struct bna_tx *tx)
2832 {
2833     bna_bfi_tx_enet_start(tx);
2834 }
2835 
2836 static void
2837 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event)
2838 {
2839     switch (event) {
2840     case TX_E_STOP:
2841         tx->flags &= ~BNA_TX_F_BW_UPDATED;
2842         bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2843         break;
2844 
2845     case TX_E_FAIL:
2846         tx->flags &= ~BNA_TX_F_BW_UPDATED;
2847         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2848         break;
2849 
2850     case TX_E_STARTED:
2851         if (tx->flags & BNA_TX_F_BW_UPDATED) {
2852             tx->flags &= ~BNA_TX_F_BW_UPDATED;
2853             bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2854         } else
2855             bfa_fsm_set_state(tx, bna_tx_sm_started);
2856         break;
2857 
2858     case TX_E_BW_UPDATE:
2859         tx->flags |= BNA_TX_F_BW_UPDATED;
2860         break;
2861 
2862     default:
2863         bfa_sm_fault(event);
2864     }
2865 }
2866 
2867 static void
2868 bna_tx_sm_started_entry(struct bna_tx *tx)
2869 {
2870     struct bna_txq *txq;
2871     int is_regular = (tx->type == BNA_TX_T_REGULAR);
2872 
2873     list_for_each_entry(txq, &tx->txq_q, qe) {
2874         txq->tcb->priority = txq->priority;
2875         /* Start IB */
2876         bna_ib_start(tx->bna, &txq->ib, is_regular);
2877     }
2878     tx->tx_resume_cbfn(tx->bna->bnad, tx);
2879 }
2880 
2881 static void
2882 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
2883 {
2884     switch (event) {
2885     case TX_E_STOP:
2886         bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2887         tx->tx_stall_cbfn(tx->bna->bnad, tx);
2888         bna_tx_enet_stop(tx);
2889         break;
2890 
2891     case TX_E_FAIL:
2892         bfa_fsm_set_state(tx, bna_tx_sm_failed);
2893         tx->tx_stall_cbfn(tx->bna->bnad, tx);
2894         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2895         break;
2896 
2897     case TX_E_BW_UPDATE:
2898         bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
2899         break;
2900 
2901     default:
2902         bfa_sm_fault(event);
2903     }
2904 }
2905 
2906 static void
2907 bna_tx_sm_stop_wait_entry(struct bna_tx *tx)
2908 {
2909 }
2910 
2911 static void
2912 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2913 {
2914     switch (event) {
2915     case TX_E_FAIL:
2916     case TX_E_STOPPED:
2917         bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
2918         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2919         break;
2920 
2921     case TX_E_STARTED:
2922         /**
2923          * We are here due to the start_wait -> stop_wait transition
2924          * on the TX_E_STOP event
2925          */
2926         bna_tx_enet_stop(tx);
2927         break;
2928 
2929     case TX_E_BW_UPDATE:
2930         /* No-op */
2931         break;
2932 
2933     default:
2934         bfa_sm_fault(event);
2935     }
2936 }
2937 
2938 static void
2939 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx)
2940 {
2941 }
2942 
2943 static void
2944 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
2945 {
2946     switch (event) {
2947     case TX_E_FAIL:
2948     case TX_E_BW_UPDATE:
2949         /* No-op */
2950         break;
2951 
2952     case TX_E_CLEANUP_DONE:
2953         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
2954         break;
2955 
2956     default:
2957         bfa_sm_fault(event);
2958     }
2959 }
2960 
2961 static void
2962 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
2963 {
2964     tx->tx_stall_cbfn(tx->bna->bnad, tx);
2965     bna_tx_enet_stop(tx);
2966 }
2967 
2968 static void
2969 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
2970 {
2971     switch (event) {
2972     case TX_E_STOP:
2973         bfa_fsm_set_state(tx, bna_tx_sm_stop_wait);
2974         break;
2975 
2976     case TX_E_FAIL:
2977         bfa_fsm_set_state(tx, bna_tx_sm_failed);
2978         tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2979         break;
2980 
2981     case TX_E_STOPPED:
2982         bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait);
2983         break;
2984 
2985     case TX_E_BW_UPDATE:
2986         /* No-op */
2987         break;
2988 
2989     default:
2990         bfa_sm_fault(event);
2991     }
2992 }
2993 
2994 static void
2995 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx)
2996 {
2997     tx->tx_cleanup_cbfn(tx->bna->bnad, tx);
2998 }
2999 
3000 static void
3001 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event)
3002 {
3003     switch (event) {
3004     case TX_E_STOP:
3005         bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3006         break;
3007 
3008     case TX_E_FAIL:
3009         bfa_fsm_set_state(tx, bna_tx_sm_failed);
3010         break;
3011 
3012     case TX_E_BW_UPDATE:
3013         /* No-op */
3014         break;
3015 
3016     case TX_E_CLEANUP_DONE:
3017         bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3018         break;
3019 
3020     default:
3021         bfa_sm_fault(event);
3022     }
3023 }
3024 
3025 static void
3026 bna_tx_sm_failed_entry(struct bna_tx *tx)
3027 {
3028 }
3029 
3030 static void
3031 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event)
3032 {
3033     switch (event) {
3034     case TX_E_START:
3035         bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait);
3036         break;
3037 
3038     case TX_E_STOP:
3039         bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3040         break;
3041 
3042     case TX_E_FAIL:
3043         /* No-op */
3044         break;
3045 
3046     case TX_E_CLEANUP_DONE:
3047         bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3048         break;
3049 
3050     default:
3051         bfa_sm_fault(event);
3052     }
3053 }
3054 
3055 static void
3056 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx)
3057 {
3058 }
3059 
3060 static void
3061 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event)
3062 {
3063     switch (event) {
3064     case TX_E_STOP:
3065         bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait);
3066         break;
3067 
3068     case TX_E_FAIL:
3069         bfa_fsm_set_state(tx, bna_tx_sm_failed);
3070         break;
3071 
3072     case TX_E_CLEANUP_DONE:
3073         bfa_fsm_set_state(tx, bna_tx_sm_start_wait);
3074         break;
3075 
3076     case TX_E_BW_UPDATE:
3077         /* No-op */
3078         break;
3079 
3080     default:
3081         bfa_sm_fault(event);
3082     }
3083 }
3084 
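/* Annotation: bna_bfi_tx_enet_start() mirrors the RX path above, building a
 * BFI_ENET_H2I_TX_CFG_SET_REQ with one entry per TxQ (queue page table,
 * priority and IB settings) plus the VLAN and interrupt-coalescing
 * configuration, and posting it to the firmware message queue.
 */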
3085 static void
3086 bna_bfi_tx_enet_start(struct bna_tx *tx)
3087 {
3088     struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req;
3089     struct bna_txq *txq = NULL;
3090     int i;
3091 
3092     bfi_msgq_mhdr_set(cfg_req->mh, BFI_MC_ENET,
3093         BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid);
3094     cfg_req->mh.num_entries = htons(
3095         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_tx_cfg_req)));
3096 
3097     cfg_req->num_queues = tx->num_txq;
3098     for (i = 0; i < tx->num_txq; i++) {
3099         txq = txq ? list_next_entry(txq, qe)
3100             : list_first_entry(&tx->txq_q, struct bna_txq, qe);
3101         bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
3102         cfg_req->q_cfg[i].q.priority = txq->priority;
3103 
3104         cfg_req->q_cfg[i].ib.index_addr.a32.addr_lo =
3105             txq->ib.ib_seg_host_addr.lsb;
3106         cfg_req->q_cfg[i].ib.index_addr.a32.addr_hi =
3107             txq->ib.ib_seg_host_addr.msb;
3108         cfg_req->q_cfg[i].ib.intr.msix_index =
3109             htons((u16)txq->ib.intr_vector);
3110     }
3111 
3112     cfg_req->ib_cfg.int_pkt_dma = BNA_STATUS_T_ENABLED;
3113     cfg_req->ib_cfg.int_enabled = BNA_STATUS_T_ENABLED;
3114     cfg_req->ib_cfg.int_pkt_enabled = BNA_STATUS_T_DISABLED;
3115     cfg_req->ib_cfg.continuous_coalescing = BNA_STATUS_T_ENABLED;
3116     cfg_req->ib_cfg.msix = (txq->ib.intr_type == BNA_INTR_T_MSIX)
3117                 ? BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
3118     cfg_req->ib_cfg.coalescing_timeout =
3119             htonl((u32)txq->ib.coalescing_timeo);
3120     cfg_req->ib_cfg.inter_pkt_timeout =
3121             htonl((u32)txq->ib.interpkt_timeo);
3122     cfg_req->ib_cfg.inter_pkt_count = (u8)txq->ib.interpkt_count;
3123 
3124     cfg_req->tx_cfg.vlan_mode = BFI_ENET_TX_VLAN_WI;
3125     cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id);
3126     cfg_req->tx_cfg.admit_tagged_frame = BNA_STATUS_T_ENABLED;
3127     cfg_req->tx_cfg.apply_vlan_filter = BNA_STATUS_T_DISABLED;
3128 
3129     bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL,
3130         sizeof(struct bfi_enet_tx_cfg_req), &cfg_req->mh);
3131     bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3132 }
3133 
3134 static void
3135 bna_bfi_tx_enet_stop(struct bna_tx *tx)
3136 {
3137     struct bfi_enet_req *req = &tx->bfi_enet_cmd.req;
3138 
3139     bfi_msgq_mhdr_set(req->mh, BFI_MC_ENET,
3140         BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid);
3141     req->mh.num_entries = htons(
3142         bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_req)));
3143     bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req),
3144         &req->mh);
3145     bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd);
3146 }
3147 
3148 static void
3149 bna_tx_enet_stop(struct bna_tx *tx)
3150 {
3151     struct bna_txq *txq;
3152 
3153     /* Stop IB */
3154     list_for_each_entry(txq, &tx->txq_q, qe)
3155         bna_ib_stop(tx->bna, &txq->ib);
3156 
3157     bna_bfi_tx_enet_stop(tx);
3158 }
3159 
3160 static void
3161 bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
3162         struct bna_mem_descr *qpt_mem,
3163         struct bna_mem_descr *swqpt_mem,
3164         struct bna_mem_descr *page_mem)
3165 {
3166     u8 *kva;
3167     u64 dma;
3168     struct bna_dma_addr bna_dma;
3169     int i;
3170 
3171     txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
3172     txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
3173     txq->qpt.kv_qpt_ptr = qpt_mem->kva;
3174     txq->qpt.page_count = page_count;
3175     txq->qpt.page_size = page_size;
3176 
3177     txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
3178     txq->tcb->sw_q = page_mem->kva;
3179 
3180     kva = page_mem->kva;
3181     BNA_GET_DMA_ADDR(&page_mem->dma, dma);
3182 
3183     for (i = 0; i < page_count; i++) {
3184         txq->tcb->sw_qpt[i] = kva;
3185         kva += PAGE_SIZE;
3186 
3187         BNA_SET_DMA_ADDR(dma, &bna_dma);
3188         ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
3189             bna_dma.lsb;
3190         ((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
3191             bna_dma.msb;
3192         dma += PAGE_SIZE;
3193     }
3194 }
3195 
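/* Annotation: like bna_rx_get()/bna_rx_put(), the tx free list is rid-sorted.
 * bna_tx_get() returns NULL when the list is empty, takes a regular TX from
 * the head and a loopback TX from the tail, and bna_tx_free() below returns
 * the TxQs to their free list and reinserts the TX at its rid-ordered
 * position.
 */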
3196 static struct bna_tx *
3197 bna_tx_get(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3198 {
3199     struct bna_tx *tx = NULL;
3200 
3201     if (list_empty(&tx_mod->tx_free_q))
3202         return NULL;
3203     if (type == BNA_TX_T_REGULAR)
3204         tx = list_first_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3205     else
3206         tx = list_last_entry(&tx_mod->tx_free_q, struct bna_tx, qe);
3207     list_del(&tx->qe);
3208     tx->type = type;
3209 
3210     return tx;
3211 }
3212 
3213 static void
3214 bna_tx_free(struct bna_tx *tx)
3215 {
3216     struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
3217     struct bna_txq *txq;
3218     struct list_head *qe;
3219 
3220     while (!list_empty(&tx->txq_q)) {
3221         txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3222         txq->tcb = NULL;
3223         txq->tx = NULL;
3224         list_move_tail(&txq->qe, &tx_mod->txq_free_q);
3225     }
3226 
3227     list_for_each(qe, &tx_mod->tx_active_q) {
3228         if (qe == &tx->qe) {
3229             list_del(&tx->qe);
3230             break;
3231         }
3232     }
3233 
3234     tx->bna = NULL;
3235     tx->priv = NULL;
3236 
3237     list_for_each_prev(qe, &tx_mod->tx_free_q)
3238         if (((struct bna_tx *)qe)->rid < tx->rid)
3239             break;
3240 
3241     list_add(&tx->qe, qe);
3242 }
3243 
3244 static void
3245 bna_tx_start(struct bna_tx *tx)
3246 {
3247     tx->flags |= BNA_TX_F_ENET_STARTED;
3248     if (tx->flags & BNA_TX_F_ENABLED)
3249         bfa_fsm_send_event(tx, TX_E_START);
3250 }
3251 
3252 static void
3253 bna_tx_stop(struct bna_tx *tx)
3254 {
3255     tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
3256     tx->stop_cbarg = &tx->bna->tx_mod;
3257 
3258     tx->flags &= ~BNA_TX_F_ENET_STARTED;
3259     bfa_fsm_send_event(tx, TX_E_STOP);
3260 }
3261 
3262 static void
3263 bna_tx_fail(struct bna_tx *tx)
3264 {
3265     tx->flags &= ~BNA_TX_F_ENET_STARTED;
3266     bfa_fsm_send_event(tx, TX_E_FAIL);
3267 }
3268 
3269 void
3270 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3271 {
3272     struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp;
3273     struct bna_txq *txq = NULL;
3274     int i;
3275 
3276     bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp,
3277         sizeof(struct bfi_enet_tx_cfg_rsp));
3278 
3279     tx->hw_id = cfg_rsp->hw_id;
3280 
3281     for (i = 0, txq = list_first_entry(&tx->txq_q, struct bna_txq, qe);
3282          i < tx->num_txq; i++, txq = list_next_entry(txq, qe)) {
3283         /* Setup doorbells */
3284         txq->tcb->i_dbell->doorbell_addr =
3285             tx->bna->pcidev.pci_bar_kva
3286             + ntohl(cfg_rsp->q_handles[i].i_dbell);
3287         txq->tcb->q_dbell =
3288             tx->bna->pcidev.pci_bar_kva
3289             + ntohl(cfg_rsp->q_handles[i].q_dbell);
3290         txq->hw_id = cfg_rsp->q_handles[i].hw_qid;
3291 
3292         /* Initialize producer/consumer indexes */
3293         (*txq->tcb->hw_consumer_index) = 0;
3294         txq->tcb->producer_index = txq->tcb->consumer_index = 0;
3295     }
3296 
3297     bfa_fsm_send_event(tx, TX_E_STARTED);
3298 }
3299 
3300 void
3301 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr)
3302 {
3303     bfa_fsm_send_event(tx, TX_E_STOPPED);
3304 }
3305 
3306 void
3307 bna_bfi_bw_update_aen(struct bna_tx_mod *tx_mod)
3308 {
3309     struct bna_tx *tx;
3310 
3311     list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3312         bfa_fsm_send_event(tx, TX_E_BW_UPDATE);
3313 }
3314 
3315 void
3316 bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
3317 {
3318     u32 q_size;
3319     u32 page_count;
3320     struct bna_mem_info *mem_info;
3321 
3322     res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
3323     mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
3324     mem_info->mem_type = BNA_MEM_T_KVA;
3325     mem_info->len = sizeof(struct bna_tcb);
3326     mem_info->num = num_txq;
3327 
3328     q_size = txq_depth * BFI_TXQ_WI_SIZE;
3329     q_size = ALIGN(q_size, PAGE_SIZE);
3330     page_count = q_size >> PAGE_SHIFT;
3331 
3332     res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
3333     mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
3334     mem_info->mem_type = BNA_MEM_T_DMA;
3335     mem_info->len = page_count * sizeof(struct bna_dma_addr);
3336     mem_info->num = num_txq;
3337 
3338     res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
3339     mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
3340     mem_info->mem_type = BNA_MEM_T_KVA;
3341     mem_info->len = page_count * sizeof(void *);
3342     mem_info->num = num_txq;
3343 
3344     res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
3345     mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
3346     mem_info->mem_type = BNA_MEM_T_DMA;
3347     mem_info->len = PAGE_SIZE * page_count;
3348     mem_info->num = num_txq;
3349 
3350     res_info[BNA_TX_RES_MEM_T_IBIDX].res_type = BNA_RES_T_MEM;
3351     mem_info = &res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info;
3352     mem_info->mem_type = BNA_MEM_T_DMA;
3353     mem_info->len = BFI_IBIDX_SIZE;
3354     mem_info->num = num_txq;
3355 
3356     res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
3357     res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
3358             BNA_INTR_T_MSIX;
3359     res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
3360 }
3361 
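/* Annotation: bna_tx_create() instantiates a TX object.  The interrupt
 * resource must provide either a single shared vector or one vector per TxQ;
 * the TX and its TxQs are pulled from the free lists, and each TxQ gets its
 * TCB, interrupt block and queue page table wired up before the TCB setup
 * callback is invoked.
 */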
3362 struct bna_tx *
3363 bna_tx_create(struct bna *bna, struct bnad *bnad,
3364         struct bna_tx_config *tx_cfg,
3365         const struct bna_tx_event_cbfn *tx_cbfn,
3366         struct bna_res_info *res_info, void *priv)
3367 {
3368     struct bna_intr_info *intr_info;
3369     struct bna_tx_mod *tx_mod = &bna->tx_mod;
3370     struct bna_tx *tx;
3371     struct bna_txq *txq;
3372     int page_count;
3373     int i;
3374 
3375     intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
3376     page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len) /
3377                     PAGE_SIZE;
3378 
3379     /**
3380      * Get resources
3381      */
3382 
3383     if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
3384         return NULL;
3385 
3386     /* Tx */
3387 
3388     tx = bna_tx_get(tx_mod, tx_cfg->tx_type);
3389     if (!tx)
3390         return NULL;
3391     tx->bna = bna;
3392     tx->priv = priv;
3393 
3394     /* TxQs */
3395 
3396     INIT_LIST_HEAD(&tx->txq_q);
3397     for (i = 0; i < tx_cfg->num_txq; i++) {
3398         if (list_empty(&tx_mod->txq_free_q))
3399             goto err_return;
3400 
3401         txq = list_first_entry(&tx_mod->txq_free_q, struct bna_txq, qe);
3402         list_move_tail(&txq->qe, &tx->txq_q);
3403         txq->tx = tx;
3404     }
3405 
3406     /*
3407      * Initialize
3408      */
3409 
3410     /* Tx */
3411 
3412     tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
3413     tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
3414     /* The following callbacks are mandatory */
3415     tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
3416     tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
3417     tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
3418 
3419     list_add_tail(&tx->qe, &tx_mod->tx_active_q);
3420 
3421     tx->num_txq = tx_cfg->num_txq;
3422 
3423     tx->flags = 0;
3424     if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) {
3425         switch (tx->type) {
3426         case BNA_TX_T_REGULAR:
3427             if (!(tx->bna->tx_mod.flags &
3428                 BNA_TX_MOD_F_ENET_LOOPBACK))
3429                 tx->flags |= BNA_TX_F_ENET_STARTED;
3430             break;
3431         case BNA_TX_T_LOOPBACK:
3432             if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK)
3433                 tx->flags |= BNA_TX_F_ENET_STARTED;
3434             break;
3435         }
3436     }
3437 
3438     /* TxQ */
3439 
3440     i = 0;
3441     list_for_each_entry(txq, &tx->txq_q, qe) {
3442         txq->tcb = (struct bna_tcb *)
3443         res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
3444         txq->tx_packets = 0;
3445         txq->tx_bytes = 0;
3446 
3447         /* IB */
3448         txq->ib.ib_seg_host_addr.lsb =
3449         res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
3450         txq->ib.ib_seg_host_addr.msb =
3451         res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
3452         txq->ib.ib_seg_host_addr_kva =
3453         res_info[BNA_TX_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
3454         txq->ib.intr_type = intr_info->intr_type;
3455         txq->ib.intr_vector = (intr_info->num == 1) ?
3456                     intr_info->idl[0].vector :
3457                     intr_info->idl[i].vector;
3458         if (intr_info->intr_type == BNA_INTR_T_INTX)
3459             txq->ib.intr_vector = BIT(txq->ib.intr_vector);
3460         txq->ib.coalescing_timeo = tx_cfg->coalescing_timeo;
3461         txq->ib.interpkt_timeo = BFI_TX_INTERPKT_TIMEO;
3462         txq->ib.interpkt_count = BFI_TX_INTERPKT_COUNT;
3463 
3464         /* TCB */
3465 
3466         txq->tcb->q_depth = tx_cfg->txq_depth;
3467         txq->tcb->unmap_q = (void *)
3468         res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
3469         txq->tcb->hw_consumer_index =
3470             (u32 *)txq->ib.ib_seg_host_addr_kva;
3471         txq->tcb->i_dbell = &txq->ib.door_bell;
3472         txq->tcb->intr_type = txq->ib.intr_type;
3473         txq->tcb->intr_vector = txq->ib.intr_vector;
3474         txq->tcb->txq = txq;
3475         txq->tcb->bnad = bnad;
3476         txq->tcb->id = i;
3477 
3478         /* QPT, SWQPT, Pages */
3479         bna_txq_qpt_setup(txq, page_count, PAGE_SIZE,
3480             &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
3481             &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
3482             &res_info[BNA_TX_RES_MEM_T_PAGE].
3483                   res_u.mem_info.mdl[i]);
3484 
3485         /* Callback to bnad for setting up TCB */
3486         if (tx->tcb_setup_cbfn)
3487             (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
3488 
3489         if (tx_cfg->num_txq == BFI_TX_MAX_PRIO)
3490             txq->priority = txq->tcb->id;
3491         else
3492             txq->priority = tx_mod->default_prio;
3493 
3494         i++;
3495     }
3496 
3497     tx->txf_vlan_id = 0;
3498 
3499     bfa_fsm_set_state(tx, bna_tx_sm_stopped);
3500 
3501     tx_mod->rid_mask |= BIT(tx->rid);
3502 
3503     return tx;
3504 
3505 err_return:
3506     bna_tx_free(tx);
3507     return NULL;
3508 }
3509 
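     /*
      * Tear down a Tx object: let bnad release each TCB through the destroy
      * callback, clear this Tx's bit in the module rid mask and hand the
      * object back to the module via bna_tx_free().
      */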
3510 void
3511 bna_tx_destroy(struct bna_tx *tx)
3512 {
3513     struct bna_txq *txq;
3514 
3515     list_for_each_entry(txq, &tx->txq_q, qe)
3516         if (tx->tcb_destroy_cbfn)
3517             (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
3518 
3519     tx->bna->tx_mod.rid_mask &= ~BIT(tx->rid);
3520     bna_tx_free(tx);
3521 }
3522 
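     /*
      * Mark the Tx as enabled and, if the enet has already been started,
      * kick the FSM out of the stopped state.
      */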
3523 void
3524 bna_tx_enable(struct bna_tx *tx)
3525 {
3526     if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
3527         return;
3528 
3529     tx->flags |= BNA_TX_F_ENABLED;
3530 
3531     if (tx->flags & BNA_TX_F_ENET_STARTED)
3532         bfa_fsm_send_event(tx, TX_E_START);
3533 }
3534 
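     /*
      * Disable the Tx.  A soft cleanup only invokes @cbfn; a hard cleanup
      * clears the enabled flag, records @cbfn as the stop callback and
      * drives the FSM through TX_E_STOP.
      */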
3535 void
3536 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
3537         void (*cbfn)(void *, struct bna_tx *))
3538 {
3539     if (type == BNA_SOFT_CLEANUP) {
3540         (*cbfn)(tx->bna->bnad, tx);
3541         return;
3542     }
3543 
3544     tx->stop_cbfn = cbfn;
3545     tx->stop_cbarg = tx->bna->bnad;
3546 
3547     tx->flags &= ~BNA_TX_F_ENABLED;
3548 
3549     bfa_fsm_send_event(tx, TX_E_STOP);
3550 }
3551 
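     /* Datapath cleanup for this Tx is done; let the FSM move on. */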
3552 void
3553 bna_tx_cleanup_complete(struct bna_tx *tx)
3554 {
3555     bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE);
3556 }
3557 
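     /*
      * Wait-counter callbacks used by bna_tx_mod_stop(): one count per Tx
      * being stopped, with the module-level stop callback fired once the
      * last one reports in.
      */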
3558 static void
3559 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx)
3560 {
3561     struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3562 
3563     bfa_wc_down(&tx_mod->tx_stop_wc);
3564 }
3565 
3566 static void
3567 bna_tx_mod_cb_tx_stopped_all(void *arg)
3568 {
3569     struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
3570 
3571     if (tx_mod->stop_cbfn)
3572         tx_mod->stop_cbfn(&tx_mod->bna->enet);
3573     tx_mod->stop_cbfn = NULL;
3574 }
3575 
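     /*
      * Carve the Tx and TxQ arrays out of the module resource block, seed
      * the free lists with one Tx and one TxQ per supported queue and reset
      * the priority bookkeeping.
      */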
3576 void
3577 bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
3578         struct bna_res_info *res_info)
3579 {
3580     int i;
3581 
3582     tx_mod->bna = bna;
3583     tx_mod->flags = 0;
3584 
3585     tx_mod->tx = (struct bna_tx *)
3586         res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
3587     tx_mod->txq = (struct bna_txq *)
3588         res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
3589 
3590     INIT_LIST_HEAD(&tx_mod->tx_free_q);
3591     INIT_LIST_HEAD(&tx_mod->tx_active_q);
3592 
3593     INIT_LIST_HEAD(&tx_mod->txq_free_q);
3594 
3595     for (i = 0; i < bna->ioceth.attr.num_txq; i++) {
3596         tx_mod->tx[i].rid = i;
3597         list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
3598         list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
3599     }
3600 
3601     tx_mod->prio_map = BFI_TX_PRIO_MAP_ALL;
3602     tx_mod->default_prio = 0;
3603     tx_mod->iscsi_over_cee = BNA_STATUS_T_DISABLED;
3604     tx_mod->iscsi_prio = -1;
3605 }
3606 
3607 void
3608 bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
3609 {
3610     tx_mod->bna = NULL;
3611 }
3612 
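     /* Enet started (regular or loopback): start every active Tx of @type. */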
3613 void
3614 bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3615 {
3616     struct bna_tx *tx;
3617 
3618     tx_mod->flags |= BNA_TX_MOD_F_ENET_STARTED;
3619     if (type == BNA_TX_T_LOOPBACK)
3620         tx_mod->flags |= BNA_TX_MOD_F_ENET_LOOPBACK;
3621 
3622     list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3623         if (tx->type == type)
3624             bna_tx_start(tx);
3625 }
3626 
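     /*
      * Stop every active Tx of @type.  A wait counter collects the per-Tx
      * stop completions; once the last one is down, the enet stop callback
      * registered in stop_cbfn is invoked.
      */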
3627 void
3628 bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
3629 {
3630     struct bna_tx *tx;
3631 
3632     tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3633     tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3634 
3635     tx_mod->stop_cbfn = bna_enet_cb_tx_stopped;
3636 
3637     bfa_wc_init(&tx_mod->tx_stop_wc, bna_tx_mod_cb_tx_stopped_all, tx_mod);
3638 
3639     list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3640         if (tx->type == type) {
3641             bfa_wc_up(&tx_mod->tx_stop_wc);
3642             bna_tx_stop(tx);
3643         }
3644 
3645     bfa_wc_wait(&tx_mod->tx_stop_wc);
3646 }
3647 
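     /* Propagate an enet failure to every active Tx. */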
3648 void
3649 bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
3650 {
3651     struct bna_tx *tx;
3652 
3653     tx_mod->flags &= ~BNA_TX_MOD_F_ENET_STARTED;
3654     tx_mod->flags &= ~BNA_TX_MOD_F_ENET_LOOPBACK;
3655 
3656     list_for_each_entry(tx, &tx_mod->tx_active_q, qe)
3657         bna_tx_fail(tx);
3658 }
3659 
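     /* Apply a new interrupt coalescing timeout to every TxQ's IB. */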
3660 void
3661 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo)
3662 {
3663     struct bna_txq *txq;
3664 
3665     list_for_each_entry(txq, &tx->txq_q, qe)
3666         bna_ib_coalescing_timeo_set(&txq->ib, coalescing_timeo);
3667 }