0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011 #include "bna.h"
0012
0013 static inline int
0014 ethport_can_be_up(struct bna_ethport *ethport)
0015 {
0016 int ready = 0;
0017 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
0018 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
0019 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
0020 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
0021 else
0022 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
0023 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
0024 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
0025 return ready;
0026 }
0027
/* "Is up" uses the same predicate as "can be up": all conditions still hold. */
#define ethport_is_up ethport_can_be_up
0029
/* Events consumed by the bna_ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START = 1,
	ETHPORT_E_STOP = 2,
	ETHPORT_E_FAIL = 3,
	ETHPORT_E_UP = 4,
	ETHPORT_E_DOWN = 5,
	ETHPORT_E_FWRESP_UP_OK = 6,	/* f/w acked the up request */
	ETHPORT_E_FWRESP_DOWN = 7,	/* f/w acked the down request */
	ETHPORT_E_FWRESP_UP_FAIL = 8,	/* f/w rejected the up request */
};
0040
/* Events consumed by the bna_enet state machine. */
enum bna_enet_event {
	ENET_E_START = 1,
	ENET_E_STOP = 2,
	ENET_E_FAIL = 3,
	ENET_E_PAUSE_CFG = 4,		/* pause configuration changed */
	ENET_E_MTU_CFG = 5,		/* MTU changed */
	ENET_E_FWRESP_PAUSE = 6,	/* f/w acked pause-set request */
	ENET_E_CHLD_STOPPED = 7,	/* all child objects finished stopping */
};
0050
/* Events consumed by the bna_ioceth state machine (handlers not in view). */
enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,
	IOCETH_E_DISABLE = 2,
	IOCETH_E_IOC_RESET = 3,
	IOCETH_E_IOC_FAILED = 4,
	IOCETH_E_IOC_READY = 5,
	IOCETH_E_ENET_ATTR_RESP = 6,	/* attribute query response arrived */
	IOCETH_E_ENET_STOPPED = 7,
	IOCETH_E_IOC_DISABLED = 8,
};
0061
/* Copy one big-endian f/w stats block (_name, laid out as struct
 * bfi_enet_stats_<_type>) into the host-order shadow copy, word by word.
 * Relies on locals i, count, stats_src, stats_dst declared by the caller.
 * Fix: dropped the stray trailing '\' after "} while (0)", which silently
 * extended the macro definition onto the following (blank) line.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
0070
0071
0072
0073
0074
0075 static void
0076 bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
0077 struct bfi_msgq_mhdr *msghdr)
0078 {
0079 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
0080
0081 if (ethport_can_be_up(ethport))
0082 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
0083 }
0084
0085 static void
0086 bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
0087 struct bfi_msgq_mhdr *msghdr)
0088 {
0089 int ethport_up = ethport_is_up(ethport);
0090
0091 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
0092
0093 if (ethport_up)
0094 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
0095 }
0096
0097 static void
0098 bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
0099 struct bfi_msgq_mhdr *msghdr)
0100 {
0101 struct bfi_enet_enable_req *admin_req =
0102 ðport->bfi_enet_cmd.admin_req;
0103 struct bfi_enet_rsp *rsp =
0104 container_of(msghdr, struct bfi_enet_rsp, mh);
0105
0106 switch (admin_req->enable) {
0107 case BNA_STATUS_T_ENABLED:
0108 if (rsp->error == BFI_ENET_CMD_OK)
0109 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
0110 else {
0111 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
0112 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
0113 }
0114 break;
0115
0116 case BNA_STATUS_T_DISABLED:
0117 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
0118 ethport->link_status = BNA_LINK_DOWN;
0119 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
0120 break;
0121 }
0122 }
0123
0124 static void
0125 bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
0126 struct bfi_msgq_mhdr *msghdr)
0127 {
0128 struct bfi_enet_diag_lb_req *diag_lb_req =
0129 ðport->bfi_enet_cmd.lpbk_req;
0130 struct bfi_enet_rsp *rsp =
0131 container_of(msghdr, struct bfi_enet_rsp, mh);
0132
0133 switch (diag_lb_req->enable) {
0134 case BNA_STATUS_T_ENABLED:
0135 if (rsp->error == BFI_ENET_CMD_OK)
0136 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
0137 else {
0138 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
0139 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
0140 }
0141 break;
0142
0143 case BNA_STATUS_T_DISABLED:
0144 bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
0145 break;
0146 }
0147 }
0148
/* Firmware acked a pause-set request; forward as an enet FSM event. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
0154
/* Firmware response to the enet attribute query: capture device limits. */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp =
		container_of(msghdr, struct bfi_enet_attr_rsp, mh);

	/* Store the attributes only on the first response; subsequent
	 * queries (after resets) must not overwrite them.
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* max_cfg bounds both TXQs and RX paths. */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
0177
/* Firmware response to a stats-get: convert every counter from big-endian
 * into the host-order shadow area, then complete the pending stats request.
 * Uses bna_stats_copy(), which consumes the locals count/i/stats_src/stats_dst.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	/* rlb deliberately reuses the rad stats layout — TODO confirm the
	 * two blocks really share struct bfi_enet_stats_rad. */
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Per-RXF stats: the f/w packs entries only for functions set in
	 * rx_enet_mask, so the source pointer advances only on a hit;
	 * unreported slots are zeroed. */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Per-TXF stats follow the RXF block, packed the same way. */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & BIT(i)) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
0231
0232 static void
0233 bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
0234 struct bfi_msgq_mhdr *msghdr)
0235 {
0236 ethport->link_status = BNA_LINK_UP;
0237
0238
0239 ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
0240 }
0241
0242 static void
0243 bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
0244 struct bfi_msgq_mhdr *msghdr)
0245 {
0246 ethport->link_status = BNA_LINK_DOWN;
0247
0248
0249 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
0250 }
0251
/* Handle an error interrupt: clear a pending halt (if signalled) and let
 * the IOC layer process the error.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
0260
0261 void
0262 bna_mbox_handler(struct bna *bna, u32 intr_status)
0263 {
0264 if (BNA_IS_ERR_INTR(bna, intr_status)) {
0265 bna_err_handler(bna, intr_status);
0266 return;
0267 }
0268 if (BNA_IS_MBOX_INTR(bna, intr_status))
0269 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
0270 }
0271
/* Demultiplex firmware responses and async event notifications (AENs) by
 * message id and dispatch to the owning object (rx, tx, ethport, enet,
 * ioceth or the stats module).
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		/* bna_rx_from_rid() is a lookup macro that assigns rx;
		 * rx stays NULL when no object matches enet_id. */
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All rxf configuration responses share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No action needed on a stats-clear ack. */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		/* Unknown message ids are silently ignored. */
		break;
	}
}
0381
0382
0383
/* Fire-and-clear the ethport stop callback (one-shot: cleared before the
 * call so re-entry cannot invoke it twice).
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
0393
/* Fire-and-clear the admin-up completion callback with the given status. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
0403
0404 static void
0405 bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
0406 {
0407 struct bfi_enet_enable_req *admin_up_req =
0408 ðport->bfi_enet_cmd.admin_req;
0409
0410 bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
0411 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
0412 admin_up_req->mh.num_entries = htons(
0413 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0414 admin_up_req->enable = BNA_STATUS_T_ENABLED;
0415
0416 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
0417 sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
0418 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
0419 }
0420
0421 static void
0422 bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
0423 {
0424 struct bfi_enet_enable_req *admin_down_req =
0425 ðport->bfi_enet_cmd.admin_req;
0426
0427 bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
0428 BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
0429 admin_down_req->mh.num_entries = htons(
0430 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
0431 admin_down_req->enable = BNA_STATUS_T_DISABLED;
0432
0433 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
0434 sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
0435 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
0436 }
0437
0438 static void
0439 bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
0440 {
0441 struct bfi_enet_diag_lb_req *lpbk_up_req =
0442 ðport->bfi_enet_cmd.lpbk_req;
0443
0444 bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
0445 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
0446 lpbk_up_req->mh.num_entries = htons(
0447 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
0448 lpbk_up_req->mode = (ethport->bna->enet.type ==
0449 BNA_ENET_T_LOOPBACK_INTERNAL) ?
0450 BFI_ENET_DIAG_LB_OPMODE_EXT :
0451 BFI_ENET_DIAG_LB_OPMODE_CBL;
0452 lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
0453
0454 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
0455 sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
0456 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
0457 }
0458
0459 static void
0460 bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
0461 {
0462 struct bfi_enet_diag_lb_req *lpbk_down_req =
0463 ðport->bfi_enet_cmd.lpbk_req;
0464
0465 bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
0466 BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
0467 lpbk_down_req->mh.num_entries = htons(
0468 bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
0469 lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
0470
0471 bfa_msgq_cmd_set(ðport->msgq_cmd, NULL, NULL,
0472 sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
0473 bfa_msgq_cmd_post(ðport->bna->msgq, ðport->msgq_cmd);
0474 }
0475
0476 static void
0477 bna_bfi_ethport_up(struct bna_ethport *ethport)
0478 {
0479 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
0480 bna_bfi_ethport_admin_up(ethport);
0481 else
0482 bna_bfi_ethport_lpbk_up(ethport);
0483 }
0484
0485 static void
0486 bna_bfi_ethport_down(struct bna_ethport *ethport)
0487 {
0488 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
0489 bna_bfi_ethport_admin_down(ethport);
0490 else
0491 bna_bfi_ethport_lpbk_down(ethport);
0492 }
0493
/* ethport FSM states: stopped -> down -> up_resp_wait -> up, with
 * down_resp_wait / last_resp_wait covering pending f/w responses.
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
0506
/* Entering stopped: complete any pending stop callback. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
0512
/* stopped: idle; only START moves the FSM forward. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op: already stopped. */
		break;

	case ETHPORT_E_DOWN:
		/* Can arrive while Rx objects are stopping in parallel
		 * with the ethport; ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
0539
/* Entering down: nothing to do; we wait for ETHPORT_E_UP. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
0544
/* down: started but not yet up; E_UP posts the f/w up request. */
static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
0567
/* Entering up_resp_wait: the up request was already posted by the caller. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
0572
/* up_resp_wait: an up request is outstanding at the firmware. */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* This is a response to a previous down request that raced
		 * with our up; re-issue the up request. */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
0611
/* Entering down_resp_wait: intentionally empty. An up request may still be
 * in flight, so the down request is not posted here; it is issued from the
 * event handler once ETHPORT_E_FWRESP_UP_OK arrives (see
 * bna_ethport_sm_down_resp_wait below).
 */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
}
0621
/* down_resp_wait: we want the port down while an up request is pending. */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* The pending up succeeded; now issue the deferred down. */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
0653
/* Entering up: nothing to do; the admin-up callback already fired. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
0658
/* up: port is administratively up at the firmware. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
0682
/* Entering last_resp_wait: waiting for the final f/w response before stop. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
0687
/* last_resp_wait: a stop was requested; drain outstanding f/w responses,
 * then enter stopped.
 */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/* Can arrive (e.g. from the disable AEN or Rx teardown)
		 * while we are already shutting down; ignore. */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* The earlier up succeeded, but we are stopping: undo it. */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
0719
0720 static void
0721 bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
0722 {
0723 ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
0724 ethport->bna = bna;
0725
0726 ethport->link_status = BNA_LINK_DOWN;
0727 ethport->link_cbfn = bnad_cb_ethport_link_status;
0728
0729 ethport->rx_started_count = 0;
0730
0731 ethport->stop_cbfn = NULL;
0732 ethport->adminup_cbfn = NULL;
0733
0734 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
0735 }
0736
0737 static void
0738 bna_ethport_uninit(struct bna_ethport *ethport)
0739 {
0740 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
0741 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
0742
0743 ethport->bna = NULL;
0744 }
0745
/* Kick the ethport FSM out of stopped. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
0751
/* Ethport-stopped callback: drop one count on the enet child-stop waiter. */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
0757
/* Request an ethport stop; completion reported via the stop callback. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
0764
/* IOC failure path: reset port-enabled state, force link down, fail FSM. */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset to the post-init default: port enabled. */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
0777
0778
0779 void
0780 bna_ethport_cb_rx_started(struct bna_ethport *ethport)
0781 {
0782 ethport->rx_started_count++;
0783
0784 if (ethport->rx_started_count == 1) {
0785 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
0786
0787 if (ethport_can_be_up(ethport))
0788 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
0789 }
0790 }
0791
0792 void
0793 bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
0794 {
0795 int ethport_up = ethport_is_up(ethport);
0796
0797 ethport->rx_started_count--;
0798
0799 if (ethport->rx_started_count == 0) {
0800 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
0801
0802 if (ethport_up)
0803 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
0804 }
0805 }
0806
0807
0808
/* Start all enet children (ethport, tx module, rx module) with the
 * tx/rx type matching the enet type (regular vs loopback).
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
0821
/* Stop all enet children under a wait-counter: one bfa_wc_up() is taken
 * before each child stop so early completions cannot trigger the callback
 * prematurely; bfa_wc_wait() at the end arms completion detection.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
0839
/* Propagate an IOC failure to all enet children (no wait-counter needed). */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
0846
/* (Re)start only the rx module, e.g. after an MTU reconfiguration. */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
0854
/* Stop only the rx module under the child-stop wait-counter (used for
 * MTU changes, which require rx to be quiesced).
 */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
0865
/* Fire-and-clear the enet stop callback with its saved argument. */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
0878
/* Fire-and-clear the MTU-set completion callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
0888
0889 static void bna_enet_cb_chld_stopped(void *arg);
0890 static void bna_bfi_pause_set(struct bna_enet *enet);
0891
/* enet FSM states: stopped -> pause_init_wait -> started, with cfg_wait /
 * cfg_stop_wait / chld_stop_wait / last_resp_wait handling reconfiguration
 * and shutdown while f/w responses or child stops are outstanding.
 */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
0906
/* Entering stopped: complete any pending MTU and stop callbacks. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
0913
/* stopped: idle; START begins bring-up via pause configuration. */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op: already stopped. */
		break;

	case ENET_E_PAUSE_CFG:
		/* New pause config recorded by the caller; applied at start. */
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/* Children may report stopped after we already transitioned
		 * here (e.g. on a failure path); ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
0949
/* Entering pause_init_wait: push the initial pause config to firmware. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
0955
/* pause_init_wait: waiting for the f/w to ack the initial pause config
 * before starting the children.
 */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Remember that the config changed while a set is pending. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op: MTU takes effect once children start. */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			/* Config changed mid-flight; send the new one. */
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
0993
/* Entering last_resp_wait: a stop preempts any pending pause change. */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
0999
/* last_resp_wait: drain the outstanding pause response, then stop. */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1014
/* Entering started: signal MTU-set completion if one was pending. Child
 * start is NOT done here — it happens in the transition that leads to this
 * state (see the ENET_E_FWRESP_PAUSE handling in pause_init_wait).
 */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	call_enet_mtu_cbfn(enet);
}
1024
/* started: steady state; reconfigurations route through cfg_wait. */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires rx to be stopped first. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1053
/* Entering cfg_wait: the triggering request was already posted. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1058
/* cfg_wait: a pause-set response or an rx-stop completion is outstanding;
 * coalesces further pause/MTU changes that arrive meanwhile.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		/* Rx finished stopping for the MTU change: restart it,
		 * then fall through to process queued config changes. */
		bna_enet_rx_start(enet);
		fallthrough;
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1104
/* Entering cfg_stop_wait: stop requested mid-reconfig; drop queued changes. */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}
1111
/* cfg_stop_wait: wait out the pending reconfig response, then stop children. */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1131
/* Entering chld_stop_wait: begin stopping all children under a waiter. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
1137
/* chld_stop_wait: waiting for all children to report stopped. */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1156
/* Post the current pause configuration to firmware over the msgq. */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1173
/* Wait-counter completion: all children have stopped; notify the FSM. */
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}
1181
1182 static void
1183 bna_enet_init(struct bna_enet *enet, struct bna *bna)
1184 {
1185 enet->bna = bna;
1186 enet->flags = 0;
1187 enet->mtu = 0;
1188 enet->type = BNA_ENET_T_REGULAR;
1189
1190 enet->stop_cbfn = NULL;
1191 enet->stop_cbarg = NULL;
1192
1193 enet->mtu_cbfn = NULL;
1194
1195 bfa_fsm_set_state(enet, bna_enet_sm_stopped);
1196 }
1197
1198 static void
1199 bna_enet_uninit(struct bna_enet *enet)
1200 {
1201 enet->flags = 0;
1202
1203 enet->bna = NULL;
1204 }
1205
/* IOC is ready; start the enet only if the driver has also enabled it. */
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1213
/* Enet-stopped callback used by bna_enet_stop(); notifies the ioceth FSM. */
static void
bna_ioceth_cb_enet_stopped(void *arg)
{
	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
}
1221
/* Stop the enet on behalf of the ioceth.  The completion callback is
 * armed *before* the event is sent: the FSM may invoke it synchronously
 * from the stopped-state entry handler.
 */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1231
/* IOC failure path: mark the IOC side not ready, then fail the enet
 * state machine (which propagates the failure to its children).
 */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1238
1239 void
1240 bna_enet_cb_tx_stopped(struct bna_enet *enet)
1241 {
1242 bfa_wc_down(&enet->chld_stop_wc);
1243 }
1244
1245 void
1246 bna_enet_cb_rx_stopped(struct bna_enet *enet)
1247 {
1248 bfa_wc_down(&enet->chld_stop_wc);
1249 }
1250
1251 int
1252 bna_enet_mtu_get(struct bna_enet *enet)
1253 {
1254 return enet->mtu;
1255 }
1256
1257 void
1258 bna_enet_enable(struct bna_enet *enet)
1259 {
1260 if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
1261 return;
1262
1263 enet->flags |= BNA_ENET_F_ENABLED;
1264
1265 if (enet->flags & BNA_ENET_F_IOCETH_READY)
1266 bfa_fsm_send_event(enet, ENET_E_START);
1267 }
1268
/* Administratively disable the enet.
 * BNA_SOFT_CLEANUP: no hardware teardown -- invoke the caller's
 * completion callback immediately and return.
 * Otherwise: arm the stop callback (delivered from the stopped-state
 * entry handler), clear the enabled flag, and drive the FSM to stop.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1285
1286 void
1287 bna_enet_pause_config(struct bna_enet *enet,
1288 struct bna_pause_config *pause_config)
1289 {
1290 enet->pause_config = *pause_config;
1291
1292 bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
1293 }
1294
/* Record the new MTU and the completion callback *before* sending the
 * event: the FSM may consume mtu/mtu_cbfn while handling it.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1305
1306 void
1307 bna_enet_perm_mac_get(struct bna_enet *enet, u8 *mac)
1308 {
1309 bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc, mac);
1310 }
1311
1312
1313
/* Enable mailbox interrupts at both driver and hardware level.
 * NOTE(review): intr_status is read and then discarded -- this looks
 * like an ack/clear of pending status before enabling; confirm against
 * bna_intr_status_get().
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts: hardware first, then driver side. */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Deliver the pending ioceth stop callback exactly once: the stored
 * cbfn/cbarg are cleared before invocation so re-entry is harmless.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: simple flag manipulation only. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* IOC failure: mark not-ready and drop any in-flight get/clear state. */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* Forward declarations of all ioceth state-machine states. */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1380
/* Entering stopped: deliver any pending stop-completion callback. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1386
/* State: stopped.  ENABLE kicks off IOC bring-up; IOC_RESET/IOC_FAILED
 * can still arrive here from the IOC layer and only toggle mailbox
 * interrupt state (plus a transition to failed on IOC_FAILED).
 */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped so the entry handler fires the stop
		 * callback for the caller.
		 */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1414
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/* Intentionally empty: nothing to do on entry.  This state just
	 * waits for IOCETH_E_IOC_RESET / IOCETH_E_IOC_READY events
	 * delivered by the IOC layer callbacks.
	 */
}
1423
/* State: waiting for the IOC to report ready after bfa_nw_ioc_enable().
 * IOC_READY advances to the firmware attribute query; DISABLE and
 * IOC_FAILED abort the bring-up.
 */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1451
/* Entering attribute-wait: post the GET_ATTR request to firmware. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1457
/* State: waiting for the firmware attribute response.  On DISABLE we
 * must first wait for the outstanding response (last_resp_wait) before
 * the IOC can be disabled.
 */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1480
/* Entering ready: start the enet and stats modules, then tell the
 * driver layer the ioceth is operational.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1488
/* State: operational.  DISABLE begins an orderly shutdown (stop the
 * enet first); IOC_FAILED tears everything down immediately.
 */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1508
/* Entering last-resp-wait: nothing to do; the GET_ATTR request is
 * already outstanding and we just wait for it to complete or fail.
 */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1513
/* State: DISABLE arrived while an attribute request was in flight.
 * Either the response (ENET_ATTR_RESP) or an IOC failure releases us
 * to disable the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1534
/* Entering enet-stop-wait: stop the stats module, then ask the enet to
 * stop (it reports back via IOCETH_E_ENET_STOPPED).
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1541
/* State: waiting for the enet to finish stopping before disabling the
 * IOC.  An IOC failure forces the enet/stats modules down immediately.
 */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1564
/* Entering ioc-disable-wait: nothing to do; bfa_nw_ioc_disable() was
 * already issued by the transitioning state.
 */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1569
/* State: waiting for the IOC to confirm it is disabled.  A late
 * ENET_STOPPED notification can still arrive here and is ignored.
 */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is anticipated but not acted upon. */

		break;

	default:
		bfa_sm_fault(event);
	}
}
1589
/* Entering failed: inform the driver layer so it can quiesce. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1595
/* State: IOC has failed.  IOC_RESET restarts the bring-up sequence;
 * DISABLE shuts the IOC down; repeated IOC_FAILED events are ignored.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}
1618
/* Post a GET_ATTR request to firmware; the response is delivered as
 * IOCETH_E_ENET_ATTR_RESP to the state machine.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1632
1633
1634
1635 static void
1636 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1637 {
1638 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1639
1640 if (error)
1641 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1642 else
1643 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1644 }
1645
1646 static void
1647 bna_cb_ioceth_disable(void *arg)
1648 {
1649 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1650
1651 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1652 }
1653
1654 static void
1655 bna_cb_ioceth_hbfail(void *arg)
1656 {
1657 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658
1659 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1660 }
1661
1662 static void
1663 bna_cb_ioceth_reset(void *arg)
1664 {
1665 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1666
1667 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1668 }
1669
/* Callback table registered with the IOC layer via bfa_nw_ioc_attach(). */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	.enable_cbfn = bna_cb_ioceth_enable,
	.disable_cbfn = bna_cb_ioceth_disable,
	.hbfail_cbfn = bna_cb_ioceth_hbfail,
	.reset_cbfn = bna_cb_ioceth_reset
};
1676
1677 static void bna_attr_init(struct bna_ioceth *ioceth)
1678 {
1679 ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1680 ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1681 ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1682 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1683 ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1684 ioceth->attr.fw_query_complete = false;
1685 }
1686
/* Initialize the ioceth: attach the IOC, claim the pre-allocated DMA
 * and KVA regions, and carve the shared BNA_RES_MEM_T_COM block among
 * the CEE, flash and message-queue sub-modules.  The kva/dma cursors
 * advance in lock-step, so the claim order here must match the size
 * layout computed in bna_res_req().
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/* Attach IOC and claim its attribute memory + debug trace area. */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/* Carve the common DMA block: CEE, then flash, then msgq. */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1742
1743 static void
1744 bna_ioceth_uninit(struct bna_ioceth *ioceth)
1745 {
1746 bfa_nw_ioc_detach(&ioceth->ioc);
1747
1748 ioceth->bna = NULL;
1749 }
1750
/* Enable the ioceth.  If it is already ready, just re-notify the
 * driver; otherwise kick off bring-up only from the stopped state
 * (any in-between state means an enable is already in progress).
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1762
/* Disable the ioceth.  BNA_SOFT_CLEANUP skips hardware teardown and
 * completes immediately; otherwise the stop callback is armed before
 * the DISABLE event (it may be delivered synchronously from the
 * stopped-state entry handler).
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1776
/* Initialize the unicast CAM module from a pre-allocated array of
 * 2 * num_ucmac bna_mac entries: the first half seeds the free list,
 * the second half the deletion list.  Note that the second loop
 * deliberately continues from the first loop's final index.
 */
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);

	/* A separate queue to allow synchronous MAC deletion. */
	INIT_LIST_HEAD(&ucam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_ucmac * 2); i++)
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);

	ucam_mod->bna = bna;
}
1797
1798 static void
1799 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1800 {
1801 ucam_mod->bna = NULL;
1802 }
1803
/* Initialize the multicast CAM module.  The mcmac array holds
 * 2 * num_mcmac entries: first half -> free list, second half -> delete
 * list.  Note the subtle index reuse: the handle loop leaves i equal to
 * num_mcmac, which is exactly where the del_q loop must resume in the
 * mcmac array.
 */
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++)
		list_add_tail(&mcam_mod->mchandle[i].qe,
			      &mcam_mod->free_handle_q);

	/* A separate queue to allow synchronous MAC deletion. */
	INIT_LIST_HEAD(&mcam_mod->del_q);
	for (; i < (bna->ioceth.attr.num_mcmac * 2); i++)
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);

	mcam_mod->bna = bna;
}
1832
1833 static void
1834 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1835 {
1836 mcam_mod->bna = NULL;
1837 }
1838
/* Post a STATS_GET request to firmware asking for all statistics for
 * the currently-active Tx/Rx resource IDs, DMA'd into the host stats
 * buffer.  Marks the module busy until the response clears it.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1860
/* Describe the fixed memory resources the driver must allocate before
 * bna_init()/bna_ioceth_init() can claim them.  The COM block's length
 * must cover exactly the sub-modules carved out of it in
 * bna_ioceth_init() (CEE + flash + msgq).
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory shared by CEE, flash and msgq sub-modules. */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				bfa_nw_flash_meminfo() +
				bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for the IOC attribute area. */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Kernel virtual memory for the firmware trace buffer. */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory the firmware writes hardware statistics into. */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1894
/* Describe the per-module array allocations, sized from the (possibly
 * firmware-updated) attribute block.  Note the doubled counts: each
 * rxp carries up to two rxqs, and the u/mcmac arrays reserve a second
 * half for the deletion queues set up in bna_[um]cam_mod_init().
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Tx object array. */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Tx queue array. */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Rx object array. */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Rx path array. */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Rx queue array: two queues per Rx path. */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Unicast MACs: free list + deletion list halves. */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Multicast MACs: free list + deletion list halves. */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Multicast handle array. */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1964
/* First-stage device initialization: bind driver/PCI context, claim the
 * stats DMA buffer, map registers, then bring up the ioceth and the
 * core enet/ethport objects.  Module arrays are sized later (after the
 * firmware attribute query) by bna_mod_init().
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes the CEE, flash and msgq sub-modules. */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
1987
/* Second-stage initialization, run once the module resource arrays are
 * allocated: set up Tx/Rx and CAM modules, reset the promiscuous/default
 * RIDs, and record completion so bna_uninit() knows what to tear down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2004
/* Tear everything down in reverse order of initialization.  The module
 * block is skipped if bna_mod_init() never completed (flag-guarded).
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2024
2025 int
2026 bna_num_txq_set(struct bna *bna, int num_txq)
2027 {
2028 if (bna->ioceth.attr.fw_query_complete &&
2029 (num_txq <= bna->ioceth.attr.num_txq)) {
2030 bna->ioceth.attr.num_txq = num_txq;
2031 return BNA_CB_SUCCESS;
2032 }
2033
2034 return BNA_CB_FAIL;
2035 }
2036
2037 int
2038 bna_num_rxp_set(struct bna *bna, int num_rxp)
2039 {
2040 if (bna->ioceth.attr.fw_query_complete &&
2041 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2042 bna->ioceth.attr.num_rxp = num_rxp;
2043 return BNA_CB_SUCCESS;
2044 }
2045
2046 return BNA_CB_FAIL;
2047 }
2048
2049 struct bna_mac *
2050 bna_cam_mod_mac_get(struct list_head *head)
2051 {
2052 struct bna_mac *mac;
2053
2054 mac = list_first_entry_or_null(head, struct bna_mac, qe);
2055 if (mac)
2056 list_del(&mac->qe);
2057
2058 return mac;
2059 }
2060
2061 struct bna_mcam_handle *
2062 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2063 {
2064 struct bna_mcam_handle *handle;
2065
2066 handle = list_first_entry_or_null(&mcam_mod->free_handle_q,
2067 struct bna_mcam_handle, qe);
2068 if (handle)
2069 list_del(&handle->qe);
2070
2071 return handle;
2072 }
2073
2074 void
2075 bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
2076 struct bna_mcam_handle *handle)
2077 {
2078 list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
2079 }
2080
/* Kick off an asynchronous hardware statistics fetch.  Fails fast with
 * BNA_CB_FAIL if the IOC is down, or BNA_CB_BUSY if a previous request
 * is still outstanding; otherwise posts a STATS_GET to firmware.
 */
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}