// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * This file implements remote node state machines for:
 * - Fabric logins.
 * - Fabric controller events.
 * - Name/directory services interaction.
 * - Point-to-point logins.
 */

/*
 * fabric_sm Node State Machine: Fabric States
 * ns_sm Node State Machine: Name/Directory Services States
 * p2p_sm Node State Machine: Point-to-Point Node States
 */

#include "efc.h"

static void
efc_fabric_initiate_shutdown(struct efc_node *node)
{
    struct efc *efc = node->efc;

    node->els_io_enabled = false;

    if (node->attached) {
        int rc;

        /*
         * Issue the HW node free; it doesn't matter whether it
         * completes right away or sometime later, as node->attached
         * is checked again later in the shutdown process.
         */
        rc = efc_cmd_node_detach(efc, &node->rnode);
        if (rc < 0) {
            node_printf(node, "Failed freeing HW node, rc=%d\n",
                    rc);
        }
    }
    /*
     * node has either been detached or is in the process of being detached,
     * call common node's initiate cleanup function
     */
    efc_node_initiate_cleanup(node);
}

static void
__efc_fabric_common(const char *funcname, struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = NULL;

    node = ctx->app;

    switch (evt) {
    case EFC_EVT_DOMAIN_ATTACH_OK:
        break;
    case EFC_EVT_SHUTDOWN:
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;

    default:
        /* call default event handler common to all nodes */
        __efc_node_common(funcname, ctx, evt, arg);
    }
}

void
__efc_fabric_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
          void *arg)
{
    struct efc_node *node = ctx->app;
    struct efc *efc = node->efc;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

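    /*
     * Fabric node state machine: Initial state.
     * Send an FLOGI to the fabric and wait for the response.
     */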
    switch (evt) {
    case EFC_EVT_REENTER:
        efc_log_debug(efc, ">>> reenter !!\n");
        fallthrough;

    case EFC_EVT_ENTER:
        /* send FLOGI */
        efc_send_flogi(node);
        efc_node_transition(node, __efc_fabric_flogi_wait_rsp, NULL);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
efc_fabric_set_topology(struct efc_node *node,
            enum efc_nport_topology topology)
{
    node->nport->topology = topology;
}

void
efc_fabric_notify_topology(struct efc_node *node)
{
    struct efc_node *tmp_node;
    unsigned long index;

    /*
     * now loop through the nodes in the nport
     * and send topology notification
     */
    xa_for_each(&node->nport->lookup, index, tmp_node) {
        if (tmp_node != node) {
            efc_node_post_event(tmp_node,
                        EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
                        &node->nport->topology);
        }
    }
}

static bool efc_rnode_is_nport(struct fc_els_flogi *rsp)
{
    return !(ntohs(rsp->fl_csp.sp_features) & FC_SP_FT_FPORT);
}

void
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
                enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

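    /*
     * Fabric node state machine: Wait for the FLOGI response.
     * On success, attach the domain (fabric topology) or set up the
     * point-to-point topology; on failure, shut down the nport.
     */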
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK: {
        if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;

        memcpy(node->nport->domain->flogi_service_params,
               cbdata->els_rsp.virt,
               sizeof(struct fc_els_flogi));

        /* Check to see if the fabric is an F_PORT or an N_PORT */
        if (!efc_rnode_is_nport(cbdata->els_rsp.virt)) {
            /* sm: if not nport / efc_domain_attach */
            /* ext_status has the fc_id, attach domain */
            efc_fabric_set_topology(node, EFC_NPORT_TOPO_FABRIC);
            efc_fabric_notify_topology(node);
            WARN_ON(node->nport->domain->attached);
            efc_domain_attach(node->nport->domain,
                      cbdata->ext_status);
            efc_node_transition(node,
                        __efc_fabric_wait_domain_attach,
                        NULL);
            break;
        }

        /* sm: if nport and p2p_winner / efc_domain_attach */
        efc_fabric_set_topology(node, EFC_NPORT_TOPO_P2P);
        if (efc_p2p_setup(node->nport)) {
            node_printf(node,
                    "p2p setup failed, shutting down node\n");
            node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
            efc_fabric_initiate_shutdown(node);
            break;
        }

        if (node->nport->p2p_winner) {
            efc_node_transition(node,
                        __efc_p2p_wait_domain_attach,
                        NULL);
            if (node->nport->domain->attached &&
                !node->nport->domain->domain_notify_pend) {
                /*
                 * already attached,
                 * just send ATTACH_OK
                 */
                node_printf(node,
                        "p2p winner, domain already attached\n");
                efc_node_post_event(node,
                            EFC_EVT_DOMAIN_ATTACH_OK,
                            NULL);
            }
        } else {
            /*
             * peer is p2p winner;
             * PLOGI will be received on the
             * remote SID=1 node;
             * this node has served its purpose
             */
            node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
            efc_fabric_initiate_shutdown(node);
        }

        break;
    }

    case EFC_EVT_ELS_REQ_ABORTED:
    case EFC_EVT_SRRS_ELS_REQ_RJT:
    case EFC_EVT_SRRS_ELS_REQ_FAIL: {
        struct efc_nport *nport = node->nport;
        /*
         * with these errors, we have no recovery,
         * so shutdown the nport, leave the link
         * up and the domain ready
         */
        if (efc_node_check_els_req(ctx, evt, arg, ELS_FLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        node_printf(node,
                "FLOGI failed evt=%s, shutting down nport [%s]\n",
                efc_sm_event_name(evt), nport->display_name);
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

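    /*
     * Virtual port fabric node state machine: Initial state.
     * Send an FDISC and wait for the response.
     */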
    switch (evt) {
    case EFC_EVT_ENTER:
        /* sm: / send FDISC */
        efc_send_fdisc(node);
        efc_node_transition(node, __efc_fabric_fdisc_wait_rsp, NULL);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
                enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

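    /*
     * Virtual port fabric node state machine: Wait for the FDISC
     * response; on success attach the nport, otherwise shut it down.
     */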
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK: {
        /* fc_id is in ext_status */
        if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
                       __efc_fabric_common, __func__)) {
            return;
        }

        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / efc_nport_attach */
        efc_nport_attach(node->nport, cbdata->ext_status);
        efc_node_transition(node, __efc_fabric_wait_domain_attach,
                    NULL);
        break;
    }

    case EFC_EVT_SRRS_ELS_REQ_RJT:
    case EFC_EVT_SRRS_ELS_REQ_FAIL: {
        if (efc_node_check_els_req(ctx, evt, arg, ELS_FDISC,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        efc_log_err(node->efc, "FDISC failed, shutting down nport\n");
        /* sm: / shutdown nport */
        efc_sm_post_event(&node->nport->sm, EFC_EVT_SHUTDOWN, NULL);
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

static int
efc_start_ns_node(struct efc_nport *nport)
{
    struct efc_node *ns;

    /* Instantiate a name services node */
    ns = efc_node_find(nport, FC_FID_DIR_SERV);
    if (!ns) {
        ns = efc_node_alloc(nport, FC_FID_DIR_SERV, false, false);
        if (!ns)
            return -EIO;
    }
    /*
     * If the name services node already existed, should we be
     * transitioning it from here? That breaks the rule that transitions
     * happen only
     *  1. from within the state machine, or
     *  2. right after allocation.
     */
    if (ns->efc->nodedb_mask & EFC_NODEDB_PAUSE_NAMESERVER)
        efc_node_pause(ns, __efc_ns_init);
    else
        efc_node_transition(ns, __efc_ns_init, NULL);
    return 0;
}

static int
efc_start_fabctl_node(struct efc_nport *nport)
{
    struct efc_node *fabctl;

    fabctl = efc_node_find(nport, FC_FID_FCTRL);
    if (!fabctl) {
        fabctl = efc_node_alloc(nport, FC_FID_FCTRL,
                    false, false);
        if (!fabctl)
            return -EIO;
    }
    /*
     * If the fabric controller node already existed, should we be
     * transitioning it from here? That breaks the rule that transitions
     * happen only
     *  1. from within the state machine, or
     *  2. right after allocation.
     */
    efc_node_transition(fabctl, __efc_fabctl_init, NULL);
    return 0;
}

void
__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
                enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

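    /*
     * Fabric node state machine: Wait for the domain/nport attach to
     * complete, then start the name services node and, if RSCN is
     * enabled, the fabric controller node.
     */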
    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;
    case EFC_EVT_DOMAIN_ATTACH_OK:
    case EFC_EVT_NPORT_ATTACH_OK: {
        int rc;

        rc = efc_start_ns_node(node->nport);
        if (rc)
            return;

        /* sm: if enable_ini / start fabctl node */
        /* Instantiate the fabric controller (sends SCR) */
        if (node->nport->enable_rscn) {
            rc = efc_start_fabctl_node(node->nport);
            if (rc)
                return;
        }
        efc_node_transition(node, __efc_fabric_idle, NULL);
        break;
    }
    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabric_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
          void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_DOMAIN_ATTACH_OK:
        break;
    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

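    /*
     * Name services node state machine: Initial state.
     * Send a PLOGI to the name server and wait for the response.
     */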
    switch (evt) {
    case EFC_EVT_ENTER:
        /* sm: / send PLOGI */
        efc_send_plogi(node);
        efc_node_transition(node, __efc_ns_plogi_wait_rsp, NULL);
        break;
    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

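    /*
     * Name services node state machine: Wait for the PLOGI response,
     * then save the service parameters and attach the node.
     */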
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK: {
        int rc;

        /* Save service parameters */
        if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / save sparams, efc_node_attach */
        efc_node_save_sparms(node, cbdata->els_rsp.virt);
        rc = efc_node_attach(node);
        efc_node_transition(node, __efc_ns_wait_node_attach, NULL);
        if (rc < 0)
            efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
                        NULL);
        break;
    }
    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
              enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

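    /*
     * Name services node state machine: Wait for the node attach to
     * complete, then send an RFT_ID to register FC-4 types.
     */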
    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_NODE_ATTACH_OK:
        node->attached = true;
        /* sm: / send RFTID */
        efc_ns_send_rftid(node);
        efc_node_transition(node, __efc_ns_rftid_wait_rsp, NULL);
        break;

    case EFC_EVT_NODE_ATTACH_FAIL:
        /* node attach failed, shutdown the node */
        node->attached = false;
        node_printf(node, "Node attach failed\n");
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;

    case EFC_EVT_SHUTDOWN:
        node_printf(node, "Shutdown event received\n");
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_node_transition(node,
                    __efc_fabric_wait_attach_evt_shutdown,
                     NULL);
        break;

    /*
     * if receive RSCN just ignore,
     * we haven't sent GID_PT yet (ACC sent by fabctl node)
     */
    case EFC_EVT_RSCN_RCVD:
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
                      enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    /* wait for any of these attach events and then shutdown */
    case EFC_EVT_NODE_ATTACH_OK:
        node->attached = true;
        node_printf(node, "Attach evt=%s, proceed to shutdown\n",
                efc_sm_event_name(evt));
        efc_fabric_initiate_shutdown(node);
        break;

    case EFC_EVT_NODE_ATTACH_FAIL:
        node->attached = false;
        node_printf(node, "Attach evt=%s, proceed to shutdown\n",
                efc_sm_event_name(evt));
        efc_fabric_initiate_shutdown(node);
        break;

    /* ignore shutdown event as we're already in shutdown path */
    case EFC_EVT_SHUTDOWN:
        node_printf(node, "Shutdown event received\n");
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

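    /*
     * Name services node state machine: Wait for the RFT_ID response,
     * then send an RFF_ID to register FC-4 features.
     */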
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK:
        if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFT_ID,
                      __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / send RFFID */
        efc_ns_send_rffid(node);
        efc_node_transition(node, __efc_ns_rffid_wait_rsp, NULL);
        break;

    /*
     * if receive RSCN just ignore,
     * we haven't sent GID_PT yet (ACC sent by fabctl node)
     */
    case EFC_EVT_RSCN_RCVD:
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    /*
     * Waits for an RFFID response event;
     * if rscn enabled, a GIDPT name services request is issued.
     */
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK:   {
        if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_RFF_ID,
                      __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        if (node->nport->enable_rscn) {
            /* sm: if enable_rscn / send GIDPT */
            efc_ns_send_gidpt(node);

            efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
                        NULL);
        } else {
            /* if 'T' only, we're done, go to idle */
            efc_node_transition(node, __efc_ns_idle, NULL);
        }
        break;
    }
    /*
     * if receive RSCN just ignore,
     * we haven't sent GID_PT yet (ACC sent by fabctl node)
     */
    case EFC_EVT_RSCN_RCVD:
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

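/*
 * Parse a GID_PT response payload: mark nodes that are no longer in the
 * fabric as missing, and allocate nodes for newly discovered port IDs.
 */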
static int
efc_process_gidpt_payload(struct efc_node *node,
              void *data, u32 gidpt_len)
{
    u32 i, j;
    struct efc_node *newnode;
    struct efc_nport *nport = node->nport;
    struct efc *efc = node->efc;
    u32 port_id = 0, port_count, plist_count;
    struct efc_node *n;
    struct efc_node **active_nodes;
    int residual;
    struct {
        struct fc_ct_hdr hdr;
        struct fc_gid_pn_resp pn_rsp;
    } *rsp;
    struct fc_gid_pn_resp *gidpt;
    unsigned long index;

    rsp = data;
    gidpt = &rsp->pn_rsp;
    residual = be16_to_cpu(rsp->hdr.ct_mr_size);

    if (residual != 0)
        efc_log_debug(node->efc, "residual is %u words\n", residual);

    if (be16_to_cpu(rsp->hdr.ct_cmd) == FC_FS_RJT) {
        node_printf(node,
                "GIDPT request failed: rsn x%x rsn_expl x%x\n",
                rsp->hdr.ct_reason, rsp->hdr.ct_explan);
        return -EIO;
    }

    plist_count = (gidpt_len - sizeof(struct fc_ct_hdr)) / sizeof(*gidpt);

    /* Count the number of nodes */
    port_count = 0;
    xa_for_each(&nport->lookup, index, n) {
        port_count++;
    }

    /* Allocate a buffer for all nodes */
    active_nodes = kcalloc(port_count, sizeof(*active_nodes), GFP_ATOMIC);
    if (!active_nodes) {
        node_printf(node, "kcalloc failed\n");
        return -EIO;
    }

    /* Fill buffer with fc_id of active nodes */
    i = 0;
    xa_for_each(&nport->lookup, index, n) {
        port_id = n->rnode.fc_id;
        switch (port_id) {
        case FC_FID_FLOGI:
        case FC_FID_FCTRL:
        case FC_FID_DIR_SERV:
            break;
        default:
            if (port_id != FC_FID_DOM_MGR)
                active_nodes[i++] = n;
            break;
        }
    }

    /* update the active nodes buffer */
    for (i = 0; i < plist_count; i++) {
        port_id = ntoh24(gidpt[i].fp_fid);

        for (j = 0; j < port_count; j++) {
            if (active_nodes[j] &&
                port_id == active_nodes[j]->rnode.fc_id) {
                active_nodes[j] = NULL;
            }
        }

        if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
            break;
    }

    /* Those remaining in the active_nodes[] are now gone! */
    for (i = 0; i < port_count; i++) {
        /*
         * if we're an initiator and the remote node
         * is a target, then post the node missing event.
         * if we're target and we have enabled
         * target RSCN, then post the node missing event.
         */
        if (!active_nodes[i])
            continue;

        if ((node->nport->enable_ini && active_nodes[i]->targ) ||
            (node->nport->enable_tgt && enable_target_rscn(efc))) {
            efc_node_post_event(active_nodes[i],
                        EFC_EVT_NODE_MISSING, NULL);
        } else {
            node_printf(node,
                    "GID_PT: skipping non-tgt port_id x%06x\n",
                    active_nodes[i]->rnode.fc_id);
        }
    }
    kfree(active_nodes);

    for (i = 0; i < plist_count; i++) {
        port_id = ntoh24(gidpt[i].fp_fid);

        /* Don't create node for ourselves */
        if (port_id == node->rnode.nport->fc_id) {
            if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
                break;
            continue;
        }

        newnode = efc_node_find(nport, port_id);
        if (!newnode) {
            if (!node->nport->enable_ini)
                continue;

            newnode = efc_node_alloc(nport, port_id, false, false);
            if (!newnode) {
                efc_log_err(efc, "efc_node_alloc() failed\n");
                return -EIO;
            }
            /*
             * send PLOGI automatically
             * if initiator
             */
            efc_node_init_device(newnode, true);
        }

        if (node->nport->enable_ini && newnode->targ) {
            efc_node_post_event(newnode, EFC_EVT_NODE_REFOUND,
                        NULL);
        }

        if (gidpt[i].fp_resvd & FC_NS_FID_LAST)
            break;
    }
    return 0;
}

void
__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
            enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();
    /*
     * Wait for a GIDPT response from the name server. Process the FC_IDs
     * that are reported by creating new remote ports, as needed.
     */

    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK:   {
        if (efc_node_check_ns_req(ctx, evt, arg, FC_NS_GID_PT,
                      __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / process GIDPT payload */
        efc_process_gidpt_payload(node, cbdata->els_rsp.virt,
                      cbdata->els_rsp.len);
        efc_node_transition(node, __efc_ns_idle, NULL);
        break;
    }

    case EFC_EVT_SRRS_ELS_REQ_FAIL: {
        /* not much we can do; will retry with the next RSCN */
        node_printf(node, "GID_PT failed to complete\n");
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        efc_node_transition(node, __efc_ns_idle, NULL);
        break;
    }

    /* if receive RSCN here, queue up another discovery processing */
    case EFC_EVT_RSCN_RCVD: {
        node_printf(node, "RSCN received during GID_PT processing\n");
        node->rscn_pending = true;
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;
    struct efc *efc = node->efc;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    /*
     * Wait for RSCN received events (posted from the fabric controller)
     * and restart the GIDPT name services query and processing.
     */

    switch (evt) {
    case EFC_EVT_ENTER:
        if (!node->rscn_pending)
            break;

        node_printf(node, "RSCN pending, restart discovery\n");
        node->rscn_pending = false;
        fallthrough;

    case EFC_EVT_RSCN_RCVD: {
        /* sm: / send GIDPT */
        /*
         * If target RSCN processing is enabled,
         * and this is target only (not initiator),
         * and tgt_rscn_delay is non-zero,
         * then we delay issuing the GID_PT
         */
        if (efc->tgt_rscn_delay_msec != 0 &&
            !node->nport->enable_ini && node->nport->enable_tgt &&
            enable_target_rscn(efc)) {
            efc_node_transition(node, __efc_ns_gidpt_delay, NULL);
        } else {
            efc_ns_send_gidpt(node);
            efc_node_transition(node, __efc_ns_gidpt_wait_rsp,
                        NULL);
        }
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

static void
gidpt_delay_timer_cb(struct timer_list *t)
{
    struct efc_node *node = from_timer(node, t, gidpt_delay_timer);

    del_timer(&node->gidpt_delay_timer);

    efc_node_post_event(node, EFC_EVT_GIDPT_DELAY_EXPIRED, NULL);
}

void
__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
             enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;
    struct efc *efc = node->efc;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

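    /*
     * Name services node state machine: Delay the GID_PT query for a
     * target-only port, then issue it when the delay timer expires.
     */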
    switch (evt) {
    case EFC_EVT_ENTER: {
        u64 delay_msec, tmp;

        /*
         * Compute the delay time: use tgt_rscn_delay, but if the
         * time since the last GID_PT is less than tgt_rscn_period,
         * use tgt_rscn_period instead.
         */
        delay_msec = efc->tgt_rscn_delay_msec;
        tmp = jiffies_to_msecs(jiffies) - node->time_last_gidpt_msec;
        if (tmp < efc->tgt_rscn_period_msec)
            delay_msec = efc->tgt_rscn_period_msec;

        timer_setup(&node->gidpt_delay_timer, &gidpt_delay_timer_cb,
                0);
        mod_timer(&node->gidpt_delay_timer,
              jiffies + msecs_to_jiffies(delay_msec));

        break;
    }

    case EFC_EVT_GIDPT_DELAY_EXPIRED:
        node->time_last_gidpt_msec = jiffies_to_msecs(jiffies);

        efc_ns_send_gidpt(node);
        efc_node_transition(node, __efc_ns_gidpt_wait_rsp, NULL);
        break;

    case EFC_EVT_RSCN_RCVD: {
        efc_log_debug(efc,
                  "RSCN received while in GIDPT delay - no action\n");
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabctl_init(struct efc_sm_ctx *ctx,
          enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        /* no need to login to fabric controller, just send SCR */
        efc_send_scr(node);
        efc_node_transition(node, __efc_fabctl_wait_scr_rsp, NULL);
        break;

    case EFC_EVT_NODE_ATTACH_OK:
        node->attached = true;
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
              enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    /*
     * Fabric controller node state machine:
     * Wait for an SCR response from the fabric controller.
     */
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK:
        if (efc_node_check_els_req(ctx, evt, arg, ELS_SCR,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        efc_node_transition(node, __efc_fabctl_ready, NULL);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

static void
efc_process_rscn(struct efc_node *node, struct efc_node_cb *cbdata)
{
    struct efc *efc = node->efc;
    struct efc_nport *nport = node->nport;
    struct efc_node *ns;

    /* Forward this event to the name-services node */
    ns = efc_node_find(nport, FC_FID_DIR_SERV);
    if (ns)
        efc_node_post_event(ns, EFC_EVT_RSCN_RCVD, cbdata);
    else
        efc_log_warn(efc, "can't find name server node\n");
}

void
__efc_fabctl_ready(struct efc_sm_ctx *ctx,
           enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    /*
     * Fabric controller node state machine: Ready.
     * In this state, the fabric controller sends a RSCN, which is received
     * by this node and is forwarded to the name services node object; and
     * the RSCN LS_ACC is sent.
     */
    switch (evt) {
    case EFC_EVT_RSCN_RCVD: {
        struct fc_frame_header *hdr = cbdata->header->dma.virt;

        /*
         * sm: / process RSCN (forward to name services node),
         * send LS_ACC
         */
        efc_process_rscn(node, cbdata);
        efc_send_ls_acc(node, be16_to_cpu(hdr->fh_ox_id));
        efc_node_transition(node, __efc_fabctl_wait_ls_acc_cmpl,
                    NULL);
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
                  enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_SRRS_ELS_CMPL_OK:
        WARN_ON(!node->els_cmpl_cnt);
        node->els_cmpl_cnt--;
        efc_node_transition(node, __efc_fabctl_ready, NULL);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

static uint64_t
efc_get_wwpn(struct fc_els_flogi *sp)
{
    return be64_to_cpu(sp->fl_wwpn);
}

static int
efc_rnode_is_winner(struct efc_nport *nport)
{
    struct fc_els_flogi *remote_sp;
    u64 remote_wwpn;
    u64 local_wwpn = nport->wwpn;
    u64 wwn_bump = 0;

    remote_sp = (struct fc_els_flogi *)nport->domain->flogi_service_params;
    remote_wwpn = efc_get_wwpn(remote_sp);

    local_wwpn ^= wwn_bump;

    efc_log_debug(nport->efc, "r: %llx\n",
              be64_to_cpu(remote_sp->fl_wwpn));
    efc_log_debug(nport->efc, "l: %llx\n", local_wwpn);

    if (remote_wwpn == local_wwpn) {
        efc_log_warn(nport->efc,
                 "WWPN of remote node [%08x %08x] matches local WWPN\n",
                 (u32)(local_wwpn >> 32ll),
                 (u32)local_wwpn);
        return -1;
    }

    return (remote_wwpn > local_wwpn);
}

void
__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
                 enum efc_sm_event evt, void *arg)
{
    struct efc_node *node = ctx->app;
    struct efc *efc = node->efc;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

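    /*
     * Point-to-point node state machine: This transient node is the
     * p2p winner; once the domain attach completes, allocate a
     * separate node from which to send the PLOGI to the peer.
     */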
    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_DOMAIN_ATTACH_OK: {
        struct efc_nport *nport = node->nport;
        struct efc_node *rnode;

        /*
         * this transient node (SID=0 (recv'd FLOGI)
         * or DID=fabric (sent FLOGI))
         * is the p2p winner, will use a separate node
         * to send PLOGI to peer
         */
        WARN_ON(!node->nport->p2p_winner);

        rnode = efc_node_find(nport, node->nport->p2p_remote_port_id);
        if (rnode) {
            /*
             * the "other" transient p2p node has
             * already kicked off the
             * new node from which PLOGI is sent
             */
            node_printf(node,
                    "Node with fc_id x%x already exists\n",
                    rnode->rnode.fc_id);
        } else {
            /*
             * create new node (SID=1, DID=2)
             * from which to send PLOGI
             */
            rnode = efc_node_alloc(nport,
                           nport->p2p_remote_port_id,
                        false, false);
            if (!rnode) {
                efc_log_err(efc, "node alloc failed\n");
                return;
            }

            efc_fabric_notify_topology(node);
            /* sm: / allocate p2p remote node */
            efc_node_transition(rnode, __efc_p2p_rnode_init,
                        NULL);
        }

        /*
         * the transient node (SID=0 or DID=fabric)
         * has served its purpose
         */
        if (node->rnode.fc_id == 0) {
            /*
             * if this is the SID=0 node,
             * move to the init state in case peer
             * has restarted FLOGI discovery and FLOGI is pending
             */
            /* don't send PLOGI on efc_d_init entry */
            efc_node_init_device(node, false);
        } else {
            /*
             * if this is the DID=fabric node
             * (we initiated FLOGI), shut it down
             */
            node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
            efc_fabric_initiate_shutdown(node);
        }
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
             enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        /* sm: / send PLOGI */
        efc_send_plogi(node);
        efc_node_transition(node, __efc_p2p_wait_plogi_rsp, NULL);
        break;

    case EFC_EVT_ABTS_RCVD:
        /* sm: / send BA_ACC */
        efc_send_bls_acc(node, cbdata->header->dma.virt);

        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
                  enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_SRRS_ELS_CMPL_OK:
        WARN_ON(!node->els_cmpl_cnt);
        node->els_cmpl_cnt--;

        /* sm: if p2p_winner / domain_attach */
        if (node->nport->p2p_winner) {
            efc_node_transition(node,
                        __efc_p2p_wait_domain_attach,
                    NULL);
            if (!node->nport->domain->attached) {
                node_printf(node, "Domain not attached\n");
                efc_domain_attach(node->nport->domain,
                          node->nport->p2p_port_id);
            } else {
                node_printf(node, "Domain already attached\n");
                efc_node_post_event(node,
                            EFC_EVT_DOMAIN_ATTACH_OK,
                            NULL);
            }
        } else {
            /* this node has served its purpose;
             * we'll expect a PLOGI on a separate
             * node (remote SID=0x1); return this node
             * to init state in case peer
             * restarts discovery -- it may already
             * have (pending frames may exist).
             */
            /* don't send PLOGI on efc_d_init entry */
            efc_node_init_device(node, false);
        }
        break;

    case EFC_EVT_SRRS_ELS_CMPL_FAIL:
        /*
         * LS_ACC failed, possibly due to link down;
         * shutdown node and wait
         * for FLOGI discovery to restart
         */
        node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
        WARN_ON(!node->els_cmpl_cnt);
        node->els_cmpl_cnt--;
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;

    case EFC_EVT_ABTS_RCVD: {
        /* sm: / send BA_ACC */
        efc_send_bls_acc(node, cbdata->header->dma.virt);
        break;
    }

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
             enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

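    /*
     * Point-to-point node state machine: Wait for the PLOGI response
     * from the peer; on success save the service parameters and attach
     * the node.
     */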
    switch (evt) {
    case EFC_EVT_SRRS_ELS_REQ_OK: {
        int rc;

        if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / save sparams, efc_node_attach */
        efc_node_save_sparms(node, cbdata->els_rsp.virt);
        rc = efc_node_attach(node);
        efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
        if (rc < 0)
            efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
                        NULL);
        break;
    }
    case EFC_EVT_SRRS_ELS_REQ_FAIL: {
        if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        node_printf(node, "PLOGI failed, shutting down\n");
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;
    }

    case EFC_EVT_PLOGI_RCVD: {
        struct fc_frame_header *hdr = cbdata->header->dma.virt;
        /* if we're in external loopback mode, just send LS_ACC */
        if (node->efc->external_loopback) {
            efc_send_plogi_acc(node, be16_to_cpu(hdr->fh_ox_id));
        } else {
            /*
             * if this isn't external loopback,
             * pass to default handler
             */
            __efc_fabric_common(__func__, ctx, evt, arg);
        }
        break;
    }
    case EFC_EVT_PRLI_RCVD:
        /* I, or I+T */
        /* We sent a PLOGI and, before its completion was seen,
         * received a PRLI from the remote node (WCQEs and RCQEs come
         * in on different queues, so processing order cannot be
         * assumed). Save the OX_ID so the PRLI LS_ACC can be sent
         * after the attach, and continue to wait for the PLOGI
         * response.
         */
        efc_process_prli_payload(node, cbdata->payload->dma.virt);
        efc_send_ls_acc_after_attach(node,
                         cbdata->header->dma.virt,
                         EFC_NODE_SEND_LS_ACC_PRLI);
        efc_node_transition(node, __efc_p2p_wait_plogi_rsp_recvd_prli,
                    NULL);
        break;
    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
                    enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

    switch (evt) {
    case EFC_EVT_ENTER:
        /*
         * Since we've received a PRLI, we have a port login and just
         * need to wait for the PLOGI response to do the node attach;
         * then we can send the LS_ACC for the PRLI. During this time
         * we may receive FCP_CMNDs (which is possible since we've
         * already sent a PRLI and our peer may have accepted it).
         * At this time, we are not waiting on any other unsolicited
         * frames to continue with the login process, so it will not
         * hurt to hold frames here.
         */
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_SRRS_ELS_REQ_OK: { /* PLOGI response received */
        int rc;

        /* Completion from PLOGI sent */
        if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        /* sm: / save sparams, efc_node_attach */
        efc_node_save_sparms(node, cbdata->els_rsp.virt);
        rc = efc_node_attach(node);
        efc_node_transition(node, __efc_p2p_wait_node_attach, NULL);
        if (rc < 0)
            efc_node_post_event(node, EFC_EVT_NODE_ATTACH_FAIL,
                        NULL);
        break;
    }
    case EFC_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
    case EFC_EVT_SRRS_ELS_REQ_RJT:
        /* PLOGI failed, shutdown the node */
        if (efc_node_check_els_req(ctx, evt, arg, ELS_PLOGI,
                       __efc_fabric_common, __func__)) {
            return;
        }
        WARN_ON(!node->els_req_cnt);
        node->els_req_cnt--;
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

void
__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
               enum efc_sm_event evt, void *arg)
{
    struct efc_node_cb *cbdata = arg;
    struct efc_node *node = ctx->app;

    efc_node_evt_set(ctx, evt, __func__);

    node_sm_trace();

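    /*
     * Point-to-point node state machine: Wait for the node attach to
     * complete, then either send the deferred PRLI response or
     * transition to the port-logged-in state.
     */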
    switch (evt) {
    case EFC_EVT_ENTER:
        efc_node_hold_frames(node);
        break;

    case EFC_EVT_EXIT:
        efc_node_accept_frames(node);
        break;

    case EFC_EVT_NODE_ATTACH_OK:
        node->attached = true;
        switch (node->send_ls_acc) {
        case EFC_NODE_SEND_LS_ACC_PRLI: {
            efc_d_send_prli_rsp(node->ls_acc_io,
                        node->ls_acc_oxid);
            node->send_ls_acc = EFC_NODE_SEND_LS_ACC_NONE;
            node->ls_acc_io = NULL;
            break;
        }
        case EFC_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
        case EFC_NODE_SEND_LS_ACC_NONE:
        default:
            /* Normal case for I */
            /* sm: send_plogi_acc is not set / send PLOGI acc */
            efc_node_transition(node, __efc_d_port_logged_in,
                        NULL);
            break;
        }
        break;

    case EFC_EVT_NODE_ATTACH_FAIL:
        /* node attach failed, shutdown the node */
        node->attached = false;
        node_printf(node, "Node attach failed\n");
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_fabric_initiate_shutdown(node);
        break;

    case EFC_EVT_SHUTDOWN:
        node_printf(node, "%s received\n", efc_sm_event_name(evt));
        node->shutdown_reason = EFC_NODE_SHUTDOWN_DEFAULT;
        efc_node_transition(node,
                    __efc_fabric_wait_attach_evt_shutdown,
                     NULL);
        break;
    case EFC_EVT_PRLI_RCVD:
        node_printf(node, "%s: PRLI received before node is attached\n",
                efc_sm_event_name(evt));
        efc_process_prli_payload(node, cbdata->payload->dma.virt);
        efc_send_ls_acc_after_attach(node,
                         cbdata->header->dma.virt,
                EFC_NODE_SEND_LS_ACC_PRLI);
        break;

    default:
        __efc_fabric_common(__func__, ctx, evt, arg);
    }
}

int
efc_p2p_setup(struct efc_nport *nport)
{
    struct efc *efc = nport->efc;
    int rnode_winner;

    rnode_winner = efc_rnode_is_winner(nport);

    /* set nport flags to indicate p2p "winner" */
    if (rnode_winner == 1) {
        nport->p2p_remote_port_id = 0;
        nport->p2p_port_id = 0;
        nport->p2p_winner = false;
    } else if (rnode_winner == 0) {
        nport->p2p_remote_port_id = 2;
        nport->p2p_port_id = 1;
        nport->p2p_winner = true;
    } else {
        /* no winner; only okay if external loopback enabled */
        if (nport->efc->external_loopback) {
            /*
             * External loopback mode enabled;
             * local nport and remote node
             * will be registered with an NPortID = 1;
             */
            efc_log_debug(efc,
                      "External loopback mode enabled\n");
            nport->p2p_remote_port_id = 1;
            nport->p2p_port_id = 1;
            nport->p2p_winner = true;
        } else {
            efc_log_warn(efc,
                     "failed to determine p2p winner\n");
            return rnode_winner;
        }
    }
    return 0;
}