0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014
0015
0016
0017
0018
0019
0020
0021
0022
0023
0024
0025
0026
0027
0028 #include "efc.h"
0029
0030 void
0031 efc_nport_cb(void *arg, int event, void *data)
0032 {
0033 struct efc *efc = arg;
0034 struct efc_nport *nport = data;
0035 unsigned long flags = 0;
0036
0037 efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));
0038
0039 spin_lock_irqsave(&efc->lock, flags);
0040 efc_sm_post_event(&nport->sm, event, NULL);
0041 spin_unlock_irqrestore(&efc->lock, flags);
0042 }
0043
0044 static struct efc_nport *
0045 efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
0046 {
0047 struct efc_nport *nport = NULL;
0048
0049
0050 list_for_each_entry(nport, &domain->nport_list, list_entry) {
0051 if (nport->wwnn == wwnn && nport->wwpn == wwpn)
0052 return nport;
0053 }
0054 return NULL;
0055 }
0056
0057 static void
0058 _efc_nport_free(struct kref *arg)
0059 {
0060 struct efc_nport *nport = container_of(arg, struct efc_nport, ref);
0061
0062 kfree(nport);
0063 }
0064
0065 struct efc_nport *
0066 efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
0067 u32 fc_id, bool enable_ini, bool enable_tgt)
0068 {
0069 struct efc_nport *nport;
0070
0071 if (domain->efc->enable_ini)
0072 enable_ini = 0;
0073
0074
0075 if ((wwpn != 0) || (wwnn != 0)) {
0076 nport = efc_nport_find_wwn(domain, wwnn, wwpn);
0077 if (nport) {
0078 efc_log_err(domain->efc,
0079 "NPORT %016llX %016llX already allocated\n",
0080 wwnn, wwpn);
0081 return NULL;
0082 }
0083 }
0084
0085 nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
0086 if (!nport)
0087 return nport;
0088
0089
0090 kref_init(&nport->ref);
0091 nport->release = _efc_nport_free;
0092
0093 nport->efc = domain->efc;
0094 snprintf(nport->display_name, sizeof(nport->display_name), "------");
0095 nport->domain = domain;
0096 xa_init(&nport->lookup);
0097 nport->instance_index = domain->nport_count++;
0098 nport->sm.app = nport;
0099 nport->enable_ini = enable_ini;
0100 nport->enable_tgt = enable_tgt;
0101 nport->enable_rscn = (nport->enable_ini ||
0102 (nport->enable_tgt && enable_target_rscn(nport->efc)));
0103
0104
0105 memcpy(nport->service_params, domain->service_params,
0106 sizeof(struct fc_els_flogi));
0107
0108
0109 nport->fc_id = fc_id;
0110
0111
0112 nport->wwpn = wwpn;
0113 nport->wwnn = wwnn;
0114 snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
0115 (unsigned long long)wwnn);
0116
0117
0118
0119
0120
0121 if (list_empty(&domain->nport_list))
0122 domain->nport = nport;
0123
0124 INIT_LIST_HEAD(&nport->list_entry);
0125 list_add_tail(&nport->list_entry, &domain->nport_list);
0126
0127 kref_get(&domain->ref);
0128
0129 efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);
0130
0131 return nport;
0132 }
0133
/*
 * efc_nport_free() - detach @nport from its domain and drop references.
 *
 * Removes the nport from the domain's list and lookup table, posts
 * EFC_EVT_ALL_CHILD_NODES_FREE to the domain when this was the last
 * nport, then releases the domain reference taken in efc_nport_alloc()
 * and the nport's own initial reference (freeing it via nport->release).
 */
void
efc_nport_free(struct efc_nport *nport)
{
	struct efc_domain *domain;

	if (!nport)
		return;

	domain = nport->domain;
	efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
	list_del(&nport->list_entry);

	/* Clear the domain's nport pointer if it referenced us. */
	if (nport == domain->nport)
		domain->nport = NULL;

	xa_destroy(&nport->lookup);
	xa_erase(&domain->lookup, nport->fc_id);

	/* Last nport gone: let the domain state machine continue teardown. */
	if (list_empty(&domain->nport_list))
		efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
				      NULL);

	kref_put(&domain->ref, domain->release);
	kref_put(&nport->ref, nport->release);
}
0162
0163 struct efc_nport *
0164 efc_nport_find(struct efc_domain *domain, u32 d_id)
0165 {
0166 struct efc_nport *nport;
0167
0168
0169 nport = xa_load(&domain->lookup, d_id);
0170 if (!nport || !kref_get_unless_zero(&nport->ref))
0171 return NULL;
0172
0173 return nport;
0174 }
0175
0176 int
0177 efc_nport_attach(struct efc_nport *nport, u32 fc_id)
0178 {
0179 int rc;
0180 struct efc_node *node;
0181 struct efc *efc = nport->efc;
0182 unsigned long index;
0183
0184
0185 rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
0186 if (rc) {
0187 efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
0188 return rc;
0189 }
0190
0191
0192 efc_node_fcid_display(fc_id, nport->display_name,
0193 sizeof(nport->display_name));
0194
0195 xa_for_each(&nport->lookup, index, node) {
0196 efc_node_update_display_name(node);
0197 }
0198
0199 efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
0200 nport->display_name, fc_id);
0201
0202
0203 rc = efc_cmd_nport_attach(efc, nport, fc_id);
0204 if (rc < 0) {
0205 efc_log_err(nport->efc,
0206 "efc_hw_port_attach failed: %d\n", rc);
0207 return -EIO;
0208 }
0209 return 0;
0210 }
0211
/*
 * efc_nport_shutdown() - request shutdown of every node on @nport.
 *
 * Ordinary nodes are simply sent EFC_EVT_SHUTDOWN.  The fabric (FLOGI)
 * node of a vport is logged out gracefully with a LOGO first, unless
 * the link is already down or the LOGO cannot be sent.
 */
static void
efc_nport_shutdown(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	struct efc_node *node;
	unsigned long index;

	xa_for_each(&nport->lookup, index, node) {
		/* Anything that is not a vport's fabric node: plain shutdown. */
		if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			continue;
		}

		/*
		 * Fabric node of a vport: if the link is down there is no
		 * point attempting a LOGO, so shut the node down directly.
		 */
		if (efc->link_status == EFC_LINK_STATUS_DOWN) {
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			continue;
		}

		efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
			      node->display_name);

		if (!efc_send_logo(node)) {
			/* LOGO sent; wait for the response before freeing. */
			efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
			continue;
		}

		/*
		 * LOGO send failed; fall back to an explicit-logout style
		 * shutdown of the node.
		 */
		node_printf(node, "Failed to send LOGO\n");
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
	}
}
0253
0254 static void
0255 efc_vport_link_down(struct efc_nport *nport)
0256 {
0257 struct efc *efc = nport->efc;
0258 struct efc_vport *vport;
0259
0260
0261 list_for_each_entry(vport, &efc->vport_list, list_entry) {
0262 if (vport->nport == nport) {
0263 kref_put(&nport->ref, nport->release);
0264 vport->nport = NULL;
0265 break;
0266 }
0267 }
0268 }
0269
/*
 * __efc_nport_common() - events handled identically in every nport state.
 * @funcname: name of the calling state handler (for the log message)
 * @ctx: nport state-machine context (ctx->app is the nport)
 * @evt: event being dispatched
 * @arg: event argument (unused here)
 */
static void
__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
		   enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;
	struct efc *efc = nport->efc;

	switch (evt) {
	case EFC_EVT_ENTER:
	case EFC_EVT_REENTER:
	case EFC_EVT_EXIT:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		/* No action needed in the common handler. */
		break;
	case EFC_EVT_NPORT_ATTACH_OK:
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;
	case EFC_EVT_SHUTDOWN:
		/* Mark the nport as going away. */
		nport->shutting_down = true;

		/* A vport must first be unbound from its vport spec. */
		if (nport->is_vport)
			efc_vport_link_down(nport);

		if (xa_empty(&nport->lookup)) {
			/* No child nodes: free the HW port straight away. */
			xa_erase(&domain->lookup, nport->fc_id);
			efc_sm_transition(ctx, __efc_nport_wait_port_free,
					  NULL);
			if (efc_cmd_nport_free(efc, nport)) {
				efc_log_debug(nport->efc,
					      "efc_hw_port_free failed\n");
				/* HW free failed; release the object now. */
				efc_nport_free(nport);
			}
		} else {
			/* Child nodes remain: shut them down first. */
			efc_sm_transition(ctx,
					  __efc_nport_wait_shutdown, NULL);
			efc_nport_shutdown(nport);
		}
		break;
	default:
		efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
			      nport->display_name, funcname,
			      efc_sm_event_name(evt));
	}
}
0318
/*
 * __efc_nport_allocated() - nport state: allocated, awaiting attach.
 *
 * EFC_EVT_NPORT_ATTACH_OK moves to the attached state; a redundant
 * EFC_EVT_NPORT_ALLOC_OK is ignored.
 */
void
__efc_nport_allocated(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;

	nport_sm_trace(nport);

	switch (evt) {
	/* Attach completed; only the domain's nport should reach this. */
	case EFC_EVT_NPORT_ATTACH_OK:
		WARN_ON(nport != domain->nport);
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;

	/* Allocation already done; nothing further to do. */
	case EFC_EVT_NPORT_ALLOC_OK:
		break;
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0342
/*
 * __efc_nport_vport_init() - initial state for a vport's nport.
 *
 * On ENTER, starts the hardware port allocation.  A zero WWPN lets the
 * firmware select the WWN; an fc_id other than U32_MAX requests a
 * specific (hard-coded) port id.
 */
void
__efc_nport_vport_init(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_ENTER: {
		__be64 be_wwpn = cpu_to_be64(nport->wwpn);

		if (nport->wwpn == 0)
			efc_log_debug(efc, "vport: letting f/w select WWN\n");

		if (nport->fc_id != U32_MAX) {
			efc_log_debug(efc, "vport: hard coding port id: %x\n",
				      nport->fc_id);
		}

		/* Transition first so the alloc completion finds us waiting. */
		efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
		/* A NULL wwpn tells the hardware to assign one. */
		if (efc_cmd_nport_alloc(efc, nport, nport->domain,
					nport->wwpn == 0 ? NULL :
					(uint8_t *)&be_wwpn)) {
			efc_log_err(efc, "Can't allocate port\n");
			break;
		}

		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0379
/*
 * __efc_nport_vport_wait_alloc() - vport waiting for HW port allocation.
 *
 * On EFC_EVT_NPORT_ALLOC_OK, adopts the firmware-assigned WWNs (when
 * none were requested), patches them into the FLOGI service parameters,
 * then either starts fabric login (no fc_id assigned yet) or attaches
 * directly with the requested fc_id.
 */
void
__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ALLOC_OK: {
		struct fc_els_flogi *sp;

		sp = (struct fc_els_flogi *)nport->service_params;

		if (nport->wwnn == 0) {
			/* Adopt the WWNs the firmware selected. */
			nport->wwnn = be64_to_cpu(nport->sli_wwnn);
			nport->wwpn = be64_to_cpu(nport->sli_wwpn);
			/* NOTE(review): wwnn_str is formatted from wwpn — confirm intended */
			snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
				 "%016llX", nport->wwpn);
		}

		/* Update the service parameters with the final WWNs. */
		sp->fl_wwpn = cpu_to_be64(nport->wwpn);
		sp->fl_wwnn = cpu_to_be64(nport->wwnn);

		/*
		 * If fc_id is uninitialized, create a fabric node and let it
		 * discover an fc_id via FLOGI/FDISC; otherwise attach with
		 * the fc_id we were given.
		 */
		if (nport->fc_id == U32_MAX) {
			struct efc_node *fabric;

			fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
						false);
			if (!fabric) {
				efc_log_err(efc, "efc_node_alloc() failed\n");
				return;
			}
			efc_node_transition(fabric, __efc_vport_fabric_init,
					    NULL);
		} else {
			/* NOTE(review): wwnn_str formatted from wwpn here too */
			snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
				 "%016llX", nport->wwpn);
			efc_nport_attach(nport, nport->fc_id);
		}
		efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0436
/*
 * __efc_nport_vport_allocated() - vport allocated, awaiting attach.
 *
 * The attach completion is forwarded to the fabric (FLOGI) node before
 * the nport moves to the attached state.
 */
void
__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ATTACH_OK: {
		struct efc_node *node;

		/* Find our fabric node and tell it the attach completed. */
		node = efc_node_find(nport, FC_FID_FLOGI);
		if (!node) {
			efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
			break;
		}

		efc_node_post_event(node, evt, NULL);
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0471
0472 static void
0473 efc_vport_update_spec(struct efc_nport *nport)
0474 {
0475 struct efc *efc = nport->efc;
0476 struct efc_vport *vport;
0477 unsigned long flags = 0;
0478
0479 spin_lock_irqsave(&efc->vport_lock, flags);
0480 list_for_each_entry(vport, &efc->vport_list, list_entry) {
0481 if (vport->nport == nport) {
0482 vport->wwnn = nport->wwnn;
0483 vport->wwpn = nport->wwpn;
0484 vport->tgt_data = nport->tgt_data;
0485 vport->ini_data = nport->ini_data;
0486 break;
0487 }
0488 }
0489 spin_unlock_irqrestore(&efc->vport_lock, flags);
0490 }
0491
/*
 * __efc_nport_attached() - nport fully attached to the fabric.
 *
 * On ENTER, refreshes node display names and notifies the transport of
 * the new nport (syncing the vport spec for vports); on EXIT, notifies
 * the transport that the nport is going away.
 */
void
__efc_nport_attached(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_ENTER: {
		struct efc_node *node;
		unsigned long index;

		efc_log_debug(efc,
			      "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
			      nport->display_name,
			      nport->wwpn, nport->wwnn);

		xa_for_each(&nport->lookup, index, node)
			efc_node_update_display_name(node);

		efc->tt.new_nport(efc, nport);

		/*
		 * Propagate the (possibly firmware-assigned) parameters back
		 * to the persistent vport spec.
		 */
		if (nport->is_vport)
			efc_vport_update_spec(nport);
		break;
	}

	case EFC_EVT_EXIT:
		efc_log_debug(efc,
			      "[%s] NPORT deattached WWPN %016llX WWNN %016llX\n",
			      nport->display_name,
			      nport->wwpn, nport->wwnn);

		efc->tt.del_nport(efc, nport);
		break;
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0537
/*
 * __efc_nport_wait_shutdown() - waiting for child nodes to shut down.
 *
 * Alloc/attach completions arriving during shutdown are ignored.  Once
 * EFC_EVT_ALL_CHILD_NODES_FREE arrives, the HW port is freed and the
 * state machine waits for that to complete.
 */
void
__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ALLOC_OK:
	case EFC_EVT_NPORT_ALLOC_FAIL:
	case EFC_EVT_NPORT_ATTACH_OK:
	case EFC_EVT_NPORT_ATTACH_FAIL:
		/* ignore these events - just wait for ALL_CHILD_NODES_FREE */
		break;

	case EFC_EVT_ALL_CHILD_NODES_FREE: {
		/*
		 * Remove the nport from the domain lookup before freeing
		 * the hardware port.
		 */
		xa_erase(&domain->lookup, nport->fc_id);
		efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
		if (efc_cmd_nport_free(efc, nport)) {
			efc_log_err(nport->efc, "efc_hw_port_free failed\n");
			/* HW free failed; release the nport object now. */
			efc_nport_free(nport);
		}
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}
0574
0575 void
0576 __efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
0577 enum efc_sm_event evt, void *arg)
0578 {
0579 struct efc_nport *nport = ctx->app;
0580
0581 nport_sm_trace(nport);
0582
0583 switch (evt) {
0584 case EFC_EVT_NPORT_ATTACH_OK:
0585
0586 break;
0587 case EFC_EVT_NPORT_FREE_OK: {
0588
0589 efc_nport_free(nport);
0590 break;
0591 }
0592 default:
0593 __efc_nport_common(__func__, ctx, evt, arg);
0594 }
0595 }
0596
0597 static int
0598 efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
0599 {
0600 struct efc_nport *nport;
0601
0602 lockdep_assert_held(&domain->efc->lock);
0603
0604 nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
0605 vport->enable_ini, vport->enable_tgt);
0606 vport->nport = nport;
0607 if (!nport)
0608 return -EIO;
0609
0610 kref_get(&nport->ref);
0611 nport->is_vport = true;
0612 nport->tgt_data = vport->tgt_data;
0613 nport->ini_data = vport->ini_data;
0614
0615 efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);
0616
0617 return 0;
0618 }
0619
0620 int
0621 efc_vport_start(struct efc_domain *domain)
0622 {
0623 struct efc *efc = domain->efc;
0624 struct efc_vport *vport;
0625 struct efc_vport *next;
0626 int rc = 0;
0627 unsigned long flags = 0;
0628
0629
0630 spin_lock_irqsave(&efc->vport_lock, flags);
0631 list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
0632 if (!vport->nport) {
0633 if (efc_vport_nport_alloc(domain, vport))
0634 rc = -EIO;
0635 }
0636 }
0637 spin_unlock_irqrestore(&efc->vport_lock, flags);
0638
0639 return rc;
0640 }
0641
/*
 * efc_nport_vport_new() - create a new vport (spec + nport) on @domain.
 *
 * Validates the requested modes against the driver configuration,
 * records a persistent vport spec (so the vport is recreated after a
 * link bounce), then allocates the backing nport under the efc lock.
 *
 * Return: 0 on success, -EIO on failure.
 *
 * NOTE(review): if efc_vport_nport_alloc() fails, the spec remains on
 * efc->vport_list — presumably cleaned up via efc_nport_vport_del() or
 * efc_vport_del_all(); verify against callers.
 */
int
efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
		    u32 fc_id, bool ini, bool tgt, void *tgt_data,
		    void *ini_data)
{
	struct efc *efc = domain->efc;
	struct efc_vport *vport;
	int rc = 0;
	unsigned long flags = 0;

	if (ini && domain->efc->enable_ini == 0) {
		efc_log_debug(efc, "driver initiator mode not enabled\n");
		return -EIO;
	}

	if (tgt && domain->efc->enable_tgt == 0) {
		efc_log_debug(efc, "driver target mode not enabled\n");
		return -EIO;
	}

	/*
	 * Record a vport spec so this vport can be recreated after a link
	 * up event.
	 */
	vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
				      tgt_data, ini_data);
	if (!vport) {
		efc_log_err(efc, "failed to create vport object entry\n");
		return -EIO;
	}

	spin_lock_irqsave(&efc->lock, flags);
	rc = efc_vport_nport_alloc(domain, vport);
	spin_unlock_irqrestore(&efc->lock, flags);

	return rc;
}
0679
/*
 * efc_nport_vport_del() - delete the vport identified by @wwpn/@wwnn.
 *
 * Removes the persistent vport spec first (so the vport is not
 * recreated on the next link up); then, if a domain exists, drops the
 * extra nport reference held for the vport and posts EFC_EVT_SHUTDOWN
 * to tear the nport down.
 *
 * Return: always 0.
 */
int
efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
		    u64 wwpn, uint64_t wwnn)
{
	struct efc_nport *nport;
	struct efc_vport *vport;
	struct efc_vport *next;
	unsigned long flags = 0;

	spin_lock_irqsave(&efc->vport_lock, flags);
	/* Remove the matching spec from the persistent list. */
	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
		if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
			list_del(&vport->list_entry);
			kfree(vport);
			break;
		}
	}
	spin_unlock_irqrestore(&efc->vport_lock, flags);

	if (!domain) {
		/* No domain means there is no live nport to shut down. */
		return 0;
	}

	spin_lock_irqsave(&efc->lock, flags);
	list_for_each_entry(nport, &domain->nport_list, list_entry) {
		if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
			/* Drop the extra ref held for the vport binding. */
			kref_put(&nport->ref, nport->release);
			/* Shut down this nport via its state machine. */
			efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
			break;
		}
	}

	spin_unlock_irqrestore(&efc->lock, flags);
	return 0;
}
0718
0719 void
0720 efc_vport_del_all(struct efc *efc)
0721 {
0722 struct efc_vport *vport;
0723 struct efc_vport *next;
0724 unsigned long flags = 0;
0725
0726 spin_lock_irqsave(&efc->vport_lock, flags);
0727 list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
0728 list_del(&vport->list_entry);
0729 kfree(vport);
0730 }
0731 spin_unlock_irqrestore(&efc->vport_lock, flags);
0732 }
0733
/*
 * efc_vport_create_spec() - record a persistent vport specification.
 *
 * The spec lives on efc->vport_list and survives link bounces so the
 * vport can be recreated on link up (see efc_vport_start()).  A
 * duplicate WWNN/WWPN pair is rejected.
 *
 * Return: the new spec (owned by efc->vport_list), or NULL on a
 * duplicate or allocation failure.
 */
struct efc_vport *
efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
		      u32 fc_id, bool enable_ini,
		      bool enable_tgt, void *tgt_data, void *ini_data)
{
	struct efc_vport *vport;
	unsigned long flags = 0;

	/*
	 * Walk the vport list and fail if this (non-zero) WWPN/WWNN pair
	 * is already present.
	 */
	spin_lock_irqsave(&efc->vport_lock, flags);
	list_for_each_entry(vport, &efc->vport_list, list_entry) {
		if ((wwpn && vport->wwpn == wwpn) &&
		    (wwnn && vport->wwnn == wwnn)) {
			efc_log_err(efc,
				    "VPORT %016llX %016llX already allocated\n",
				    wwnn, wwpn);
			spin_unlock_irqrestore(&efc->vport_lock, flags);
			return NULL;
		}
	}

	vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
	if (!vport) {
		spin_unlock_irqrestore(&efc->vport_lock, flags);
		return NULL;
	}

	vport->wwnn = wwnn;
	vport->wwpn = wwpn;
	vport->fc_id = fc_id;
	vport->enable_tgt = enable_tgt;
	vport->enable_ini = enable_ini;
	vport->tgt_data = tgt_data;
	vport->ini_data = ini_data;

	INIT_LIST_HEAD(&vport->list_entry);
	list_add_tail(&vport->list_entry, &efc->vport_list);
	spin_unlock_irqrestore(&efc->vport_lock, flags);
	return vport;
}