0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013
0014 #include <linux/timer.h>
0015 #include <linux/slab.h>
0016 #include <linux/err.h>
0017 #include <linux/export.h>
0018 #include <linux/log2.h>
0019
0020 #include <scsi/fc/fc_fc2.h>
0021
0022 #include <scsi/libfc.h>
0023
0024 #include "fc_libfc.h"
0025
/*
 * An XID is split into a per-CPU pool selector (low bits) and a pool
 * index (high bits); see fc_exch_em_alloc() and fc_exch_find().
 */
u16 fc_cpu_mask;		/* mask selecting the CPU bits of an XID */
EXPORT_SYMBOL(fc_cpu_mask);
static u16 fc_cpu_order;	/* shift between pool index and CPU bits */
static struct kmem_cache *fc_em_cachep;	/* presumably backs the exchange mempool — set up outside this chunk */
static struct workqueue_struct *fc_exch_workqueue; /* runs exchange timeout_work */
0031
0032
0033
0034
0035
0036
0037
0038
0039
0040
0041
0042
0043
0044
0045
0046
0047
0048
0049
0050
0051
0052
0053
0054
0055
0056
0057
/**
 * struct fc_exch_pool - Per-CPU pool of active exchanges
 * @lock:	  Protects the pool fields and the exchange pointer array
 *		  that is co-allocated immediately after this structure
 *		  (see fc_exch_ptr_get()/fc_exch_ptr_set())
 * @ex_list:	  List of exchanges allocated from this pool
 * @next_index:	  Start point for the round-robin free-index search
 * @total_exches: Number of exchanges currently in this pool
 * @left:	  Cached recently-freed index, or FC_XID_UNKNOWN
 * @right:	  Second cached recently-freed index, or FC_XID_UNKNOWN
 *
 * @left/@right are a fast-path cache: fc_exch_delete() stores freed
 * indexes here and fc_exch_em_alloc() reuses them before falling back
 * to the linear search from @next_index.
 */
struct fc_exch_pool {
	spinlock_t lock;
	struct list_head ex_list;
	u16 next_index;
	u16 total_exches;

	u16 left;
	u16 right;
} ____cacheline_aligned_in_smp;
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
0081
0082
/**
 * struct fc_exch_mgr - The Exchange Manager (EM)
 * @pool:	    Per-CPU array of exchange pools
 * @ep_pool:	    mempool backing struct fc_exch allocations
 * @lport:	    The local port that owns this EM
 * @class:	    Default FC class for newly allocated exchanges
 * @kref:	    Reference count for this EM
 * @min_xid:	    Minimum exchange ID handled by this EM
 * @max_xid:	    Maximum exchange ID handled by this EM
 * @pool_max_index: Highest valid index within one per-CPU pool
 * @stats:	    Error/statistics counters, updated atomically
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t *ep_pool;
	struct fc_lport *lport;
	enum fc_class class;
	struct kref kref;
	u16 min_xid;
	u16 max_xid;
	u16 pool_max_index;

	struct {
		atomic_t no_free_exch;		/* mempool allocation failed */
		atomic_t no_free_exch_xid;	/* no free XID slot in pool */
		atomic_t xid_not_found;		/* lookup by XID failed */
		atomic_t xid_busy;		/* new exchange for in-use XID */
		atomic_t seq_not_found;		/* frame seq_id mismatch */
		atomic_t non_bls_resp;		/* unexpected non-BLS response */
	} stats;
};
0102
0103
0104
0105
0106
0107
0108
0109
0110
0111
0112
0113
0114
/**
 * struct fc_exch_mgr_anchor - Link between a local port and an EM
 * @ema_list: Node in the local port's ema_list
 * @mp:	      The exchange manager anchored here
 * @match:    Optional predicate; the EM handles a frame only when
 *	      @match is NULL or @match(fp) returns true
 *	      (see fc_exch_alloc() and fc_seq_assign())
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
0120
0121 static void fc_exch_rrq(struct fc_exch *);
0122 static void fc_seq_ls_acc(struct fc_frame *);
0123 static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
0124 enum fc_els_rjt_explan);
0125 static void fc_exch_els_rec(struct fc_frame *);
0126 static void fc_exch_els_rrq(struct fc_frame *);
0127
0128
0129
0130
0131
0132
0133
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
0156
0157
0158
0159
0160
0161
0162
0163
0164
0165
0166
0167
0168
0169
0170
0171
0172
0173
0174
0175
0176
0177
0178
0179
0180
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
0195
0196
0197
0198
0199
0200
0201
0202
0203
0204
0205
0206
0207
0208
0209
0210 static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
0211
0212
0213
0214
0215
0216
0217
0218
0219
0220
/**
 * fc_exch_name_lookup() - Map an opcode to a printable name
 * @op:		The opcode to look up
 * @table:	Table of name strings, indexed by opcode
 * @max_index:	Number of entries in @table
 *
 * Returns the table entry for @op, or "unknown" when @op is out of
 * range or the table slot is NULL.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	if (op >= max_index || !table[op])
		return "unknown";
	return table[op];
}
0232
0233
0234
0235
0236
0237 static const char *fc_exch_rctl_name(unsigned int op)
0238 {
0239 return fc_exch_name_lookup(op, fc_exch_rctl_names,
0240 ARRAY_SIZE(fc_exch_rctl_names));
0241 }
0242
0243
0244
0245
0246
/**
 * fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
 *
 * Paired with fc_exch_release().
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
0251
0252
0253
0254
0255
0256
0257
0258
0259
0260
0261
/**
 * fc_exch_setup_hdr() - Initialize a frame header from exchange state
 * @ep:	   The exchange supplying class, IDs and sequence counters
 * @fp:	   The frame whose header and SOF/EOF markers are set up
 * @f_ctl: The F_CTL word already present in the frame header
 *
 * Chooses SOF/EOF for the frame, pads the payload to a 4-byte multiple
 * when this frame ends the sequence, and fills in OX_ID, RX_ID,
 * SEQ_ID and SEQ_CNT from the exchange.
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	/* first frame of the sequence uses the initiate SOF for the class */
	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack((enum fc_sof)ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * The number of fill bytes needed to make the frame
		 * length a 4-byte multiple is recorded in the low two
		 * bits of F_CTL, and the padding itself is appended to
		 * the skb.  The fill bytes are assumed to be zeroed by
		 * the frame allocation.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* NOTE(review): may be a problem for fragmented skbs — confirm */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad on non-last frames */
		fr_eof(fp) = FC_EOF_N;
	}

	/* initialize the remaining header fields from the exchange */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
0303
0304
0305
0306
0307
0308
0309
0310
/**
 * fc_exch_release() - Drop a reference to an exchange
 * @ep: Exchange to be released
 *
 * When the last reference is dropped, the destructor (if set) is run
 * and the exchange is returned to its manager's mempool.  The exchange
 * is expected to be complete (ESB_ST_COMPLETE) at that point.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}
0323
0324
0325
0326
0327
/**
 * fc_exch_timer_cancel() - Cancel a pending exchange timer
 * @ep: The exchange whose timer is to be canceled
 *
 * If the delayed work was still pending, drop the reference that was
 * taken when the timer was armed (see fc_exch_timer_set_locked()).
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	}
}
0335
0336
0337
0338
0339
0340
0341
0342
0343
0344
/**
 * fc_exch_timer_set_locked() - Start the exchange timer, lock held
 * @ep:		The exchange whose timer will start
 * @timer_msec:	The timeout period in milliseconds
 *
 * Caller holds @ep->ex_lock.  Does nothing if the exchange is already
 * done or being reset.  A reference is taken for the queued work; it is
 * dropped again here if the work was already queued, otherwise by
 * fc_exch_timeout() when the timer fires.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	fc_exch_hold(ep);		/* hold for timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec))) {
		FC_EXCH_DBG(ep, "Exchange already queued\n");
		fc_exch_release(ep);
	}
}
0360
0361
0362
0363
0364
0365
/**
 * fc_exch_timer_set() - Lock the exchange and start its timer
 * @ep:		The exchange whose timer will start
 * @timer_msec:	The timeout period in milliseconds
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
0372
0373
0374
0375
0376
0377
0378
/**
 * fc_exch_done_locked() - Complete an exchange with the lock held
 * @ep: The exchange that is complete
 *
 * Marks the exchange ESB_ST_COMPLETE.  It is only flagged FC_EX_DONE
 * (and its timer canceled) once no recovery qualifier is outstanding.
 *
 * Returns 0 when the caller may delete the exchange, non-zero when it
 * must not (already done, or an REC qualifier is still held so the
 * exchange is reused, e.g. by the timer to send an RRQ).
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * Check for prior completion in case two contexts race to
	 * complete this exchange.  With an REC qualifier outstanding we
	 * only set COMPLETE, leaving the exchange alive for the RRQ.
	 */
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}
0400
0401 static struct fc_exch fc_quarantine_exch;
0402
0403
0404
0405
0406
0407
0408
0409
0410
0411
0412 static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
0413 u16 index)
0414 {
0415 struct fc_exch **exches = (struct fc_exch **)(pool + 1);
0416 return exches[index];
0417 }
0418
0419
0420
0421
0422
0423
0424
0425 static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
0426 struct fc_exch *ep)
0427 {
0428 ((struct fc_exch **)(pool + 1))[index] = ep;
0429 }
0430
0431
0432
0433
0434
/**
 * fc_exch_delete() - Delete an exchange from its pool
 * @ep: The exchange to be deleted
 *
 * Removes @ep from its per-CPU pool and drops the pool's reference.
 * Unless the exchange is quarantined, its index is recycled by caching
 * it in the pool's left/right fast-reuse hints or making it the next
 * search start point.  A quarantined XID keeps its slot occupied by the
 * fc_quarantine_exch sentinel so it is never reallocated.
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* update the cache of free slots */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (!(ep->state & FC_EX_QUARANTINE)) {
		if (pool->left == FC_XID_UNKNOWN)
			pool->left = index;
		else if (pool->right == FC_XID_UNKNOWN)
			pool->right = index;
		else
			pool->next_index = index;
		fc_exch_ptr_set(pool, index, NULL);
	} else {
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
	}
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in pool */
}
0462
/**
 * fc_seq_send_locked() - Send a frame using an existing sequence
 * @lport: The local port the frame will be sent on
 * @sp:	   The sequence to send on
 * @fp:	   The frame to send
 *
 * Caller holds the exchange lock.  The frame is consumed: it is freed
 * here on an aborted/complete exchange, or handed to frame_send();
 * callers must not touch @fp afterwards.
 *
 * Returns 0 on success, -ENXIO if the exchange is already complete or
 * abnormal, or the frame_send() error.
 */
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error = -ENXIO;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
		fc_frame_free(fp);
		goto out;
	}

	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * Update the sequence count: when a max payload is set this
	 * frame may carry multiple FC frames (sequence offload),
	 * otherwise count a single frame.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lport->tt.frame_send(lport, fp);

	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags, assuming all frames
	 * for the sequence have been sent.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not the first seq anymore */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}
0515
0516
0517
0518
0519
0520
0521
0522
0523
0524
/**
 * fc_seq_send() - Send a frame using an existing sequence/exchange pair
 * @lport: The local port that the exchange will be sent on
 * @sp:	   The sequence to be sent
 * @fp:	   The frame to be sent on the sequence
 *
 * Note: the frame is consumed (freed directly or via frame_send());
 * callers must not touch @fp after this call.
 */
int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	int error;
	ep = fc_seq_exch(sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_seq_send_locked(lport, sp, fp);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
0536
0537
0538
0539
0540
0541
0542
0543
0544
0545
0546 static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
0547 {
0548 struct fc_seq *sp;
0549
0550 sp = &ep->seq;
0551 sp->ssb_stat = 0;
0552 sp->cnt = 0;
0553 sp->id = seq_id;
0554 return sp;
0555 }
0556
0557
0558
0559
0560
0561
/**
 * fc_seq_start_next_locked() - Start a new sequence on an exchange
 * @sp: A sequence on the exchange to get the new sequence for
 *
 * Caller holds the exchange lock.  Consumes the exchange's seq_id
 * counter for the new sequence.
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
0571
0572
0573
0574
0575
0576
/**
 * fc_seq_start_next() - Lock the exchange and start a new sequence on it
 * @sp: A sequence on the exchange to get the new sequence for
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
0588
0589
0590
0591
0592
0593
/**
 * fc_seq_set_resp() - Set the response handler for a sequence's exchange
 * @sp:	  The sequence whose exchange is updated
 * @resp: The response handler (may be NULL to clear it)
 * @arg:  The argument passed to the response handler
 *
 * May sleep: waits on the exchange's resp_wq until no *other* task is
 * inside the response handler (tracked by resp_active/resp_task, see
 * fc_invoke_resp()), so @resp/@arg are never changed underneath an
 * in-flight invocation.  The current task is allowed through so the
 * handler itself can change its own callback.
 */
void fc_seq_set_resp(struct fc_seq *sp,
		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
		     void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	DEFINE_WAIT(wait);

	spin_lock_bh(&ep->ex_lock);
	while (ep->resp_active && ep->resp_task != current) {
		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&ep->ex_lock);

		schedule();

		spin_lock_bh(&ep->ex_lock);
	}
	finish_wait(&ep->resp_wq, &wait);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}
EXPORT_SYMBOL(fc_seq_set_resp);
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
0627
0628
0629
0630
0631
0632
/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep:		The exchange to be aborted
 * @timer_msec:	Abort timeout period; when non-zero the exchange timer
 *		is armed so the response handler sees a timeout error if
 *		the abort gets no response
 *
 * Caller holds the exchange lock.  Generally invoked on an exchange
 * timeout or an abort request from the upper layer.
 *
 * Returns 0 on success, -ENXIO if the exchange is already complete or
 * being cleaned up, -ENOMEM/-ENOBUFS on allocation failure.
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
			    ep->esb_stat, ep->state);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	if (ep->sid) {
		/*
		 * Our source ID is known: send an ABTS for this exchange.
		 */
		fp = fc_frame_alloc(ep->lp, 0);
		if (fp) {
			ep->esb_stat |= ESB_ST_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
				       FC_TYPE_BLS, FC_FC_END_SEQ |
				       FC_FC_SEQ_INIT, 0);
			error = fc_seq_send_locked(ep->lp, sp, fp);
		} else {
			error = -ENOBUFS;
		}
	} else {
		/*
		 * No source ID yet (not logged in): don't send the ABTS,
		 * just mark the exchange abnormal below.
		 */
		error = 0;
	}
	ep->esb_stat |= ESB_ST_ABNORMAL;
	return error;
}
0682
0683
0684
0685
0686
0687
0688
0689
0690
0691
/**
 * fc_seq_exch_abort() - Abort an exchange and sequence
 * @req_sp:	A sequence on the exchange to be aborted
 * @timer_msec:	The period of time to wait before aborting
 *
 * Locks the exchange and delegates to fc_exch_abort_locked().
 *
 * Returns 0 on success, else an error code.
 */
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(req_sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_exch_abort_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
0703
0704
0705
0706
0707
0708
0709
0710
0711
0712
0713
0714
0715
0716
0717
0718
0719
0720
0721
0722
0723
0724
0725
0726
0727
0728
/**
 * fc_invoke_resp() - Invoke the exchange's response handler
 * @ep: The exchange whose handler is run
 * @sp: The sequence passed to the handler
 * @fp: The received frame, or an ERR_PTR error code
 *
 * resp_active counts in-flight handler invocations and resp_task
 * records the owning task while exactly one caller is inside; this lets
 * fc_seq_set_resp() sleep until no other context is running the handler
 * before swapping resp/arg.  The handler itself may call fc_exch_done(),
 * which clears ep->resp.
 *
 * Returns true if and only if a handler was installed and invoked.
 */
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
			   struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	bool res = false;

	spin_lock_bh(&ep->ex_lock);
	ep->resp_active++;
	/* record the running task; with concurrent callers there is no owner */
	if (ep->resp_task != current)
		ep->resp_task = !ep->resp_task ? current : NULL;
	resp = ep->resp;
	arg = ep->arg;
	spin_unlock_bh(&ep->ex_lock);

	if (resp) {
		resp(sp, fp, arg);
		res = true;
	}

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		ep->resp_task = NULL;
	spin_unlock_bh(&ep->ex_lock);

	/* wake any fc_seq_set_resp() waiter once the last invocation exits */
	if (ep->resp_active == 0)
		wake_up(&ep->resp_wq);

	return res;
}
0759
0760
0761
0762
0763
/**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct embedded in the timed-out exchange
 *
 * Runs from fc_exch_workqueue.  A completed exchange still holding a
 * recovery qualifier gets an RRQ sent to recover it; otherwise the
 * response handler is invoked with -FC_EX_TIMEOUT, then cleared, and
 * the exchange is aborted with a 2 * R_A_TOV timeout.  The reference
 * taken when the timer was armed is dropped on exit.
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		/* complete but REC qualifier may be held: recover via RRQ */
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
		fc_seq_set_resp(sp, NULL, ep->arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was armed.
	 */
	fc_exch_release(ep);
}
0804
0805
0806
0807
0808
0809
0810
0811
/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM
 * @lport: The local port the exchange is for
 * @mp:	   The exchange manager to allocate from
 *
 * The XID is built from a free index in the current CPU's pool (high
 * bits) and the CPU id (low bits), offset by the EM's min_xid.  Free
 * indexes come from the pool's left/right reuse cache when possible,
 * otherwise from a round-robin scan starting at next_index.
 *
 * Returns the exchange with its ex_lock HELD (released by the caller,
 * e.g. fc_exch_resp()), or NULL on allocation/XID exhaustion.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for the exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = raw_smp_processor_id();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);

	/* peek the cache of recently freed slots first */
	if (pool->left != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
			index = pool->left;
			pool->left = FC_XID_UNKNOWN;
			goto hit;
		}
	}
	if (pool->right != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
			index = pool->right;
			pool->right = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	index = pool->next_index;
	/* round-robin scan for a free slot; give up after a full cycle */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in pool */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold the exch lock for the caller: this prevents the exchange
	 * from being released while the caller is still entering
	 * fc_exch_resp() or starting a new sequence on it.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * Initialize the exchange: XID = (pool index, CPU) + min_xid.
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next sequence is the first */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	ep->resp_active = 0;
	init_waitqueue_head(&ep->resp_wq);
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
0893
0894
0895
0896
0897
0898
0899
0900
0901
0902
0903
0904
0905 static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
0906 struct fc_frame *fp)
0907 {
0908 struct fc_exch_mgr_anchor *ema;
0909 struct fc_exch *ep;
0910
0911 list_for_each_entry(ema, &lport->ema_list, ema_list) {
0912 if (!ema->match || ema->match(fp)) {
0913 ep = fc_exch_em_alloc(lport, ema->mp);
0914 if (ep)
0915 return ep;
0916 }
0917 }
0918 return NULL;
0919 }
0920
0921
0922
0923
0924
0925
0926 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
0927 {
0928 struct fc_lport *lport = mp->lport;
0929 struct fc_exch_pool *pool;
0930 struct fc_exch *ep = NULL;
0931 u16 cpu = xid & fc_cpu_mask;
0932
0933 if (xid == FC_XID_UNKNOWN)
0934 return NULL;
0935
0936 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
0937 pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
0938 lport->host->host_no, lport->port_id, xid, cpu);
0939 return NULL;
0940 }
0941
0942 if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
0943 pool = per_cpu_ptr(mp->pool, cpu);
0944 spin_lock_bh(&pool->lock);
0945 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
0946 if (ep == &fc_quarantine_exch) {
0947 FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
0948 ep = NULL;
0949 }
0950 if (ep) {
0951 WARN_ON(ep->xid != xid);
0952 fc_exch_hold(ep);
0953 }
0954 spin_unlock_bh(&pool->lock);
0955 }
0956 return ep;
0957 }
0958
0959
0960
0961
0962
0963
0964
0965
0966
/**
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete
 * @sp: The sequence that is complete
 *
 * Clears the response handler and, when fc_exch_done_locked() says the
 * exchange may be torn down, removes it from its pool.  Note this can
 * sleep in fc_seq_set_resp() if another context is inside the handler.
 */
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_seq_set_resp(sp, NULL, ep->arg);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
0981
0982
0983
0984
0985
0986
0987
0988
0989
/**
 * fc_exch_resp() - Allocate a new exchange for a received request
 * @lport: The local port the exchange is for
 * @mp:	   The exchange manager to allocate from
 * @fp:	   The received request frame
 *
 * We are the responder: source/destination IDs are swapped relative to
 * the incoming frame, our XID becomes the RX_ID and the originator's
 * OX_ID is copied in.  fc_exch_alloc() returns the exchange locked; it
 * is unlocked here after an extra hold is taken for the caller.
 *
 * Returns the new exchange, held, or NULL on allocation failure.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX to indicate we are responding on this
		 * exchange; the first sequence is the originator's.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field.  Move it to the responder field,
		 * and reply with the originator's OX_ID.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
/**
 * fc_seq_lookup_recip() - Find a sequence where we are the recipient
 * @lport: The local port the frame was received on
 * @mp:	   The exchange manager to look in
 * @fp:	   The received frame
 *
 * Finds (or, for a new first-sequence frame, creates) the exchange and
 * sequence for an incoming frame where this end is not the sequence
 * initiator.  On success, fr_seq(fp) is set and the exchange is held.
 *
 * Returns FC_RJT_NONE on success, otherwise the P_RJT reject reason.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be responder.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		/* sent by the responder of an exchange we originated */
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are responder */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rxid of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				/* XID still in use for another exchange */
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	spin_lock_bh(&ep->ex_lock);
	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = &ep->seq;
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			if (f_ctl & FC_FC_END_SEQ) {
				/*
				 * Update sequence_id based on the incoming
				 * last frame of the sequence.  This matters
				 * for FC targets using DDP, where the stack
				 * only sees the last frame's header: the
				 * seq_id in that header was allocated by the
				 * initiator and differs from the seq_id we
				 * allocated when XFER_RDY was sent.  Without
				 * this, the mismatch would wrongly reject
				 * the frame and the write request on the
				 * other end would never finish.
				 */
				sp->ssb_stat |= SSB_ST_RESP;
				sp->id = fh->fh_seq_id;
			} else {
				spin_unlock_bh(&ep->ex_lock);

				/* sequence/exch should exist */
				reject = FC_RJT_SEQ_ID;
				goto rel;
			}
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}
1159
1160
1161
1162
1163
1164
1165
1166
1167
/**
 * fc_seq_lookup_orig() - Find a sequence where this end is the originator
 * @mp: The exchange manager to look in
 * @fp: The received frame
 *
 * Looks the exchange up by OX_ID or RX_ID depending on which end the
 * sender is, and accepts the frame only when its seq_id matches the
 * current sequence.  The hold taken by fc_exch_find() is dropped before
 * returning, so the returned sequence carries no extra reference.
 *
 * Returns the matching sequence or NULL.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205 static void fc_exch_set_addr(struct fc_exch *ep,
1206 u32 orig_id, u32 resp_id)
1207 {
1208 ep->oid = orig_id;
1209 if (ep->esb_stat & ESB_ST_RESP) {
1210 ep->sid = resp_id;
1211 ep->did = orig_id;
1212 } else {
1213 ep->sid = orig_id;
1214 ep->did = resp_id;
1215 }
1216 }
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
/**
 * fc_seq_els_rsp_send() - Send an ELS response using information from
 *			   the existing sequence/exchange.
 * @fp:	      The received frame
 * @els_cmd:  The ELS command to be sent
 * @els_data: The ELS data to be sent (reason/explanation for LS_RJT)
 *
 * Dispatches to the appropriate per-command sender; unknown commands
 * are only logged.
 */
void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
			 struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(fp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(fp);
		break;
	default:
		FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}
EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
1248
1249
1250
1251
1252
1253
1254
1255
/**
 * fc_seq_send_last() - Send a sequence that is the last in the exchange
 * @sp:	     The sequence to send
 * @fp:	     The frame to send on the sequence
 * @rctl:    The R_CTL information to include
 * @fh_type: The frame header type
 *
 * Caller must hold the exchange lock (required by fc_seq_send_locked()).
 * Sets LAST_SEQ, END_SEQ and SEQ_INIT on top of the exchange's F_CTL.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send_locked(ep->lp, sp, fp);
}
1267
1268
1269
1270
1271
1272
1273
1274
/**
 * fc_seq_send_ack() - Send an acknowledgement for a received frame
 * @sp:	   The sequence to send the ACK on
 * @rx_fp: The received frame being acknowledged
 *
 * Sends an ACK_1 only when the received frame's SOF (its class)
 * requires acknowledgement.  The ACK's F_CTL is derived from the
 * received frame with the EX_CTX/SEQ_CTX bits inverted to reflect our
 * role, and its sequence identifiers echo the received frame's.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lport = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lport, 0);
		if (!fp) {
			FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
			return;
		}

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT,
		 * RETX_SEQ and UNI_TX from the received frame; the fill
		 * byte count and priority bits are deliberately not echoed.
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		lport->tt.frame_send(lport, fp);
	}
}
1328
1329
1330
1331
1332
1333
1334
1335
1336
/**
 * fc_exch_send_ba_rjt() - Send a BA_RJT for a received BLS request
 * @rx_fp:  The frame being rejected
 * @reason: The reason the frame is being rejected
 * @explan: The explanation for the rejection
 *
 * The reply swaps the received frame's source/destination IDs, echoes
 * its OX_ID/RX_ID/SEQ_CNT, and builds F_CTL from the received frame
 * with the context bits inverted.  Quietly drops the reject if no
 * frame can be allocated.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_seq *sp;
	struct fc_lport *lport;
	unsigned int f_ctl;

	lport = fr_dev(rx_fp);
	sp = fr_seq(rx_fp);
	fp = fc_frame_alloc(lport, sizeof(*rp));
	if (!fp) {
		FC_EXCH_DBG(fc_seq_exch(sp),
			     "Drop BA_RJT request, out of memory\n");
		return;
	}
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo END_CONN, SEQ_INIT, RETX_SEQ and UNI_TX from the
	 * received frame, force LAST_SEQ | END_SEQ and clear FIRST_SEQ.
	 * The fill byte count and priority bits are not echoed.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	lport->tt.frame_send(lport, fp);
}
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
/**
 * fc_exch_recv_abts() - Handle an incoming ABTS
 * @ep:	   The exchange the abort was on (NULL if unknown)
 * @rx_fp: The ABTS frame
 *
 * Replies with BA_ACC — carrying the current sequence bounds when we
 * are the sequence responder — or with BA_RJT when the exchange is
 * unknown or already complete.  A recovery-qualifier hold is taken and
 * the R_A_TOV timer armed so the qualifier is eventually released.
 * @rx_fp is consumed on all paths.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;

	FC_EXCH_DBG(ep, "exch: ABTS received\n");
	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
		goto free;
	}

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
		fc_frame_free(fp);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->esb_stat |= ESB_ST_REC_QUAL;
		fc_exch_hold(ep);	/* hold for REC_QUAL */
	}
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		/* we were the sequence responder: report its bounds */
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	ep->esb_stat |= ESB_ST_ABNORMAL;
	spin_unlock_bh(&ep->ex_lock);

free:
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
	goto free;
}
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
/**
 * fc_seq_assign() - Assign an exchange and sequence for an incoming frame
 * @lport: The local port the frame was received on
 * @fp:	   The received frame
 *
 * Walks the local port's EM anchors (honoring their match routines) and
 * lets the first one that accepts the frame set up its sequence via
 * fc_seq_lookup_recip().
 *
 * Returns the sequence stored in fr_seq(fp), or NULL.
 */
struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;

	WARN_ON(lport != fr_dev(fp));
	WARN_ON(fr_seq(fp));
	fr_seq(fp) = NULL;

	list_for_each_entry(ema, &lport->ema_list, ema_list)
		if ((!ema->match || ema->match(fp)) &&
		    fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
			break;
	return fr_seq(fp);
}
EXPORT_SYMBOL(fc_seq_assign);
1489
1490
1491
1492
1493
/**
 * fc_seq_release() - Release the hold on the exchange of a sequence
 * @sp: The sequence whose exchange reference is dropped
 */
void fc_seq_release(struct fc_seq *sp)
{
	fc_exch_release(fc_seq_exch(sp));
}
EXPORT_SYMBOL(fc_seq_release);
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
/**
 * fc_exch_recv_req() - Handle an incoming request where we are the recipient
 * @lport: The local port the request was received on
 * @mp:	   The EM the exchange is or will be on
 * @fp:	   The request frame
 *
 * Resolves the destination (possibly a VN_port), sets up exchange and
 * sequence state via fc_seq_lookup_recip(), ACKs the frame if its class
 * needs it, then passes it to the response handler or, failing that, to
 * the local port.  The frame is consumed on all paths.
 */
static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	enum fc_pf_rjt_reason reject;

	/*
	 * Resolve the destination to a VN_port if applicable.
	 */
	lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
	if (!lport) {
		fc_frame_free(fp);
		return;
	}
	fr_dev(fp) = lport;

	BUG_ON(fr_seq(fp));		/* XXX: shouldn't happen */

	/*
	 * If the RX_ID is still unknown, this frame starts a new
	 * exchange where we may be the responder: hand it straight to
	 * the local port, which will assign an exchange if needed.
	 */
	if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
		return fc_lport_recv(lport, fp);

	reject = fc_seq_lookup_recip(lport, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		fc_seq_send_ack(sp, fp);
		ep->encaps = fr_encaps(fp);

		/*
		 * Call the receive function.  The exchange is held for
		 * us here (by fc_seq_lookup_recip), but not for the
		 * receive function; the receive function may allocate a
		 * new sequence over the old one, so we shouldn't change
		 * the sequence after this.  The frame will be freed by
		 * the receive function.  If no handler is registered,
		 * fall back to the local port receive path.
		 */
		if (!fc_invoke_resp(ep, sp, fp))
			fc_lport_recv(lport, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
			     reject);
		fc_frame_free(fp);
	}
}
1563
1564
1565
1566
1567
1568
1569
1570
/**
 * fc_exch_recv_seq_resp() - Handle an incoming response where we are the
 *			     originator of the exchange
 * @mp: The EM the exchange is on
 * @fp: The response frame
 *
 * Looks up the exchange by OX_ID, validates the peer's source and
 * destination IDs against the exchange, updates sequence state, ACKs if
 * needed, completes the exchange on the last frame of the last
 * sequence (non-FCP), and invokes the response handler.  The frame is
 * consumed on all paths.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	sp = &ep->seq;
	if (fc_sof_is_init(sof)) {
		/* first frame of the responder's sequence */
		sp->ssb_stat |= SSB_ST_RESP;
		sp->id = fh->fh_seq_id;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;

	spin_lock_bh(&ep->ex_lock);
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);

	/* last frame of the last sequence completes a non-FCP exchange */
	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc) {
			fc_exch_delete(ep);
		} else {
			FC_EXCH_DBG(ep, "ep is completed already,"
					"hence skip calling the resp\n");
			goto skip_resp;
		}
	}

	/*
	 * Call the receive function.  The exchange is held for us here,
	 * but not for the receive function.  The receive function may
	 * allocate a new sequence over the old one, so we shouldn't
	 * change the sequence after this.  The frame will be freed by
	 * the receive function, or here if no handler is installed.
	 */
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);

skip_resp:
	fc_exch_release(ep);
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}
1658
1659
1660
1661
1662
1663
1664
1665 static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1666 {
1667 struct fc_seq *sp;
1668
1669 sp = fc_seq_lookup_orig(mp, fp);
1670
1671 if (!sp)
1672 atomic_inc(&mp->stats.xid_not_found);
1673 else
1674 atomic_inc(&mp->stats.non_bls_resp);
1675
1676 fc_frame_free(fp);
1677 }
1678
1679
1680
1681
1682
1683
1684
1685
1686
/**
 * fc_exch_abts_resp() - Handle the response to an ABTS
 * @ep: The exchange the ABTS was sent on
 * @fp: The BA_ACC or BA_RJT response frame
 *
 * If the ABTS timer was still pending, its cancelation consumes the
 * timer's hold and the response is ignored (the timeout path owns the
 * teardown).  A BA_ACC whose sequence-count window is open leaves a
 * recovery qualifier held with the R_A_TOV timer armed.  The exchange
 * is completed when the peer indicated the last sequence, and the
 * response handler is invoked with the frame.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
		fc_exch_release(ep);	/* release from pending timer hold */
		return;
	}

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);	/* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	/*
	 * The sequence to pass to the response handler.
	 */
	sp = &ep->seq;

	/*
	 * Do not report errors or exchange status to FCP here; FCP has
	 * its own error and exchange-status handling.
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_hold(ep);
	if (!rc)
		fc_exch_delete(ep);
	if (!fc_invoke_resp(ep, sp, fp))
		fc_frame_free(fp);
	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);
	fc_exch_release(ep);
}
1755
1756
1757
1758
1759
1760
1761
1762
1763
/**
 * fc_exch_recv_bls() - Handler for a received BLS frame
 * @mp: The exchange manager the frame's exchange lives on
 * @fp: The BLS frame; consumed by this function or its callees
 *
 * The frame is either a BA_ACC/BA_RJT response to an ABTS we sent,
 * an incoming ABTS request, or an ACK (which is discarded).
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	/* EX_CTX set: we originated the exchange, so look up by OX_ID */
	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		/* sender transferred sequence initiative to us */
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated; only ACKs are
		 * expected here.  Anything else is logged and dropped.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			if (ep)
				FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
					    fh->fh_r_ctl,
					    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);	/* consumes fp */
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			if (ep)
				fc_exch_recv_abts(ep, fp);	/* consumes fp */
			else
				fc_frame_free(fp);
			break;
		default:	/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold from fc_exch_find */
}
1821
1822
1823
1824
1825
1826
1827
1828
1829 static void fc_seq_ls_acc(struct fc_frame *rx_fp)
1830 {
1831 struct fc_lport *lport;
1832 struct fc_els_ls_acc *acc;
1833 struct fc_frame *fp;
1834 struct fc_seq *sp;
1835
1836 lport = fr_dev(rx_fp);
1837 sp = fr_seq(rx_fp);
1838 fp = fc_frame_alloc(lport, sizeof(*acc));
1839 if (!fp) {
1840 FC_EXCH_DBG(fc_seq_exch(sp),
1841 "exch: drop LS_ACC, out of memory\n");
1842 return;
1843 }
1844 acc = fc_frame_payload_get(fp, sizeof(*acc));
1845 memset(acc, 0, sizeof(*acc));
1846 acc->la_cmd = ELS_LS_ACC;
1847 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1848 lport->tt.frame_send(lport, fp);
1849 }
1850
1851
1852
1853
1854
1855
1856
1857
1858
1859
1860 static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
1861 enum fc_els_rjt_explan explan)
1862 {
1863 struct fc_lport *lport;
1864 struct fc_els_ls_rjt *rjt;
1865 struct fc_frame *fp;
1866 struct fc_seq *sp;
1867
1868 lport = fr_dev(rx_fp);
1869 sp = fr_seq(rx_fp);
1870 fp = fc_frame_alloc(lport, sizeof(*rjt));
1871 if (!fp) {
1872 FC_EXCH_DBG(fc_seq_exch(sp),
1873 "exch: drop LS_ACC, out of memory\n");
1874 return;
1875 }
1876 rjt = fc_frame_payload_get(fp, sizeof(*rjt));
1877 memset(rjt, 0, sizeof(*rjt));
1878 rjt->er_cmd = ELS_LS_RJT;
1879 rjt->er_reason = reason;
1880 rjt->er_explan = explan;
1881 fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
1882 lport->tt.frame_send(lport, fp);
1883 }
1884
1885
1886
1887
1888
1889
1890
/**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
 *
 * Cancels the timer, drops any recovery-qualifier hold, completes the
 * exchange, and notifies the upper layer with -FC_EX_CLOSED unless the
 * exchange was already completed.
 */
static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	int rc = 1;

	spin_lock_bh(&ep->ex_lock);
	ep->state |= FC_EX_RST_CLEANUP;
	fc_exch_timer_cancel(ep);
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_exch_hold(ep);	/* keep ep alive across resp invocation */

	if (!rc) {
		fc_exch_delete(ep);
	} else {
		/* already completed elsewhere; don't call resp again */
		FC_EXCH_DBG(ep, "ep is completed already,"
				"hence skip calling the resp\n");
		goto skip_resp;
	}

	fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
skip_resp:
	fc_seq_set_resp(sp, NULL, ep->arg);
	fc_exch_release(ep);	/* drop the hold taken above */
}
1921
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932
1933
/**
 * fc_exch_pool_reset() - Reset a per-CPU exchange pool
 * @lport: The local port whose exchanges are to be reset
 * @pool:  The per-CPU exchange pool to reset
 * @sid:   Source FID to match, or 0 to match any source
 * @did:   Destination FID to match, or 0 to match any destination
 *
 * Resets every matching exchange in the pool.  The pool lock must be
 * dropped around fc_exch_reset() (which may invoke upper-layer
 * handlers), so the list walk restarts from the head each time.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);	/* keep ep valid while unlocked */
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case while the lock
			 * was dropped multiple eps were released.
			 */
			goto restart;
		}
	}
	pool->next_index = 0;
	pool->left = FC_XID_UNKNOWN;
	pool->right = FC_XID_UNKNOWN;
	spin_unlock_bh(&pool->lock);
}
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979 void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
1980 {
1981 struct fc_exch_mgr_anchor *ema;
1982 unsigned int cpu;
1983
1984 list_for_each_entry(ema, &lport->ema_list, ema_list) {
1985 for_each_possible_cpu(cpu)
1986 fc_exch_pool_reset(lport,
1987 per_cpu_ptr(ema->mp->pool, cpu),
1988 sid, did);
1989 }
1990 }
1991 EXPORT_SYMBOL(fc_exch_mgr_reset);
1992
1993
1994
1995
1996
1997
1998
1999
2000 static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
2001 {
2002 struct fc_exch_mgr_anchor *ema;
2003
2004 list_for_each_entry(ema, &lport->ema_list, ema_list)
2005 if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
2006 return fc_exch_find(ema->mp, xid);
2007 return NULL;
2008 }
2009
2010
2011
2012
2013
2014
2015
/**
 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
 * @rfp: The REC request frame; not freed here
 *
 * Looks up the exchange named by the request and replies with a REC
 * LS_ACC describing its current state, or with LS_RJT on any mismatch.
 */
static void fc_exch_els_rec(struct fc_frame *rfp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 xid, rxid, oxid;

	lport = fr_dev(rfp);
	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	explan = ELS_EXPL_OXID_RXID;
	/* requester is the exchange originator: look up by OX_ID */
	if (sid == fc_host_port_id(lport->host))
		xid = oxid;
	else
		xid = rxid;
	if (xid == FC_XID_UNKNOWN) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: invalid rxid %x oxid %x\n",
			     sid, rxid, oxid);
		goto reject;
	}
	ep = fc_exch_lookup(lport, xid);
	if (!ep) {
		FC_LPORT_DBG(lport,
			     "REC request from %x: rxid %x oxid %x not found\n",
			     sid, rxid, oxid);
		goto reject;
	}
	FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
		    sid, rxid, oxid);
	/* the requester's IDs must match the exchange we found */
	if (ep->oid != sid || oxid != ep->oxid)
		goto rel;
	if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
		goto rel;
	fp = fc_frame_alloc(lport, sizeof(*acc));
	if (!fp) {
		FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
		goto out;
	}

	/* build the REC LS_ACC payload from current exchange state */
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
	lport->tt.frame_send(lport, fp);
out:
	fc_exch_release(ep);	/* drop hold from fc_exch_lookup */
	return;

rel:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(rfp, reason, explan);
}
2092
2093
2094
2095
2096
2097
2098
2099
2100
/**
 * fc_exch_rrq_resp() - Handler for RRQ responses
 * @sp:	 The sequence that the RRQ was sent on
 * @fp:	 The response frame, or an ERR_PTR() on error
 * @arg: The exchange that was aborted (subject of the RRQ)
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *aborted_ep = arg;
	unsigned int op;

	if (IS_ERR(fp)) {
		int err = PTR_ERR(fp);

		/* closed/timed out: clean up the aborted exchange anyway */
		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
			goto cleanup;
		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
			    "frame error %d\n", err);
		return;
	}

	op = fc_frame_payload_op(fp);
	fc_frame_free(fp);

	switch (op) {
	case ELS_LS_RJT:
		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
		fallthrough;
	case ELS_LS_ACC:
		goto cleanup;
	default:
		FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
			    op);
		return;
	}

cleanup:
	fc_exch_done(&aborted_ep->seq);
	/* drop hold for rec qual */
	fc_exch_release(aborted_ep);
}
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
/**
 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
 * @lport:	The local port to send the frame on
 * @fp:		The frame to send; consumed on both success and failure
 * @resp:	The response handler for the exchange
 * @destructor:	The destructor for the exchange
 * @arg:	The argument to pass to the response handler
 * @timer_msec:	The exchange timeout in milliseconds, or 0 for none
 *
 * The caller must have filled the frame header's routing control,
 * did/sid, type, frame control, and parameter fields before calling.
 *
 * Returns the new sequence, or NULL on allocation/send failure.
 */
struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
				struct fc_frame *fp,
				void (*resp)(struct fc_seq *,
					     struct fc_frame *fp,
					     void *arg),
				void (*destructor)(struct fc_seq *, void *),
				void *arg, u32 timer_msec)
{
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	struct fc_frame_header *fh;
	struct fc_fcp_pkt *fsp = NULL;
	int rc = 1;

	/*
	 * NOTE(review): ep->ex_lock appears to be returned held by
	 * fc_exch_alloc() -- the unlocks below have no matching lock in
	 * this function.  Confirm against fc_exch_alloc()/fc_exch_em_alloc().
	 */
	ep = fc_exch_alloc(lport, fp);
	if (!ep) {
		fc_frame_free(fp);
		return NULL;
	}
	ep->esb_stat |= ESB_ST_SEQ_INIT;
	fh = fc_frame_header_get(fp);
	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
	ep->resp = resp;
	ep->destructor = destructor;
	ep->arg = arg;
	ep->r_a_tov = lport->r_a_tov;
	ep->lp = lport;
	sp = &ep->seq;

	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
	ep->f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
	sp->cnt++;

	/* set up DDP offload when this XID is within the lport's LRO range */
	if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
		fsp = fr_fsp(fp);
		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
	}

	if (unlikely(lport->tt.frame_send(lport, fp)))
		goto err;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not the first sequence any more */

	if (ep->f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return sp;
err:
	if (fsp)
		fc_fcp_ddp_done(fsp);	/* tear down DDP set up above */
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
2235
2236
2237
2238
2239
2240
2241
2242
/**
 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
 * @ep: The exchange to send the RRQ for; the caller's hold on it is
 *	transferred to the RRQ response handler or released on failure
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lport;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lport = ep->lp;

	fp = fc_frame_alloc(lport, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	/* if we were the responder, the RRQ goes to the originator */
	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
			     lport->e_d_tov))
		return;

retry:
	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	/* re-arm the recovery qualifier so the RRQ is retried on timeout */
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	spin_unlock_bh(&ep->ex_lock);
}
2288
2289
2290
2291
2292
/**
 * fc_exch_els_rrq() - Handler for an incoming ELS RRQ request
 * @fp: The RRQ request frame; not freed here
 *
 * Validates the RRQ against the subject exchange, clears any recovery
 * qualifier, and replies with LS_ACC, or with LS_RJT on mismatch.
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
	struct fc_lport *lport;
	struct fc_exch *ep = NULL;	/* request or subject exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	lport = fr_dev(fp);
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * Look up the subject exchange.
	 */
	sid = ntoh24(rp->rrq_s_id);		/* subject source FID */
	xid = fc_host_port_id(lport->host) == sid ?
			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_lookup(lport, xid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE)
		fc_exch_timer_cancel(ep);

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(fp);
	goto out;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
reject:
	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
	if (ep)
		fc_exch_release(ep);	/* drop hold from fc_exch_lookup */
}
2356
2357
2358
2359
2360
2361 void fc_exch_update_stats(struct fc_lport *lport)
2362 {
2363 struct fc_host_statistics *st;
2364 struct fc_exch_mgr_anchor *ema;
2365 struct fc_exch_mgr *mp;
2366
2367 st = &lport->host_stats;
2368
2369 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2370 mp = ema->mp;
2371 st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
2372 st->fc_no_free_exch_xid +=
2373 atomic_read(&mp->stats.no_free_exch_xid);
2374 st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
2375 st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
2376 st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
2377 st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
2378 }
2379 }
2380 EXPORT_SYMBOL(fc_exch_update_stats);
2381
2382
2383
2384
2385
2386
2387
2388 struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
2389 struct fc_exch_mgr *mp,
2390 bool (*match)(struct fc_frame *))
2391 {
2392 struct fc_exch_mgr_anchor *ema;
2393
2394 ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
2395 if (!ema)
2396 return ema;
2397
2398 ema->mp = mp;
2399 ema->match = match;
2400
2401 list_add_tail(&ema->ema_list, &lport->ema_list);
2402 kref_get(&mp->kref);
2403 return ema;
2404 }
2405 EXPORT_SYMBOL(fc_exch_mgr_add);
2406
2407
2408
2409
2410
2411 static void fc_exch_mgr_destroy(struct kref *kref)
2412 {
2413 struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
2414
2415 mempool_destroy(mp->ep_pool);
2416 free_percpu(mp->pool);
2417 kfree(mp);
2418 }
2419
2420
2421
2422
2423
2424 void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
2425 {
2426
2427 list_del(&ema->ema_list);
2428 kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
2429 kfree(ema);
2430 }
2431 EXPORT_SYMBOL(fc_exch_mgr_del);
2432
2433
2434
2435
2436
2437
2438 int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
2439 {
2440 struct fc_exch_mgr_anchor *ema, *tmp;
2441
2442 list_for_each_entry(ema, &src->ema_list, ema_list) {
2443 if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
2444 goto err;
2445 }
2446 return 0;
2447 err:
2448 list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
2449 fc_exch_mgr_del(ema);
2450 return -ENOMEM;
2451 }
2452 EXPORT_SYMBOL(fc_exch_mgr_list_clone);
2453
2454
2455
2456
2457
2458
2459
2460
2461
/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport:   The local port the new EM will be associated with
 * @class:   The default FC class for new exchanges
 * @min_xid: The minimum XID for exchanges from the new EM
 * @max_xid: The maximum XID for exchanges from the new EM
 * @match:   The match routine for the new EM
 *
 * Returns the new EM (anchored on @lport) or NULL on failure.
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	/* min_xid must be aligned to the per-CPU striping mask */
	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lport, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for EM
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	mp->lport = lport;
	mp->min_xid = min_xid;

	/*
	 * Reduce the XID range if needed so each per-CPU pool (header plus
	 * its array of exchange pointers) fits in PCPU_MIN_UNIT_SIZE.
	 */
	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
		sizeof(struct fc_exch *);
	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
			min_xid - 1;
	} else {
		mp->max_xid = max_xid;
		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
			(fc_cpu_mask + 1);
	}

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Set up the per-CPU exchange pools with the XID range divided
	 * equally across all CPUs.
	 */
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize the per-CPU exchange pools.
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		pool->next_index = 0;
		pool->left = FC_XID_UNKNOWN;
		pool->right = FC_XID_UNKNOWN;
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lport, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * kref_init() set mp->kref to 1 and fc_exch_mgr_add() incremented
	 * it again; drop the extra reference so the anchor holds the only
	 * one.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
2552
2553
2554
2555
2556
2557 void fc_exch_mgr_free(struct fc_lport *lport)
2558 {
2559 struct fc_exch_mgr_anchor *ema, *next;
2560
2561 flush_workqueue(fc_exch_workqueue);
2562 list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
2563 fc_exch_mgr_del(ema);
2564 }
2565 EXPORT_SYMBOL(fc_exch_mgr_free);
2566
2567
2568
2569
2570
2571
2572
2573
2574 static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
2575 struct fc_lport *lport,
2576 struct fc_frame_header *fh)
2577 {
2578 struct fc_exch_mgr_anchor *ema;
2579 u16 xid;
2580
2581 if (f_ctl & FC_FC_EX_CTX)
2582 xid = ntohs(fh->fh_ox_id);
2583 else {
2584 xid = ntohs(fh->fh_rx_id);
2585 if (xid == FC_XID_UNKNOWN)
2586 return list_entry(lport->ema_list.prev,
2587 typeof(*ema), ema_list);
2588 }
2589
2590 list_for_each_entry(ema, &lport->ema_list, ema_list) {
2591 if ((xid >= ema->mp->min_xid) &&
2592 (xid <= ema->mp->max_xid))
2593 return ema;
2594 }
2595 return NULL;
2596 }
2597
2598
2599
2600
2601
/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp:	   The received frame; always consumed (freed or handed on)
 *
 * Dispatches the frame by type and exchange/sequence context to the
 * BLS, sequence-response, response, or new-request handlers.
 */
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl;

	/* lport lock ? */
	if (!lport || lport->state == LPORT_ST_DISABLED) {
		FC_LIBFC_DBG("Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	ema = fc_find_ema(f_ctl, lport, fh);
	if (!ema) {
		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor,"
			     "fc_ctl <0x%x>, xid <0x%x>\n",
			     f_ctl,
			     (f_ctl & FC_FC_EX_CTX) ?
			     ntohs(fh->fh_ox_id) :
			     ntohs(fh->fh_rx_id));
		fc_frame_free(fp);
		return;
	}

	/*
	 * If the frame is marked invalid, just drop it.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		/* strip the fill bytes from the end of the sequence */
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		fallthrough;
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else	/* no EX_CTX and no SEQ_CTX: a new request */
			fc_exch_recv_req(lport, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);
2655
2656
2657
2658
2659
2660 int fc_exch_init(struct fc_lport *lport)
2661 {
2662 if (!lport->tt.exch_mgr_reset)
2663 lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
2664
2665 return 0;
2666 }
2667 EXPORT_SYMBOL(fc_exch_init);
2668
2669
2670
2671
2672 int fc_setup_exch_mgr(void)
2673 {
2674 fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
2675 0, SLAB_HWCACHE_ALIGN, NULL);
2676 if (!fc_em_cachep)
2677 return -ENOMEM;
2678
2679
2680
2681
2682
2683
2684
2685
2686
2687
2688
2689
2690
2691
2692
2693 fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
2694 fc_cpu_mask = (1 << fc_cpu_order) - 1;
2695
2696 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
2697 if (!fc_exch_workqueue)
2698 goto err;
2699 return 0;
2700 err:
2701 kmem_cache_destroy(fc_em_cachep);
2702 return -ENOMEM;
2703 }
2704
2705
2706
2707
/**
 * fc_destroy_exch_mgr() - Tear down the exchange-manager subsystem
 *
 * Destroys the workqueue before the slab cache — presumably so any
 * still-queued work that touches exchanges from the cache has drained
 * first; keep this order.
 */
void fc_destroy_exch_mgr(void)
{
	destroy_workqueue(fc_exch_workqueue);
	kmem_cache_destroy(fc_em_cachep);
}