0001
0002
0003
0004
0005
0006 #ifndef DEF_RDMAVT_INCQP_H
0007 #define DEF_RDMAVT_INCQP_H
0008
0009 #include <rdma/rdma_vt.h>
0010 #include <rdma/ib_pack.h>
0011 #include <rdma/ib_verbs.h>
0012 #include <rdma/rdmavt_cq.h>
0013 #include <rdma/rvt-abi.h>
0014
0015
0016
/*
 * Atomic bit definitions for r_aflags.
 */
#define RVT_R_WRID_VALID 0
#define RVT_R_REWIND_SGE 1

/*
 * Bit definitions for r_flags.
 */
#define RVT_R_REUSE_SGE 0x01
#define RVT_R_RDMAR_SEQ 0x02
#define RVT_R_RSP_NAK 0x04
#define RVT_R_RSP_SEND 0x08
#define RVT_R_COMM_EST 0x10
0028
0029
0030
0031
0032
0033
/*
 * If a packet's QP[23:16] bits match this value, then it is
 * a PSM packet and the hardware will expect a KDETH header
 * following the BTH.
 */
#define RVT_KDETH_QP_PREFIX 0x80
#define RVT_KDETH_QP_SUFFIX 0xffff
#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
#define RVT_KDETH_QP_PREFIX_SHIFT 16
#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
				RVT_KDETH_QP_PREFIX_SHIFT)
#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
0041
0042
0043
0044
0045
0046
/*
 * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
 * prefix value, then it is an Accelerated IP (AIP) packet; the low
 * 16 bits carry the entropy/flow selection.
 */
#define RVT_AIP_QP_PREFIX 0x81
#define RVT_AIP_QP_SUFFIX 0xffff
#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
#define RVT_AIP_QP_PREFIX_SHIFT 16
#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
			      RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
0055
0056
0057
0058
0059
0060
0061
0062
0063
0064
0065
0066
0067
0068
0069
0070
0071
0072
0073
0074
0075
0076
0077
0078
0079
0080
/*
 * Bit definitions for s_flags.
 *
 * RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
 * RVT_S_BUSY - send engine is processing the QP
 * RVT_S_TIMER - the RC retry timer is active
 * RVT_S_RESP_PENDING - a response is pending to be sent
 * RVT_S_ACK_PENDING - an ACK is waiting to be sent after RDMA read/atomics
 * RVT_S_WAIT_FENCE - waiting for all prior RDMA read or atomic SWQEs
 *                    before processing the next SWQE
 * RVT_S_WAIT_RDMAR - waiting for an RDMA read or atomic SWQE to complete
 *                    before processing the next SWQE
 * RVT_S_WAIT_RNR - waiting for RNR timeout
 * RVT_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
 * RVT_S_WAIT_DMA - waiting for send DMA queue to drain before generating
 *                  next send completion entry not via send DMA
 * RVT_S_WAIT_PIO - waiting for a send buffer to be available
 * RVT_S_WAIT_TX - waiting for a driver TX request structure to be available
 * RVT_S_WAIT_DMA_DESC - waiting for DMA descriptors to be available
 * RVT_S_WAIT_KMEM - waiting for kernel memory to be available
 * RVT_S_WAIT_PSN - waiting for a packet to exit the send DMA queue
 * RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
 * RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
 * RVT_S_ECN - a BECN was queued to the send engine
 * RVT_S_MAX_BIT_MASK - the max bit that can be used by rdmavt; drivers
 *                      may use the bits above it for their own flags
 */
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
#define RVT_S_TIMER 0x0004
#define RVT_S_RESP_PENDING 0x0008
#define RVT_S_ACK_PENDING 0x0010
#define RVT_S_WAIT_FENCE 0x0020
#define RVT_S_WAIT_RDMAR 0x0040
#define RVT_S_WAIT_RNR 0x0080
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
#define RVT_S_WAIT_TX 0x0800
#define RVT_S_WAIT_DMA_DESC 0x1000
#define RVT_S_WAIT_KMEM 0x2000
#define RVT_S_WAIT_PSN 0x4000
#define RVT_S_WAIT_ACK 0x8000
#define RVT_S_SEND_ONE 0x10000
#define RVT_S_UNLIMITED_CREDIT 0x20000
#define RVT_S_ECN 0x40000
#define RVT_S_MAX_BIT_MASK 0x800000
0101
0102
0103
0104
0105
0106
0107
0108
0109
/*
 * Wait flags that would prevent any packet type from being sent.
 */
#define RVT_S_ANY_WAIT_IO \
	(RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
	 RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)

/*
 * Wait flags that would prevent send work requests from making progress.
 */
#define RVT_S_ANY_WAIT_SEND (RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR | \
	RVT_S_WAIT_RNR | RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_DMA | \
	RVT_S_WAIT_PSN | RVT_S_WAIT_ACK)

/* Any state that would stop sending. */
#define RVT_S_ANY_WAIT (RVT_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
0122
0123
/* Mask of the opcode bits that select the QP type (BTH opcode [7:5]). */
#define RVT_OPCODE_QP_MASK 0xE0

/*
 * Flags for checking QP state (see ib_rvt_state_ops[]): which verbs
 * operations are legal in the QP's current state.
 */
#define RVT_POST_SEND_OK 0x01
#define RVT_POST_RECV_OK 0x02
#define RVT_PROCESS_RECV_OK 0x04
#define RVT_PROCESS_SEND_OK 0x08
#define RVT_PROCESS_NEXT_SEND_OK 0x10
#define RVT_FLUSH_SEND 0x20
#define RVT_FLUSH_RECV 0x40
#define RVT_PROCESS_OR_FLUSH_SEND \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND)
#define RVT_SEND_OR_FLUSH_OR_RECV_OK \
	(RVT_PROCESS_SEND_OK | RVT_FLUSH_SEND | RVT_PROCESS_RECV_OK)

/*
 * Internal send flags (stored in wr.send_flags, in the range the core
 * reserves for providers via IB_SEND_RESERVED_START).
 */
#define RVT_SEND_RESERVE_USED IB_SEND_RESERVED_START
#define RVT_SEND_COMPLETION_ONLY (IB_SEND_RESERVED_START << 1)
0144
0145
0146
0147
0148
0149
0150
0151
0152
0153
0154
0155
/**
 * struct rvt_ud_wr - IB UD work request plus AH cache
 * @wr: valid IB UD work request
 * @attr: pointer to an allocated AH attribute
 *
 * Special-cases the UD WR so the AH attributes can be tracked.
 *
 * NOTE: this structure is strictly ordered wr then attr, i.e. @attr
 * MUST come after @wr.  The ib_ud_wr is sized and copied elsewhere
 * assuming @wr is first.
 */
struct rvt_ud_wr {
	struct ib_ud_wr wr;
	struct rdma_ah_attr *attr;
};
0160
0161
0162
0163
0164
0165
/*
 * Send work request queue entry.
 * The size of the sg_list is determined when the QP is created and
 * stored in qp->s_max_sge.
 */
struct rvt_swqe {
	union {
		struct ib_send_wr wr;	/* don't use wr.sg_list */
		struct rvt_ud_wr ud_wr;
		struct ib_reg_wr reg_wr;
		struct ib_rdma_wr rdma_wr;
		struct ib_atomic_wr atomic_wr;
	};
	u32 psn;		/* first packet sequence number */
	u32 lpsn;		/* last packet sequence number */
	u32 ssn;		/* send sequence number */
	u32 length;		/* total length of data in sg_list */
	void *priv;		/* driver private data */
	struct rvt_sge sg_list[];
};
0181
0182
0183
0184
0185
0186
0187
0188
0189
0190
0191
0192
0193
0194
/**
 * struct rvt_krwq - kernel receive work request queue
 * @p_lock: lock protecting the producer side (posting of WRs)
 * @head: index of next entry to fill
 * @c_lock: lock protecting the consumer side
 * @tail: index of next entry to pull
 * @count: approximate count of total receive entries posted
 * @curr_wq: cached pointer to the current work queue array
 * @wq: the receive work queue entries
 *
 * Head/tail pointers and RWQEs for kernel-mode users; producer and
 * consumer fields are kept on separate cache lines.
 */
struct rvt_krwq {
	spinlock_t p_lock;	/* protect posting of receive WRs */
	u32 head;		/* new work requests posted to the head */

	/* consumer side on its own cache line */
	spinlock_t c_lock ____cacheline_aligned_in_smp;
	u32 tail;		/* receives pull requests from here */
	u32 count;		/* approximate count of receives */
	struct rvt_rwqe *curr_wq;
	struct rvt_rwqe wq[];
};
0206
0207
0208
0209
0210
0211
0212 static inline struct rvt_ah *rvt_get_swqe_ah(struct rvt_swqe *swqe)
0213 {
0214 return ibah_to_rvtah(swqe->ud_wr.wr.ah);
0215 }
0216
0217
0218
0219
0220
0221
0222 static inline struct rdma_ah_attr *rvt_get_swqe_ah_attr(struct rvt_swqe *swqe)
0223 {
0224 return swqe->ud_wr.attr;
0225 }
0226
0227
0228
0229
0230
0231
0232 static inline u32 rvt_get_swqe_remote_qpn(struct rvt_swqe *swqe)
0233 {
0234 return swqe->ud_wr.wr.remote_qpn;
0235 }
0236
0237
0238
0239
0240
0241
0242 static inline u32 rvt_get_swqe_remote_qkey(struct rvt_swqe *swqe)
0243 {
0244 return swqe->ud_wr.wr.remote_qkey;
0245 }
0246
0247
0248
0249
0250
0251
0252 static inline u16 rvt_get_swqe_pkey_index(struct rvt_swqe *swqe)
0253 {
0254 return swqe->ud_wr.wr.pkey_index;
0255 }
0256
/* Receive queue: either a user-mapped wq or a kernel kwq is in use. */
struct rvt_rq {
	struct rvt_rwq *wq;	/* user-visible (mmap'ed) work queue */
	struct rvt_krwq *kwq;	/* kernel work queue */
	u32 size;		/* size of RWQE array */
	u8 max_sge;		/* max SGEs per RWQE */

	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
};
0265
0266
0267
0268
0269
0270
0271
0272
0273
0274
0275
0276 static inline u32 rvt_get_rq_count(struct rvt_rq *rq, u32 head, u32 tail)
0277 {
0278 u32 count = head - tail;
0279
0280 if ((s32)count < 0)
0281 count += rq->size;
0282 return count;
0283 }
0284
0285
0286
0287
0288
/*
 * This structure holds the information that the send engine needs to
 * send an RDMA read response or atomic operation response.
 */
struct rvt_ack_entry {
	struct rvt_sge rdma_sge;	/* data for RDMA read response */
	u64 atomic_data;		/* original value for atomic ACK */
	u32 psn;			/* first PSN of the response */
	u32 lpsn;			/* last PSN of the response */
	u8 opcode;			/* request opcode being responded to */
	u8 sent;			/* non-zero once the response is sent */
	void *priv;			/* driver private data */
};
0298
#define RC_QP_SCALING_INTERVAL 5

/* Flags for rvt_operation_params.flags */
#define RVT_OPERATION_PRIV 0x00000001	/* driver private operation */
#define RVT_OPERATION_ATOMIC 0x00000002	/* global atomic operation */
#define RVT_OPERATION_ATOMIC_SGE 0x00000004 /* atomic requiring an SGE */
#define RVT_OPERATION_LOCAL 0x00000008	/* local operation (no packet) */
#define RVT_OPERATION_USE_RESERVE 0x00000010 /* uses reserved SWQE slots */
#define RVT_OPERATION_IGN_RNR_CNT 0x00000020 /* ignore RNR retry count */

/* Size of the per-opcode operation table (indexed by ib_wr_opcode). */
#define RVT_OPERATION_MAX (IB_WR_RESERVED10 + 1)
0309
0310
0311
0312
0313
0314
0315
0316
0317
0318
0319
0320
0321
/**
 * struct rvt_operation_params - operation table entry
 * @length: length to copy into the SWQE entry
 * @qpt_support: bit mask indicating QP type support
 * @flags: RVT_OPERATION_* flags (see above)
 *
 * Supports table-driven post send so that drivers can
 * have differing sets of operations.
 */
struct rvt_operation_params {
	size_t length;
	u32 qpt_support;
	u32 flags;
};
0327
0328
0329
0330
0331
/*
 * Variables prefixed with s_ are for the requester (sender).
 * Variables prefixed with r_ are for the responder (receiver).
 * Variables prefixed with ack_ are for responder replies.
 *
 * NOTE(review): field grouping/alignment (____cacheline_aligned_in_smp)
 * is intentional for lock/cache-line separation — do not reorder.
 */
struct rvt_qp {
	struct ib_qp ibqp;
	void *priv;		/* driver private data */
	/* read-mostly fields above and below */
	struct rdma_ah_attr remote_ah_attr;
	struct rdma_ah_attr alt_ah_attr;
	struct rvt_qp __rcu *next;	/* link list for QPN hash table */
	struct rvt_swqe *s_wq;		/* send work queue */
	struct rvt_mmap_info *ip;	/* for user-mapped queues */

	unsigned long timeout_jiffies;	/* computed from timeout */

	int srate_mbps;		/* s_srate (below) converted to Mbit/s */
	pid_t pid;		/* pid for user mode QPs */
	u32 remote_qpn;
	u32 qkey;		/* QKEY for this QP (for UD or RD) */
	u32 s_size;		/* send work queue size */

	u16 pmtu;		/* decoded from path_mtu */
	u8 log_pmtu;		/* 1 << log_pmtu == pmtu */
	u8 state;		/* QP state */
	u8 allowed_ops;		/* high order bits of allowed opcodes */
	u8 qp_access_flags;
	u8 alt_timeout;		/* alternate path timeout for this QP */
	u8 timeout;		/* timeout for this QP */
	u8 s_srate;
	u8 s_mig_state;
	u8 port_num;
	u8 s_pkey_index;	/* PKEY index to use */
	u8 s_alt_pkey_index;	/* alternate path PKEY index to use */
	u8 r_max_rd_atomic;	/* max number of RDMA read/atomic to receive */
	u8 s_max_rd_atomic;	/* max number of RDMA read/atomic to send */
	u8 s_retry_cnt;		/* number of times to retry */
	u8 s_rnr_retry_cnt;
	u8 r_min_rnr_timer;	/* retry timeout value for RNR NAKs */
	u8 s_max_sge;		/* size of s_wq->sg_list */
	u8 s_draining;

	/* start of read/write fields */
	atomic_t refcount ____cacheline_aligned_in_smp;
	wait_queue_head_t wait;	/* woken when refcount drops to zero */

	struct rvt_ack_entry *s_ack_queue;
	struct rvt_sge_state s_rdma_read_sge;

	spinlock_t r_lock ____cacheline_aligned_in_smp;	/* guards receive side */
	u32 r_psn;		/* expected rcv packet sequence number */
	unsigned long r_aflags;	/* RVT_R_WRID_VALID etc. (atomic bits) */
	u64 r_wr_id;		/* ID for current receive WQE */
	u32 r_ack_psn;		/* PSN for next ACK or atomic ACK */
	u32 r_len;		/* total length of r_sge */
	u32 r_rcv_len;		/* receive data len processed */
	u32 r_msn;		/* message sequence number */

	u8 r_state;		/* opcode of last packet received */
	u8 r_flags;		/* RVT_R_* flags */
	u8 r_head_ack_queue;	/* index into s_ack_queue[] */
	u8 r_adefered;		/* deferred ack count */

	struct list_head rspwait;	/* link for waiting to respond */

	struct rvt_sge_state r_sge;	/* current receive data */
	struct rvt_rq r_rq;		/* receive work queue */

	/* post send line */
	spinlock_t s_hlock ____cacheline_aligned_in_smp;
	u32 s_head;		/* new entries added here */
	u32 s_next_psn;		/* PSN for next request */
	u32 s_avail;		/* number of entries avail */
	u32 s_ssn;		/* SSN of tail entry */
	atomic_t s_reserved_used;	/* reserved entries in use */

	spinlock_t s_lock ____cacheline_aligned_in_smp; /* guards send side */
	u32 s_flags;		/* RVT_S_* flags */
	struct rvt_sge_state *s_cur_sge;
	struct rvt_swqe *s_wqe;
	struct rvt_sge_state s_sge;	/* current send request data */
	struct rvt_mregion *s_rdma_mr;
	u32 s_len;		/* total length of s_sge */
	u32 s_rdma_read_len;	/* total length of s_rdma_read_sge */
	u32 s_last_psn;		/* last response PSN processed */
	u32 s_sending_psn;	/* lowest PSN that is being sent */
	u32 s_sending_hpsn;	/* highest PSN that is being sent */
	u32 s_psn;		/* current packet sequence number */
	u32 s_ack_rdma_psn;	/* PSN for sending RDMA read responses */
	u32 s_ack_psn;		/* PSN for acking sends and RDMA writes */
	u32 s_tail;		/* next entry to process */
	u32 s_cur;		/* current work queue entry */
	u32 s_acked;		/* last un-ACK'ed entry */
	u32 s_last;		/* last completed entry */
	u32 s_lsn;		/* limit sequence number (credit) */
	u32 s_ahgpsn;		/* PSN in the cached header copy */
	u16 s_cur_size;		/* size of send packet in bytes */
	u16 s_rdma_ack_cnt;
	u8 s_hdrwords;		/* size of s_hdr in 32-bit words */
	s8 s_ahgidx;
	u8 s_state;		/* opcode of last packet sent */
	u8 s_ack_state;		/* opcode of packet to ACK */
	u8 s_nak_state;		/* non-zero if NAK is pending */
	u8 r_nak_state;		/* non-zero if NAK is pending */
	u8 s_retry;		/* requester retry counter */
	u8 s_rnr_retry;		/* requester RNR retry counter */
	u8 s_num_rd_atomic;	/* number of RDMA read/atomic pending */
	u8 s_tail_ack_queue;	/* index into s_ack_queue[] */
	u8 s_acked_ack_queue;	/* index into s_ack_queue[] */

	struct rvt_sge_state s_ack_rdma_sge;
	struct timer_list s_timer;	/* RC retry timer */
	struct hrtimer s_rnr_timer;	/* RNR timer */

	atomic_t local_ops_pending;	/* number of fast_reg/local_inv reqs */

	/*
	 * This SGE list MUST be last: it is the flexible portion
	 * sized at QP creation time.
	 */
	struct rvt_sge *r_sg_list /* verified SGEs */
		____cacheline_aligned_in_smp;
};
0450
/* Shared receive queue. */
struct rvt_srq {
	struct ib_srq ibsrq;
	struct rvt_rq rq;
	struct rvt_mmap_info *ip;	/* for user-mapped queues */
	/* send signal when number of RWQEs < limit */
	u32 limit;
};
0458
/* Convert an ib_srq pointer to its containing rvt_srq. */
static inline struct rvt_srq *ibsrq_to_rvtsrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct rvt_srq, ibsrq);
}
0463
/* Convert an ib_qp pointer to its containing rvt_qp. */
static inline struct rvt_qp *ibqp_to_rvtqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct rvt_qp, ibqp);
}
0468
/* QPN allocation bitmap geometry: 24-bit QPN space split into pages. */
#define RVT_QPN_MAX BIT(24)
#define RVT_QPNMAP_ENTRIES (RVT_QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE (PAGE_SIZE * BITS_PER_BYTE)
#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE - 1)
#define RVT_QPN_MASK IB_QPN_MASK
0474
0475
0476
0477
0478
0479
/*
 * QPN-map pages start out as NULL; they get allocated upon first use
 * and are never deallocated, so large bitmaps are not allocated unless
 * large numbers of QPs are used.
 */
struct rvt_qpn_map {
	void *page;
};
0483
/* Allocator state for QP numbers. */
struct rvt_qpn_table {
	spinlock_t lock;	/* protect changes to the qp table */
	unsigned flags;		/* flags for QP0/1 allocated for each port */
	u32 last;		/* last QP number allocated */
	u32 nmaps;		/* size of the map table */
	u16 limit;
	u8 incr;
	/* bit map of free QP numbers */
	struct rvt_qpn_map map[RVT_QPNMAP_ENTRIES];
};
0494
/* Per-device QP bookkeeping: QPN hash table plus QPN allocator. */
struct rvt_qp_ibdev {
	u32 qp_table_size;	/* number of hash buckets */
	u32 qp_table_bits;	/* log2 of table size (for hash_32) */
	struct rvt_qp __rcu **qp_table;
	spinlock_t qpt_lock;	/* protect writers of qp_table */
	struct rvt_qpn_table qpn_table;
};
0502
0503
0504
0505
0506
0507
/*
 * There is one struct rvt_mcast for each multicast GID.
 * All attached QPs are then stored as a list of struct rvt_mcast_qp.
 */
struct rvt_mcast_qp {
	struct list_head list;	/* link on rvt_mcast.qp_list */
	struct rvt_qp *qp;
};
0512
/* Multicast group address: GID plus LID. */
struct rvt_mcast_addr {
	union ib_gid mgid;
	u16 lid;
};
0517
/* One multicast group, kept in a per-port rbtree keyed by mcast_addr. */
struct rvt_mcast {
	struct rb_node rb_node;
	struct rvt_mcast_addr mcast_addr;
	struct list_head qp_list;	/* list of attached rvt_mcast_qp */
	wait_queue_head_t wait;		/* woken when refcount drops */
	atomic_t refcount;
	int n_attached;			/* number of attached QPs */
};
0526
0527
0528
0529
0530
0531 static inline struct rvt_swqe *rvt_get_swqe_ptr(struct rvt_qp *qp,
0532 unsigned n)
0533 {
0534 return (struct rvt_swqe *)((char *)qp->s_wq +
0535 (sizeof(struct rvt_swqe) +
0536 qp->s_max_sge *
0537 sizeof(struct rvt_sge)) * n);
0538 }
0539
0540
0541
0542
0543
0544 static inline struct rvt_rwqe *rvt_get_rwqe_ptr(struct rvt_rq *rq, unsigned n)
0545 {
0546 return (struct rvt_rwqe *)
0547 ((char *)rq->kwq->curr_wq +
0548 (sizeof(struct rvt_rwqe) +
0549 rq->max_sge * sizeof(struct ib_sge)) * n);
0550 }
0551
0552
0553
0554
0555
0556 static inline bool rvt_is_user_qp(struct rvt_qp *qp)
0557 {
0558 return !!qp->pid;
0559 }
0560
0561
0562
0563
0564
/**
 * rvt_get_qp - get a QP reference
 * @qp: the QP to hold
 */
static inline void rvt_get_qp(struct rvt_qp *qp)
{
	atomic_inc(&qp->refcount);
}
0569
0570
0571
0572
0573
0574 static inline void rvt_put_qp(struct rvt_qp *qp)
0575 {
0576 if (qp && atomic_dec_and_test(&qp->refcount))
0577 wake_up(&qp->wait);
0578 }
0579
0580
0581
0582
0583
0584
0585
0586 static inline void rvt_put_swqe(struct rvt_swqe *wqe)
0587 {
0588 int i;
0589
0590 for (i = 0; i < wqe->wr.num_sge; i++) {
0591 struct rvt_sge *sge = &wqe->sg_list[i];
0592
0593 rvt_put_mr(sge->mr);
0594 }
0595 }
0596
0597
0598
0599
0600
0601
0602
0603
0604
/**
 * rvt_qp_wqe_reserve - reserve operation
 * @qp: the rvt qp
 * @wqe: the send wqe
 *
 * Used in post send to record a WQE-relative
 * reserved-operation use.
 */
static inline void rvt_qp_wqe_reserve(
	struct rvt_qp *qp,
	struct rvt_swqe *wqe)
{
	atomic_inc(&qp->s_reserved_used);
}
0611
0612
0613
0614
0615
0616
0617
0618
0619
0620
0621
0622
0623
0624
0625
0626
/**
 * rvt_qp_wqe_unreserve - clean reserved operation
 * @qp: the rvt qp
 * @flags: send wqe flags
 *
 * Decrements the reserve-use count.
 *
 * This call MUST precede the change to s_last to ensure that
 * post send sees a stable s_avail: smp_mb__after_atomic()
 * keeps the decrement of s_reserved_used ordered before the
 * subsequent s_last ring-index update.
 */
static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
{
	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
		atomic_dec(&qp->s_reserved_used);
		/* insure the compiler does not reorder with s_last update */
		smp_mb__after_atomic();
	}
}
0635
0636 extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
0637
0638
0639
0640
0641
/*
 * Compare the lower 24 bits of the two msn values: the << 8 discards
 * the upper byte so that wraparound in the 24-bit sequence space is
 * handled by signed arithmetic.
 * Returns an integer <, ==, or > than zero.
 */
static inline int rvt_cmp_msn(u32 a, u32 b)
{
	return (((int)a) - ((int)b)) << 8;
}
0646
0647 __be32 rvt_compute_aeth(struct rvt_qp *qp);
0648
0649 void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
0650
0651 u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
0652
0653
0654
0655
0656
0657
0658
0659
0660 static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
0661 {
0662 return (len + qp->pmtu - 1) >> qp->log_pmtu;
0663 }
0664
0665
0666
0667
0668
0669
0670
/**
 * rvt_div_mtu - divide by the path MTU
 * @qp: the qp pair
 * @len: the length
 *
 * Shift-based MTU divide (pmtu == 1 << log_pmtu).
 */
static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
{
	return len >> qp->log_pmtu;
}
0675
0676
0677
0678
0679
0680
0681
0682 static inline unsigned long rvt_timeout_to_jiffies(u8 timeout)
0683 {
0684 if (timeout > 31)
0685 timeout = 31;
0686
0687 return usecs_to_jiffies(1U << timeout) * 4096UL / 1000UL;
0688 }
0689
0690
0691
0692
0693
0694
0695
0696
0697
/**
 * rvt_lookup_qpn - return the QP with the given QPN
 * @rdi: rvt device info structure
 * @rvp: the ibport
 * @qpn: the QP number to look up
 *
 * The caller must hold the rcu_read_lock(), and keep the lock until
 * the returned qp is no longer in use.
 */
static inline struct rvt_qp *rvt_lookup_qpn(struct rvt_dev_info *rdi,
					    struct rvt_ibport *rvp,
					    u32 qpn) __must_hold(RCU)
{
	struct rvt_qp *qp = NULL;

	if (unlikely(qpn <= 1)) {
		/* QP0/QP1 are stored directly on the port */
		qp = rcu_dereference(rvp->qp[qpn]);
	} else {
		u32 n = hash_32(qpn, rdi->qp_dev->qp_table_bits);

		/* walk the hash bucket chain for a QPN match */
		for (qp = rcu_dereference(rdi->qp_dev->qp_table[n]); qp;
			qp = rcu_dereference(qp->next))
			if (qp->ibqp.qp_num == qpn)
				break;
	}
	return qp;
}
0716
0717
0718
0719
0720
0721
0722
/**
 * rvt_mod_retry_timer_ext - mod a retry timer
 * @qp: the QP
 * @shift: timeout shift to wait for multiple packets
 *
 * Modify a potentially already-running retry timer.
 * Caller must hold qp->s_lock.
 */
static inline void rvt_mod_retry_timer_ext(struct rvt_qp *qp, u8 shift)
{
	struct ib_qp *ibqp = &qp->ibqp;
	struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);

	lockdep_assert_held(&qp->s_lock);
	qp->s_flags |= RVT_S_TIMER;
	/* 4.096 usec. * (1 << qp->timeout), scaled by shift */
	mod_timer(&qp->s_timer, jiffies + rdi->busy_jiffies +
		  (qp->timeout_jiffies << shift));
}
0734
/**
 * rvt_mod_retry_timer - mod a retry timer with no extra shift
 * @qp: the QP
 */
static inline void rvt_mod_retry_timer(struct rvt_qp *qp)
{
	rvt_mod_retry_timer_ext(qp, 0);
}
0739
0740
0741
0742
0743
0744
0745
0746
/**
 * rvt_put_qp_swqe - drop refs held by swqe
 * @qp: the send qp
 * @wqe: the send wqe
 *
 * Drops the MR references and, for UD QPs, the AH attribute
 * cached on the WQE.
 */
static inline void rvt_put_qp_swqe(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	rvt_put_swqe(wqe);
	if (qp->allowed_ops == IB_OPCODE_UD)
		rdma_destroy_ah_attr(wqe->ud_wr.attr);
}
0753
0754
0755
0756
0757
0758
0759
0760
0761 static inline u32
0762 rvt_qp_swqe_incr(struct rvt_qp *qp, u32 val)
0763 {
0764 if (++val >= qp->s_size)
0765 val = 0;
0766 return val;
0767 }
0768
0769 int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err);
0770
0771
0772
0773
0774
0775
0776
0777
0778
0779
0780
0781
/**
 * rvt_recv_cq - add a new entry to the receive completion queue
 * @qp: the receive qp
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * Wrapper around rvt_cq_enter() for the receive CQ; if the entry
 * cannot be queued the QP is moved to the error state.
 */
static inline void rvt_recv_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.recv_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}
0790
0791
0792
0793
0794
0795
0796
0797
0798
0799
0800
0801
/**
 * rvt_send_cq - add a new entry to the send completion queue
 * @qp: the send qp
 * @wc: work completion entry to add
 * @solicited: true if @wc is solicited
 *
 * Wrapper around rvt_cq_enter() for the send CQ; if the entry
 * cannot be queued the QP is moved to the error state.
 */
static inline void rvt_send_cq(struct rvt_qp *qp, struct ib_wc *wc,
			       bool solicited)
{
	struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);

	if (unlikely(!rvt_cq_enter(cq, wc, solicited)))
		rvt_error_qp(qp, IB_WC_LOC_QP_OP_ERR);
}
0810
0811
0812
0813
0814
0815
0816
0817
0818
0819
0820
0821
0822
0823
0824
0825
0826
/**
 * rvt_qp_complete_swqe - insert send completion
 * @qp: the qp
 * @wqe: the send wqe
 * @opcode: wc operation (driver dependent)
 * @status: completion status
 *
 * Update the s_last information, and then insert a send completion
 * into the completion queue if the qp indicates it should be done.
 *
 * See IBTA 10.7.3.1 for info on completion control.
 *
 * Return: new value of s_last
 */
static inline u32
rvt_qp_complete_swqe(struct rvt_qp *qp,
		     struct rvt_swqe *wqe,
		     enum ib_wc_opcode opcode,
		     enum ib_wc_status status)
{
	bool need_completion;
	u64 wr_id;
	u32 byte_len, last;
	int flags = wqe->wr.send_flags;

	/* drop reserved-operation accounting and WQE references first */
	rvt_qp_wqe_unreserve(qp, flags);
	rvt_put_qp_swqe(qp, wqe);

	/*
	 * Complete only non-reserved WQEs that are signaled (or all WQEs
	 * when the QP does not use selective signaling), and always on
	 * error.
	 */
	need_completion =
		!(flags & RVT_SEND_RESERVE_USED) &&
		(!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
		 (flags & IB_SEND_SIGNALED) ||
		 status != IB_WC_SUCCESS);
	if (need_completion) {
		/*
		 * Capture WQE fields before s_last advances; once it does,
		 * the posting side may reuse this WQE slot.
		 */
		wr_id = wqe->wr.wr_id;
		byte_len = wqe->length;
		/* above fields are what the WC will report */
	}
	last = rvt_qp_swqe_incr(qp, qp->s_last);
	/* release pairs with the acquire-side reader of s_last */
	smp_store_release(&qp->s_last, last);
	if (need_completion) {
		struct ib_wc w = {
			.wr_id = wr_id,
			.status = status,
			.opcode = opcode,
			.qp = &qp->ibqp,
			.byte_len = byte_len,
		};
		rvt_send_cq(qp, &w, status != IB_WC_SUCCESS);
	}
	return last;
}
0866
0867 extern const int ib_rvt_state_ops[];
0868
0869 struct rvt_dev_info;
0870 int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only);
0871 void rvt_comm_est(struct rvt_qp *qp);
0872 void rvt_rc_error(struct rvt_qp *qp, enum ib_wc_status err);
0873 unsigned long rvt_rnr_tbl_to_usec(u32 index);
0874 enum hrtimer_restart rvt_rc_rnr_retry(struct hrtimer *t);
0875 void rvt_add_rnr_timer(struct rvt_qp *qp, u32 aeth);
0876 void rvt_del_timers_sync(struct rvt_qp *qp);
0877 void rvt_stop_rc_timers(struct rvt_qp *qp);
0878 void rvt_add_retry_timer_ext(struct rvt_qp *qp, u8 shift);
/**
 * rvt_add_retry_timer - add/start a retry timer with no extra shift
 * @qp: the QP
 */
static inline void rvt_add_retry_timer(struct rvt_qp *qp)
{
	rvt_add_retry_timer_ext(qp, 0);
}
0883
0884 void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
0885 void *data, u32 length,
0886 bool release, bool copy_last);
0887 void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
0888 enum ib_wc_status status);
0889 void rvt_ruc_loopback(struct rvt_qp *qp);
0890
0891
0892
0893
0894
0895
0896
0897
0898
/**
 * struct rvt_qp_iter - the iterator for QPs
 * @qp: the current QP
 * @rdi: the rvt device info
 * @cb: the callback invoked per QP
 * @v: the argument passed to the callback
 * @specials: number of special QPs (QP0/QP1 per port)
 * @n: current iteration position
 *
 * This structure defines the QP iterator used by
 * rvt_qp_iter_init()/rvt_qp_iter_next().
 */
struct rvt_qp_iter {
	struct rvt_qp *qp;
	/* device info struct */
	struct rvt_dev_info *rdi;
	/* per-QP callback */
	void (*cb)(struct rvt_qp *qp, u64 v);
	/* callback argument */
	u64 v;
	/* number of special QPs (QP0/QP1) per port */
	int specials;
	/* current iterator position */
	int n;
};
0912
0913
0914
0915
0916
0917
0918
0919
0920 static inline u32 ib_cq_tail(struct ib_cq *send_cq)
0921 {
0922 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
0923
0924 return ibcq_to_rvtcq(send_cq)->ip ?
0925 RDMA_READ_UAPI_ATOMIC(cq->queue->tail) :
0926 ibcq_to_rvtcq(send_cq)->kqueue->tail;
0927 }
0928
0929
0930
0931
0932
0933
0934
0935
0936 static inline u32 ib_cq_head(struct ib_cq *send_cq)
0937 {
0938 struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);
0939
0940 return ibcq_to_rvtcq(send_cq)->ip ?
0941 RDMA_READ_UAPI_ATOMIC(cq->queue->head) :
0942 ibcq_to_rvtcq(send_cq)->kqueue->head;
0943 }
0944
0945
0946
0947
0948
0949
0950
0951
/**
 * rvt_free_rq - free memory allocated for rvt_rq struct
 * @rq: request queue data structure
 *
 * Frees both the kernel queue (kvfree) and the user-visible queue
 * (vfree) and NULLs the pointers to guard against reuse.
 */
static inline void rvt_free_rq(struct rvt_rq *rq)
{
	kvfree(rq->kwq);
	rq->kwq = NULL;
	vfree(rq->wq);
	rq->wq = NULL;
}
0959
0960
0961
0962
0963
0964
0965
0966 static inline struct rvt_ibport *rvt_to_iport(struct rvt_qp *qp)
0967 {
0968 struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
0969
0970 return rdi->ports[qp->port_num - 1];
0971 }
0972
0973
0974
0975
0976
0977
0978
0979
0980
/**
 * rvt_rc_credit_avail - Check if there are enough RC credits for the request
 * @qp: the qp
 * @wqe: the request
 *
 * Return: false when there are not enough credits for the given request
 * (and record the credit wait), true otherwise.
 * Caller must hold qp->s_lock.
 */
static inline bool rvt_rc_credit_avail(struct rvt_qp *qp, struct rvt_swqe *wqe)
{
	lockdep_assert_held(&qp->s_lock);
	if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
	    rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
		struct rvt_ibport *rvp = rvt_to_iport(qp);

		/* remember that we're waiting on an SSN credit */
		qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
		rvp->n_rc_crwaits++;
		return false;
	}
	return true;
}
0994
0995 struct rvt_qp_iter *rvt_qp_iter_init(struct rvt_dev_info *rdi,
0996 u64 v,
0997 void (*cb)(struct rvt_qp *qp, u64 v));
0998 int rvt_qp_iter_next(struct rvt_qp_iter *iter);
0999 void rvt_qp_iter(struct rvt_dev_info *rdi,
1000 u64 v,
1001 void (*cb)(struct rvt_qp *qp, u64 v));
1002 void rvt_qp_mr_clean(struct rvt_qp *qp, u32 lkey);
1003 #endif