#ifndef RXE_HDR_H
#define RXE_HDR_H
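
/* Per-packet state used by the rxe send and receive paths.  It is stored
 * directly in the skb control block (skb->cb), so it must not grow beyond
 * sizeof(skb->cb); SKB_TO_PKT() below enforces this at build time.
 */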
struct rxe_pkt_info {
	struct rxe_dev		*rxe;		/* device that owns the packet */
	struct rxe_qp		*qp;		/* qp that owns the packet */
	struct rxe_send_wqe	*wqe;		/* send wqe */
	u8			*hdr;		/* points to the bth */
	u32			mask;		/* useful info about the packet */
	u32			psn;		/* bth psn of the packet */
	u16			pkey_index;	/* partition key index */
	u16			paylen;		/* length from bth through icrc */
	u8			port_num;	/* port the packet arrived on */
	u8			opcode;		/* bth opcode of the packet */
};

static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
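
/* IBA header types and methods
 *
 * Header-specific routines insert and extract fields, converting between
 * network and CPU byte order as they go.  Routines named __hhh_fff() and
 * __hhh_set_fff() take a pointer to the start of the hhh header and get
 * or set its fff field.  Routines named hhh_fff() and hhh_set_fff() take
 * a rxe_pkt_info and locate the header inside the packet from the
 * opcode-indexed offset table rxe_opcode[].
 *
 * For example:
 *	__bth_opcode(void *arg)			- opcode field of a raw bth
 *	__bth_set_opcode(void *arg, u8 opcode)	- set that field
 *	bth_opcode(struct rxe_pkt_info *pkt)	- opcode field of the bth
 *						  in the packet
 *	bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
 */
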
#define RXE_ICRC_SIZE		(4)
#define RXE_MAX_HDR_LENGTH	(80)
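
/******************************************************************************
 * Base Transport Header (BTH)
 ******************************************************************************/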
struct rxe_bth {
	u8			opcode;
	u8			flags;
	__be16			pkey;
	__be32			qpn;
	__be32			apsn;
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			(~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	/* clear the reserved bits without disturbing the rest of the word */
	bth->qpn &= cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
			(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr, psn);
}
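
/* bth_init() fills in the whole BTH at once and is normally the first step
 * in building a request or response packet.  An illustrative call (opcode,
 * qpn and psn are hypothetical locals), requesting an ack with no pad bytes
 * and the solicited-event and migration bits clear:
 *
 *	bth_init(pkt, opcode, 0, 0, 0, BTH_DEF_PKEY, qpn, 1, psn);
 */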
static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}
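
/******************************************************************************
 * Reliable Datagram Extended Transport Header (RDETH)
 * (for completeness only; rxe does not currently implement the RD transport)
 ******************************************************************************/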
struct rxe_rdeth {
	__be32			een;
};

#define RDETH_EEN_MASK		(0x00ffffff)

static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}
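
/******************************************************************************
 * Datagram Extended Transport Header (DETH)
 ******************************************************************************/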
struct rxe_deth {
	__be32			qkey;
	__be32			sqp;
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
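
/******************************************************************************
 * RDMA Extended Transport Header (RETH)
 ******************************************************************************/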
struct rxe_reth {
	__be64			va;
	__be32			rkey;
	__be32			len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
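
/******************************************************************************
 * Atomic Extended Transport Header (AtomicETH)
 ******************************************************************************/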
struct rxe_atmeth {
	__be64			va;
	__be32			rkey;
	__be64			swap_add;
	__be64			comp;
} __packed;

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
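
/******************************************************************************
 * ACK Extended Transport Header (AETH)
 ******************************************************************************/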
struct rxe_aeth {
	__be32			smsn;
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

/* The top three bits of the syndrome select the ack type; the low five bits
 * carry the credit count for an ACK, the timer value for an RNR NAK, or the
 * error code for a NAK.
 */
enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			(~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			(~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
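
/******************************************************************************
 * Atomic ACK Extended Transport Header (AtomicAckETH)
 ******************************************************************************/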
struct rxe_atmack {
	__be64			orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
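
/******************************************************************************
 * Immediate Data Extended Transport Header (ImmDt)
 ******************************************************************************/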
struct rxe_immdt {
	__be32			imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}
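
/******************************************************************************
 * Invalidate Extended Transport Header (IETH)
 ******************************************************************************/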
struct rxe_ieth {
	__be32			rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr +
		rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}
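
/* paylen counts everything from the start of the bth through the icrc, so
 * the payload length is whatever remains after the opcode-dependent headers,
 * the pad bytes and the 4-byte icrc are subtracted.  As an illustration, a
 * 4-byte RC SEND payload behind a 12-byte bth with no padding gives
 * paylen = 12 + 4 + 4 = 20 and payload_size() = 20 - 12 - 0 - 4 = 4.
 */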
static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}

#endif /* RXE_HDR_H */