0001
0002
0003
0004
0005
0006
0007 #ifndef _QED_CHAIN_H
0008 #define _QED_CHAIN_H
0009
0010 #include <linux/types.h>
0011 #include <asm/byteorder.h>
0012 #include <linux/kernel.h>
0013 #include <linux/list.h>
0014 #include <linux/sizes.h>
0015 #include <linux/slab.h>
0016 #include <linux/qed/common_hsi.h>
0017
/* Memory layout of a chain: how the pages holding its elements are linked. */
enum qed_chain_mode {
	/* Each page ends with an embedded struct qed_chain_next element
	 * pointing at the following page; pages are walked via that link.
	 */
	QED_CHAIN_MODE_NEXT_PTR,

	/* The chain occupies a single page; indices simply wrap in place. */
	QED_CHAIN_MODE_SINGLE,

	/* Pages are located through an external Page Base List (PBL) — an
	 * address table indexed by a producer/consumer page index.
	 */
	QED_CHAIN_MODE_PBL,
};
0028
/* Which side(s) of the chain the client drives. This affects
 * qed_chain_reset(): a TO_CONSUME chain is reset with every element
 * pre-produced (i.e. full), the others start empty.
 */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Client only produces */
	QED_CHAIN_USE_TO_CONSUME,		/* Client only consumes */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Client does both */
};
0034
/* Width of the producer/consumer indices (selects u.chain16/u.chain32 and
 * pbl.c.u16/pbl.c.u32 inside struct qed_chain).
 */
enum qed_chain_cnt_type {
	/* 16-bit producer/consumer indices */
	QED_CHAIN_CNT_TYPE_U16,

	/* 32-bit producer/consumer indices */
	QED_CHAIN_CNT_TYPE_U32,
};
0042
/* Link element embedded at the end of every page of a NEXT_PTR chain;
 * carries both the DMA address (for the device) and the kernel virtual
 * address (for the host-side walk) of the next page.
 */
struct qed_chain_next {
	struct regpair next_phys;
	void *next_virt;
};
0047
/* Current PBL producer/consumer page indices, 16-bit variant. */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};

/* Current PBL producer/consumer page indices, 32-bit variant. */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
0057
/* Producer/consumer element indices, 16-bit variant. The indices run
 * cyclically (they may wrap through 0 — see qed_chain_get_elem_used()).
 */
struct qed_chain_u16 {
	u16 prod_idx;
	u16 cons_idx;
};

/* Producer/consumer element indices, 32-bit variant. */
struct qed_chain_u32 {
	u32 prod_idx;
	u32 cons_idx;
};
0069
/* One chain page as seen from the host: its kernel virtual address and the
 * matching DMA mapping.
 */
struct addr_tbl_entry {
	void *virt_addr;
	dma_addr_t dma_map;
};
0074
/* A chain of fixed-size elements laid out over one or more DMA pages.
 * Supports the three page-linking modes of enum qed_chain_mode and either
 * 16- or 32-bit producer/consumer indices (enum qed_chain_cnt_type).
 */
struct qed_chain {
	/* Fast-path portion - updated on every produce/consume */

	/* Pointers to the elements the next produce/consume will touch */
	void *p_prod_elem;
	void *p_cons_elem;

	/* PBL-mode bookkeeping */
	struct {
		/* Table of page addresses (virtual + DMA), indexed by the
		 * page indices in 'c' below.
		 */
		struct addr_tbl_entry *pp_addr_tbl;

		/* Current producer/consumer page index; which union member
		 * is valid depends on cnt_type.
		 */
		union {
			struct qed_chain_pbl_u16 u16;
			struct qed_chain_pbl_u32 u32;
		} c;
	} pbl;

	/* Producer/consumer element indices; valid member per cnt_type */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	/* Total number of usable elements across all pages */
	u32 capacity;
	u32 page_cnt;

	enum qed_chain_mode mode;

	/* Per-page element geometry */
	u16 elem_per_page;
	/* Used as an AND-mask over element indices — assumes elem_per_page
	 * is a power of two (see qed_chain_set_prod()); TODO confirm at init.
	 */
	u16 elem_per_page_mask;
	u16 elem_size;
	u16 next_page_mask;
	u16 usable_per_page;
	/* Elements per page reserved (e.g. for the next-pointer link) */
	u8 elem_unusable;

	enum qed_chain_cnt_type cnt_type;

	/* Slow-path portion - rarely touched after initialization */

	u32 page_size;

	/* The PBL table itself (little-endian page addresses) */
	struct {
		__le64 *table_virt;
		dma_addr_t table_phys;
		size_t table_size;
	} pbl_sp;

	/* Address of the first page of the chain */
	void *p_virt_addr;
	dma_addr_t p_phys_addr;

	/* Total number of elements the chain was created for */
	u32 size;

	enum qed_chain_use_mode intended_use;

	/* Set when the PBL memory was supplied externally by the caller */
	bool b_external_pbl;
};
0147
/* Parameters describing a chain to be allocated/initialized. */
struct qed_chain_init_params {
	enum qed_chain_mode mode;
	enum qed_chain_use_mode intended_use;
	enum qed_chain_cnt_type cnt_type;

	u32 page_size;
	u32 num_elems;
	size_t elem_size;

	/* Optional caller-supplied PBL memory (sets b_external_pbl) */
	void *ext_pbl_virt;
	dma_addr_t ext_pbl_phys;
};
0160
/* Default chain page size */
#define QED_CHAIN_PAGE_SIZE		SZ_4K

/* Raw number of elements that fit in one page */
#define ELEMS_PER_PAGE(elem_size, page_size)				\
	((page_size) / (elem_size))

/* Elements per page lost to the embedded next-pointer link (NEXT_PTR mode
 * only): the number of whole elements needed to hold a qed_chain_next.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)			     \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?				     \
	 (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / (elem_size))) :     \
	 0)

/* Elements per page actually available to the client */
#define USABLE_ELEMS_PER_PAGE(elem_size, page_size, mode)		\
	((u32)(ELEMS_PER_PAGE((elem_size), (page_size)) -		\
	       UNUSABLE_ELEMS_PER_PAGE((elem_size), (mode))))

/* Number of pages needed to hold elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, page_size, mode)	     \
	DIV_ROUND_UP((elem_cnt),					     \
		     USABLE_ELEMS_PER_PAGE((elem_size), (page_size), (mode)))

/* Counter-width predicates for a struct qed_chain pointer */
#define is_chain_u16(p)	\
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p)	\
	((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
0183
0184
0185
0186 static inline u16 qed_chain_get_prod_idx(const struct qed_chain *chain)
0187 {
0188 return chain->u.chain16.prod_idx;
0189 }
0190
0191 static inline u16 qed_chain_get_cons_idx(const struct qed_chain *chain)
0192 {
0193 return chain->u.chain16.cons_idx;
0194 }
0195
0196 static inline u32 qed_chain_get_prod_idx_u32(const struct qed_chain *chain)
0197 {
0198 return chain->u.chain32.prod_idx;
0199 }
0200
0201 static inline u32 qed_chain_get_cons_idx_u32(const struct qed_chain *chain)
0202 {
0203 return chain->u.chain32.cons_idx;
0204 }
0205
0206 static inline u16 qed_chain_get_elem_used(const struct qed_chain *chain)
0207 {
0208 u32 prod = qed_chain_get_prod_idx(chain);
0209 u32 cons = qed_chain_get_cons_idx(chain);
0210 u16 elem_per_page = chain->elem_per_page;
0211 u16 used;
0212
0213 if (prod < cons)
0214 prod += (u32)U16_MAX + 1;
0215
0216 used = (u16)(prod - cons);
0217 if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
0218 used -= (u16)(prod / elem_per_page - cons / elem_per_page);
0219
0220 return used;
0221 }
0222
0223 static inline u16 qed_chain_get_elem_left(const struct qed_chain *chain)
0224 {
0225 return (u16)(chain->capacity - qed_chain_get_elem_used(chain));
0226 }
0227
0228 static inline u32 qed_chain_get_elem_used_u32(const struct qed_chain *chain)
0229 {
0230 u64 prod = qed_chain_get_prod_idx_u32(chain);
0231 u64 cons = qed_chain_get_cons_idx_u32(chain);
0232 u16 elem_per_page = chain->elem_per_page;
0233 u32 used;
0234
0235 if (prod < cons)
0236 prod += (u64)U32_MAX + 1;
0237
0238 used = (u32)(prod - cons);
0239 if (chain->mode == QED_CHAIN_MODE_NEXT_PTR)
0240 used -= (u32)(prod / elem_per_page - cons / elem_per_page);
0241
0242 return used;
0243 }
0244
0245 static inline u32 qed_chain_get_elem_left_u32(const struct qed_chain *chain)
0246 {
0247 return chain->capacity - qed_chain_get_elem_used_u32(chain);
0248 }
0249
0250 static inline u16 qed_chain_get_usable_per_page(const struct qed_chain *chain)
0251 {
0252 return chain->usable_per_page;
0253 }
0254
0255 static inline u8 qed_chain_get_unusable_per_page(const struct qed_chain *chain)
0256 {
0257 return chain->elem_unusable;
0258 }
0259
0260 static inline u32 qed_chain_get_page_cnt(const struct qed_chain *chain)
0261 {
0262 return chain->page_cnt;
0263 }
0264
0265 static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain)
0266 {
0267 return chain->pbl_sp.table_phys;
0268 }
0269
0270
0271
0272
0273
0274
0275
0276
0277
0278
0279
0280
/**
 * qed_chain_advance_page() - Move an element pointer to the first usable
 * element of the next page, updating the related index bookkeeping.
 *
 * @p_chain: Chain being walked.
 * @p_next_elem: In/out element pointer (p_prod_elem or p_cons_elem).
 * @idx_to_inc: Pointer to the prod/cons element index; its real width
 *              (u16 or u32) is dictated by the chain's cnt_type.
 * @page_to_inc: Pointer to the PBL prod/cons page index (PBL mode only);
 *               width likewise dictated by cnt_type.
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* The current element is the embedded link; follow it and
		 * charge the page's reserved elements to the element index.
		 */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* Single-page chain: wrap back to the start of the page. */
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* Cyclically advance the page index in its proper width,
		 * then look the new page up in the address table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
	}
}
0314
/* True when the current index sits on a page's first reserved (unusable)
 * element, i.e. the usable part of the page is exhausted. @idx names the
 * union member to test (prod_idx or cons_idx).
 */
#define is_unusable_idx(p, idx)	\
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* Same tests, but for the index one element ahead of the current one */
#define is_unusable_next_idx(p, idx)				 \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)			 \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If @idx has landed on a page's reserved region, skip over it so the
 * index points at the first usable element of the next page.
 */
#define test_and_skip(p, idx)						  \
	do {								  \
		if (is_chain_u16(p)) {					  \
			if (is_unusable_idx(p, idx))			  \
				(p)->u.chain16.idx += (p)->elem_unusable; \
		} else {						  \
			if (is_unusable_idx_u32(p, idx))		  \
				(p)->u.chain32.idx += (p)->elem_unusable; \
		}							  \
	} while (0)
0338
0339
0340
0341
0342
0343
0344
0345
0346
0347
0348
0349 static inline void qed_chain_return_produced(struct qed_chain *p_chain)
0350 {
0351 if (is_chain_u16(p_chain))
0352 p_chain->u.chain16.cons_idx++;
0353 else
0354 p_chain->u.chain32.cons_idx++;
0355 test_and_skip(p_chain, cons_idx);
0356 }
0357
0358
0359
0360
0361
0362
0363
0364
0365
0366
0367
0368
/**
 * qed_chain_produce() - Reserve the next element for production.
 *
 * @p_chain: Chain to produce on.
 *
 * If the producer index has reached the page's next-page boundary, the
 * chain is first advanced to the following page; the index is then
 * incremented and the element pointer bumped by one element.
 *
 * Return: Pointer to the element the caller should fill.
 */
static inline void *qed_chain_produce(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain16.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain16.prod_idx++;
	} else {
		if ((p_chain->u.chain32.prod_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_prod_idx = &p_chain->u.chain32.prod_idx;
			p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
					       p_prod_idx, p_prod_page_idx);
		}
		p_chain->u.chain32.prod_idx++;
	}

	/* Return the current element and step past it */
	p_ret = p_chain->p_prod_elem;
	p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
					p_chain->elem_size);

	return p_ret;
}
0399
0400
0401
0402
0403
0404
0405
0406
0407 static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
0408 {
0409 return p_chain->capacity;
0410 }
0411
0412
0413
0414
0415
0416
0417
0418
0419
0420
0421
0422 static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
0423 {
0424 test_and_skip(p_chain, prod_idx);
0425 if (is_chain_u16(p_chain))
0426 p_chain->u.chain16.prod_idx++;
0427 else
0428 p_chain->u.chain32.prod_idx++;
0429 }
0430
0431
0432
0433
0434
0435
0436
0437
0438
0439
/**
 * qed_chain_consume() - Consume the next element of the chain.
 *
 * @p_chain: Chain to consume from.
 *
 * Mirror of qed_chain_produce(): advances to the next page when the
 * consumer index hits the page's next-page boundary, then increments the
 * index and bumps the element pointer by one element.
 *
 * Return: Pointer to the element being consumed.
 */
static inline void *qed_chain_consume(struct qed_chain *p_chain)
{
	void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;

	if (is_chain_u16(p_chain)) {
		if ((p_chain->u.chain16.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain16.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain16.cons_idx++;
	} else {
		if ((p_chain->u.chain32.cons_idx &
		     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
			p_cons_idx = &p_chain->u.chain32.cons_idx;
			p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
			qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
					       p_cons_idx, p_cons_page_idx);
		}
		p_chain->u.chain32.cons_idx++;
	}

	/* Return the current element and step past it */
	p_ret = p_chain->p_cons_elem;
	p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
					p_chain->elem_size);

	return p_ret;
}
0470
0471
0472
0473
0474
0475
0476
0477
/**
 * qed_chain_reset() - Rewind a chain to its post-allocation state.
 *
 * @p_chain: Chain to reset.
 *
 * Zeroes the producer/consumer indices, rewinds the element pointers to
 * the first page, resets the PBL page indices, and — for TO_CONSUME
 * chains — pre-produces every element so the chain starts full.
 */
static inline void qed_chain_reset(struct qed_chain *p_chain)
{
	u32 i;

	if (is_chain_u16(p_chain)) {
		p_chain->u.chain16.prod_idx = 0;
		p_chain->u.chain16.cons_idx = 0;
	} else {
		p_chain->u.chain32.prod_idx = 0;
		p_chain->u.chain32.cons_idx = 0;
	}
	p_chain->p_cons_elem = p_chain->p_virt_addr;
	p_chain->p_prod_elem = p_chain->p_virt_addr;

	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		/* Use "page_cnt - 1" as the reset value for the prod/cons
		 * page indices: qed_chain_advance_page() increments before
		 * use, so the very first page advance wraps to page 0.
		 */
		u32 reset_val = p_chain->page_cnt - 1;

		if (is_chain_u16(p_chain)) {
			p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
			p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
		} else {
			p_chain->pbl.c.u32.prod_page_idx = reset_val;
			p_chain->pbl.c.u32.cons_page_idx = reset_val;
		}
	}

	switch (p_chain->intended_use) {
	case QED_CHAIN_USE_TO_CONSUME:
		/* Produce every element so the consumer-only side starts
		 * with a completely full chain.
		 */
		for (i = 0; i < p_chain->capacity; i++)
			qed_chain_recycle_consumed(p_chain);
		break;

	case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
	case QED_CHAIN_USE_TO_PRODUCE:
	default:
		/* Chain starts empty — nothing further to do */
		break;
	}
}
0523
0524
0525
0526
0527
0528
0529
0530
0531
/**
 * qed_chain_get_last_elem() - Return a pointer to the last usable element
 * of the chain.
 *
 * @p_chain: Chain to inspect.
 *
 * Locates the last page (by mode: walking next-pointers, the single page,
 * or the last PBL table entry) and offsets to its last usable element.
 *
 * Return: Pointer to the last element, or NULL if the chain has no memory.
 */
static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
{
	struct qed_chain_next *p_next = NULL;
	void *p_virt_addr = NULL;
	u32 size, last_page_idx;

	if (!p_chain->p_virt_addr)
		goto out;

	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Follow the embedded links until they wrap back to the
		 * first page; the embedded link sits right after the page's
		 * usable elements.
		 */
		size = p_chain->elem_size * p_chain->usable_per_page;
		p_virt_addr = p_chain->p_virt_addr;
		p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
		while (p_next->next_virt != p_chain->p_virt_addr) {
			p_virt_addr = p_next->next_virt;
			p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
							   size);
		}
		break;
	case QED_CHAIN_MODE_SINGLE:
		p_virt_addr = p_chain->p_virt_addr;
		break;
	case QED_CHAIN_MODE_PBL:
		last_page_idx = p_chain->page_cnt - 1;
		p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
		break;
	}

	/* Offset to the last usable element within the last page */
	size = p_chain->elem_size * (p_chain->usable_per_page - 1);
	p_virt_addr = (u8 *)p_virt_addr + size;
out:
	return p_virt_addr;
}
0566
0567
0568
0569
0570
0571
0572
0573
0574
0575
/**
 * qed_chain_set_prod() - Force the producer to a specific element.
 *
 * @p_chain: Chain to modify.
 * @prod_idx: New producer element index.
 * @p_prod_elem: Element pointer matching @prod_idx.
 *
 * For PBL chains the producer page index is re-derived from the distance
 * (in pages) between the current and the new producer index.
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (p_chain->mode == QED_CHAIN_MODE_PBL) {
		u32 cur_prod, page_mask, page_cnt, page_diff;

		cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
			   p_chain->u.chain32.prod_idx;

		/* Assumes elem_per_page is a power of two, so the inverted
		 * element mask selects the page-number bits of an index.
		 */
		page_mask = ~p_chain->elem_per_page_mask;

		/* Use "cur_prod - 1" and "prod_idx - 1" since the producer
		 * index reaches the first element of the next page before
		 * the page index is advanced — the off-by-one keeps both
		 * values attributed to the page they were last produced on.
		 */
		page_diff = (((cur_prod - 1) & page_mask) -
			     ((prod_idx - 1) & page_mask)) /
			    p_chain->elem_per_page;

		/* Rewind the page index modulo page_cnt (adding page_cnt
		 * keeps the dividend non-negative).
		 */
		page_cnt = qed_chain_get_page_cnt(p_chain);
		if (is_chain_u16(p_chain))
			p_chain->pbl.c.u16.prod_page_idx =
				(p_chain->pbl.c.u16.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
		else
			p_chain->pbl.c.u32.prod_page_idx =
				(p_chain->pbl.c.u32.prod_page_idx -
				 page_diff + page_cnt) % page_cnt;
	}

	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16) prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}
0616
0617
0618
0619
0620
0621
0622
0623
0624 static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
0625 {
0626 u32 i, page_cnt;
0627
0628 if (p_chain->mode != QED_CHAIN_MODE_PBL)
0629 return;
0630
0631 page_cnt = qed_chain_get_page_cnt(p_chain);
0632
0633 for (i = 0; i < page_cnt; i++)
0634 memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
0635 p_chain->page_size);
0636 }
0637
0638 #endif