/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium, Inc. for more information
 **********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"

struct niclist {
	struct list_head list;
	void *ptr;
};

struct __dispatch {
	struct list_head list;
	struct octeon_recv_info *rinfo;
	octeon_dispatch_fn_t disp_fn;
};

/** Get the argument that the user set when registering dispatch
 *  function for a given opcode/subcode.
 *  @param  octeon_dev - the octeon device pointer.
 *  @param  opcode     - the opcode for which the dispatch argument
 *                       is to be checked.
 *  @param  subcode    - the subcode for which the dispatch argument
 *                       is to be checked.
 *  @return  Success: void * (argument to the dispatch function)
 *  @return  Failure: NULL
 */
void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
			      u16 opcode, u16 subcode)
{
	int idx;
	struct list_head *dispatch;
	void *fn_arg = NULL;
	u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);

	idx = combined_opcode & OCTEON_OPCODE_MASK;

	spin_lock_bh(&octeon_dev->dispatch.lock);

	if (octeon_dev->dispatch.count == 0) {
		spin_unlock_bh(&octeon_dev->dispatch.lock);
		return NULL;
	}

	if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) {
		fn_arg = octeon_dev->dispatch.dlist[idx].arg;
	} else {
		list_for_each(dispatch,
			      &octeon_dev->dispatch.dlist[idx].list) {
			if (((struct octeon_dispatch *)dispatch)->opcode ==
			    combined_opcode) {
				fn_arg = ((struct octeon_dispatch *)
					  dispatch)->arg;
				break;
			}
		}
	}

	spin_unlock_bh(&octeon_dev->dispatch.lock);
	return fn_arg;
}

/** Check for packets on Droq. This function should be called with lock held.
 *  @param  droq - Droq on which count is checked.
 *  @return Returns packet count.
 */
u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
	u32 pkt_count = 0;
	u32 last_count;

	pkt_count = readl(droq->pkts_sent_reg);

	last_count = pkt_count - droq->pkt_count;
	droq->pkt_count = pkt_count;

	/* we shall write to cnts at napi irq enable or end of droq tasklet */
	if (last_count)
		atomic_add(last_count, &droq->pkts_pending);

	return last_count;
}
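
/* Derive droq->max_empty_descs from the ring size and buffer size. */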
static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq)
{
	u32 count = 0;

	/* max_empty_descs is the max. no. of descs that can have no buffers.
	 * If the empty desc count goes beyond this value, we cannot safely
	 * read in a 64K packet sent by Octeon
	 * (64K is max pkt size from Octeon)
	 */
	droq->max_empty_descs = 0;

	do {
		droq->max_empty_descs++;
		count += droq->buffer_size;
	} while (count < (64 * 1024));

	droq->max_empty_descs = droq->max_count - droq->max_empty_descs;
}
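
/* Reset the read/write/refill indices and the pending-packet count. */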
static void octeon_droq_reset_indices(struct octeon_droq *droq)
{
	droq->read_idx = 0;
	droq->write_idx = 0;
	droq->refill_idx = 0;
	droq->refill_count = 0;
	atomic_set(&droq->pkts_pending, 0);
}
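
/* Unmap and free every receive buffer attached to the ring, then reset
 * the ring indices.
 */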
static void
octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
				 struct octeon_droq *droq)
{
	u32 i;
	struct octeon_skb_page_info *pg_info;

	for (i = 0; i < droq->max_count; i++) {
		pg_info = &droq->recv_buf_list[i].pg_info;
		if (!pg_info)
			continue;

		if (pg_info->dma)
			lio_unmap_ring(oct->pci_dev,
				       (u64)pg_info->dma);
		pg_info->dma = 0;

		if (pg_info->page)
			recv_buffer_destroy(droq->recv_buf_list[i].buffer,
					    pg_info);

		droq->recv_buf_list[i].buffer = NULL;
	}

	octeon_droq_reset_indices(droq);
}
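
/* Allocate and map a receive buffer for each ring descriptor. Returns 0 on
 * success, -ENOMEM if any allocation fails.
 */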
static int
octeon_droq_setup_ring_buffers(struct octeon_device *oct,
			       struct octeon_droq *droq)
{
	u32 i;
	void *buf;
	struct octeon_droq_desc *desc_ring = droq->desc_ring;

	for (i = 0; i < droq->max_count; i++) {
		buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);

		if (!buf) {
			dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
				__func__);
			droq->stats.rx_alloc_failure++;
			return -ENOMEM;
		}

		droq->recv_buf_list[i].buffer = buf;
		droq->recv_buf_list[i].data = get_rbd(buf);
		desc_ring[i].info_ptr = 0;
		desc_ring[i].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[i].buffer);
	}

	octeon_droq_reset_indices(droq);

	octeon_droq_compute_max_packet_bufs(droq);

	return 0;
}
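
/* Tear down a DROQ: free its ring buffers, descriptor ring and receive
 * buffer list, and clear it from the device's output-queue mask.
 */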
int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
{
	struct octeon_droq *droq = oct->droq[q_no];

	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);

	octeon_droq_destroy_ring_buffers(oct, droq);
	vfree(droq->recv_buf_list);

	if (droq->desc_ring)
		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
			     droq->desc_ring, droq->desc_ring_dma);

	memset(droq, 0, OCT_DROQ_SIZE);
	oct->io_qmask.oq &= ~(1ULL << q_no);
	vfree(oct->droq[q_no]);
	oct->droq[q_no] = NULL;
	oct->num_oqs--;

	return 0;
}
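
/* Initialize a DROQ: pick up per-chip config values, allocate the
 * descriptor ring and receive buffer list, populate the ring and program
 * the output-queue registers. Returns 0 on success, 1 on failure.
 */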
0219
0220 int octeon_init_droq(struct octeon_device *oct,
0221 u32 q_no,
0222 u32 num_descs,
0223 u32 desc_size,
0224 void *app_ctx)
0225 {
0226 struct octeon_droq *droq;
0227 u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
0228 u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
0229 int numa_node = dev_to_node(&oct->pci_dev->dev);
0230
0231 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
0232
0233 droq = oct->droq[q_no];
0234 memset(droq, 0, OCT_DROQ_SIZE);
0235
0236 droq->oct_dev = oct;
0237 droq->q_no = q_no;
0238 if (app_ctx)
0239 droq->app_ctx = app_ctx;
0240 else
0241 droq->app_ctx = (void *)(size_t)q_no;
0242
0243 c_num_descs = num_descs;
0244 c_buf_size = desc_size;
0245 if (OCTEON_CN6XXX(oct)) {
0246 struct octeon_config *conf6x = CHIP_CONF(oct, cn6xxx);
0247
0248 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
0249 c_refill_threshold =
0250 (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
0251 } else if (OCTEON_CN23XX_PF(oct)) {
0252 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_pf);
0253
0254 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
0255 c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
0256 } else if (OCTEON_CN23XX_VF(oct)) {
0257 struct octeon_config *conf23 = CHIP_CONF(oct, cn23xx_vf);
0258
0259 c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf23);
0260 c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf23);
0261 } else {
0262 return 1;
0263 }
0264
0265 droq->max_count = c_num_descs;
0266 droq->buffer_size = c_buf_size;
0267
0268 desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
0269 droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
0270 (dma_addr_t *)&droq->desc_ring_dma);
0271
0272 if (!droq->desc_ring) {
0273 dev_err(&oct->pci_dev->dev,
0274 "Output queue %d ring alloc failed\n", q_no);
0275 return 1;
0276 }
0277
0278 dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
0279 q_no, droq->desc_ring, droq->desc_ring_dma);
0280 dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
0281 droq->max_count);
0282
0283 droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
0284 numa_node);
0285 if (!droq->recv_buf_list)
0286 droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
0287 if (!droq->recv_buf_list) {
0288 dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
0289 goto init_droq_fail;
0290 }
0291
0292 if (octeon_droq_setup_ring_buffers(oct, droq))
0293 goto init_droq_fail;
0294
0295 droq->pkts_per_intr = c_pkts_per_intr;
0296 droq->refill_threshold = c_refill_threshold;
0297
0298 dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n",
0299 droq->max_empty_descs);
0300
0301 INIT_LIST_HEAD(&droq->dispatch_list);
0302
0303
0304 oct->fn_list.setup_oq_regs(oct, q_no);
0305
0306 oct->io_qmask.oq |= BIT_ULL(q_no);
0307
0308 return 0;
0309
0310 init_droq_fail:
0311 octeon_delete_droq(oct, q_no);
0312 return 1;
0313 }

/* octeon_create_recv_info
 * Parameters:
 *  octeon_dev - pointer to the octeon device structure
 *  droq       - droq in which the packet arrived.
 *  buf_cnt    - no. of buffers used by the packet.
 *  idx        - index in the descriptor for the first buffer in the packet.
 * Description:
 *  Allocates a recv_info_t and copies the buffer addresses for packet data
 *  into the recv_pkt space which starts at the end of recv_info_t.
 *  Flags the descriptors for refill later. If available descriptors go
 *  below the threshold to receive a 64K pkt, new buffers are first allocated
 *  before the recv_pkt_t is created.
 *  This routine will be called only if we form one packet for the recv
 *  buffer (either the packet is spread across multiple buffers or the
 *  packet needs multiple buffers).
 */
static inline struct octeon_recv_info *octeon_create_recv_info(
		struct octeon_device *octeon_dev,
		struct octeon_droq *droq,
		u32 buf_cnt,
		u32 idx)
{
	struct octeon_droq_info *info;
	struct octeon_recv_pkt *recv_pkt;
	struct octeon_recv_info *recv_info;
	u32 i, bytes_left;
	struct octeon_skb_page_info *pg_info;

	info = (struct octeon_droq_info *)droq->recv_buf_list[idx].data;

	recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch));
	if (!recv_info)
		return NULL;

	recv_pkt = recv_info->recv_pkt;
	recv_pkt->rh = info->rh;
	recv_pkt->length = (u32)info->length;
	recv_pkt->buffer_count = (u16)buf_cnt;
	recv_pkt->octeon_id = (u16)octeon_dev->octeon_id;

	i = 0;
	bytes_left = (u32)info->length;

	while (buf_cnt) {
		pg_info = &droq->recv_buf_list[idx].pg_info;

		lio_unmap_ring(octeon_dev->pci_dev,
			       (u64)pg_info->dma);
		pg_info->page = NULL;
		pg_info->dma = 0;

		recv_pkt->buffer_size[i] =
			(bytes_left >=
			 droq->buffer_size) ? droq->buffer_size : bytes_left;

		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
		droq->recv_buf_list[idx].buffer = NULL;

		idx = incr_index(idx, 1, droq->max_count);
		bytes_left -= droq->buffer_size;
		i++;
		buf_cnt--;
	}

	return recv_info;
}

/* If we were not able to refill all buffers, try to move around
 * the buffers that were not dispatched.
 */
static inline u32
octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
				struct octeon_droq_desc *desc_ring)
{
	u32 desc_refilled = 0;
	u32 refill_index = droq->refill_idx;

	while (refill_index != droq->read_idx) {
		if (droq->recv_buf_list[refill_index].buffer) {
			droq->recv_buf_list[droq->refill_idx].buffer =
				droq->recv_buf_list[refill_index].buffer;
			droq->recv_buf_list[droq->refill_idx].data =
				droq->recv_buf_list[refill_index].data;
			desc_ring[droq->refill_idx].buffer_ptr =
				desc_ring[refill_index].buffer_ptr;
			droq->recv_buf_list[refill_index].buffer = NULL;
			desc_ring[refill_index].buffer_ptr = 0;
			do {
				droq->refill_idx = incr_index(droq->refill_idx,
							      1,
							      droq->max_count);
				desc_refilled++;
				droq->refill_count--;
			} while (droq->recv_buf_list[droq->refill_idx].buffer);
		}
		refill_index = incr_index(refill_index, 1, droq->max_count);
	}
	return desc_refilled;
}

/* octeon_droq_refill
 * Parameters:
 *  droq       - droq in which descriptors require new buffers.
 * Description:
 *  Called during normal DROQ processing in interrupt mode or by the poll
 *  thread to refill the descriptors from which buffers were dispatched
 *  to upper layers. Attempts to allocate new buffers. If that fails, moves
 *  around descriptors that were not dispatched to upper layers.
 */
static u32
octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
{
	struct octeon_droq_desc *desc_ring;
	void *buf = NULL;
	u8 *data;
	u32 desc_refilled = 0;
	struct octeon_skb_page_info *pg_info;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->max_count)) {
		/* If a valid buffer exists (happens if there is no dispatch),
		 * reuse the buffer, else allocate.
		 */
		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
			pg_info =
				&droq->recv_buf_list[droq->refill_idx].pg_info;
			/* Either recycle the existing pages or go for
			 * new page alloc
			 */
			if (pg_info->page)
				buf = recv_buffer_reuse(octeon_dev, pg_info);
			else
				buf = recv_buffer_alloc(octeon_dev, pg_info);
			/* If a buffer could not be allocated, no point in
			 * continuing
			 */
			if (!buf) {
				droq->stats.rx_alloc_failure++;
				break;
			}
			droq->recv_buf_list[droq->refill_idx].buffer =
				buf;
			data = get_rbd(buf);
		} else {
			data = get_rbd(droq->recv_buf_list
				       [droq->refill_idx].buffer);
		}

		droq->recv_buf_list[droq->refill_idx].data = data;

		desc_ring[droq->refill_idx].buffer_ptr =
			lio_map_ring(droq->recv_buf_list[
				     droq->refill_idx].buffer);

		droq->refill_idx = incr_index(droq->refill_idx, 1,
					      droq->max_count);
		desc_refilled++;
		droq->refill_count--;
	}

	if (droq->refill_count)
		desc_refilled +=
			octeon_droq_refill_pullup_descs(droq, desc_ring);

	/* The refill count would not change in pass two. We only moved buffers
	 * to close the gap in the ring, but we would still have the same no.
	 * of buffers to refill.
	 */
	return desc_refilled;
}
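
/* Retry a refill that could not previously complete (e.g. after an rx
 * out-of-memory event). Returns 1 if the caller should reschedule the
 * retry, 0 once enough credits have been posted.
 */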
int octeon_retry_droq_refill(struct octeon_droq *droq)
{
	struct octeon_device *oct = droq->oct_dev;
	int desc_refilled, reschedule = 1;
	u32 pkts_credit;

	pkts_credit = readl(droq->pkts_credit_reg);
	desc_refilled = octeon_droq_refill(oct, droq);
	if (desc_refilled) {
		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory
		 * is accurate.
		 */
		wmb();
		writel(desc_refilled, droq->pkts_credit_reg);

		if (pkts_credit + desc_refilled >= CN23XX_SLI_DEF_BP)
			reschedule = 0;
	}

	return reschedule;
}
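
/* Number of ring buffers of size buf_size needed to hold total_len bytes. */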
static inline u32
octeon_droq_get_bufcount(u32 buf_size, u32 total_len)
{
	return DIV_ROUND_UP(total_len, buf_size);
}
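
/* Hand a slow-path packet to the dispatch function registered for its
 * opcode/subcode. The recv_info is queued on droq->dispatch_list; the
 * dispatch functions themselves run after packet processing. Returns the
 * number of buffers the packet occupied.
 */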
static int
octeon_droq_dispatch_pkt(struct octeon_device *oct,
			 struct octeon_droq *droq,
			 union octeon_rh *rh,
			 struct octeon_droq_info *info)
{
	u32 cnt;
	octeon_dispatch_fn_t disp_fn;
	struct octeon_recv_info *rinfo;

	cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length);

	disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode,
				      (u16)rh->r.subcode);
	if (disp_fn) {
		rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx);
		if (rinfo) {
			struct __dispatch *rdisp = rinfo->rsvd;

			rdisp->rinfo = rinfo;
			rdisp->disp_fn = disp_fn;
			rinfo->recv_pkt->rh = *rh;
			list_add_tail(&rdisp->list,
				      &droq->dispatch_list);
		} else {
			droq->stats.dropped_nomem++;
		}
	} else {
		dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
			(unsigned int)rh->r.opcode,
			(unsigned int)rh->r.subcode);
		droq->stats.dropped_nodispatch++;
	}

	return cnt;
}
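
/* Drop cnt packets by advancing the read index past their buffers and
 * marking those buffers for refill.
 */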
static inline void octeon_droq_drop_packets(struct octeon_device *oct,
					    struct octeon_droq *droq,
					    u32 cnt)
{
	u32 i = 0, buf_cnt;
	struct octeon_droq_info *info;

	for (i = 0; i < cnt; i++) {
		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (info->length) {
			info->length += OCTNET_FRM_LENGTH_SIZE;
			droq->stats.bytes_received += info->length;
			buf_cnt = octeon_droq_get_bufcount(droq->buffer_size,
							   (u32)info->length);
		} else {
			dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n");
			buf_cnt = 1;
		}

		droq->read_idx = incr_index(droq->read_idx, buf_cnt,
					    droq->max_count);
		droq->refill_count += buf_cnt;
	}
}
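
/* Fast-path receive: walk up to pkts_to_process packets at the read index,
 * pushing fast-path packets to droq->ops.fptr and queueing slow-path ones
 * for dispatch, refilling the ring whenever the threshold is crossed.
 * Returns the number of packets accounted for.
 */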
static u32
octeon_droq_fast_process_packets(struct octeon_device *oct,
				 struct octeon_droq *droq,
				 u32 pkts_to_process)
{
	u32 pkt, total_len = 0, pkt_count, retval;
	struct octeon_droq_info *info;
	union octeon_rh *rh;

	pkt_count = pkts_to_process;

	for (pkt = 0; pkt < pkt_count; pkt++) {
		u32 pkt_len = 0;
		struct sk_buff *nicbuf = NULL;
		struct octeon_skb_page_info *pg_info;
		void *buf;

		info = (struct octeon_droq_info *)
			droq->recv_buf_list[droq->read_idx].data;
		octeon_swap_8B_data((u64 *)info, 2);

		if (!info->length) {
			dev_err(&oct->pci_dev->dev,
				"DROQ[%d] idx: %d len:0, pkt_cnt: %d\n",
				droq->q_no, droq->read_idx, pkt_count);
			print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS,
					     (u8 *)info,
					     OCT_DROQ_INFO_SIZE);
			break;
		}

		/* Len of resp hdr is included in the received data len. */
		rh = &info->rh;

		info->length += OCTNET_FRM_LENGTH_SIZE;
		rh->r_dh.len += (ROUNDUP8(OCT_DROQ_INFO_SIZE) / sizeof(u64));
		total_len += (u32)info->length;
		if (opcode_slow_path(rh)) {
			u32 buf_cnt;

			buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info);
			droq->read_idx = incr_index(droq->read_idx,
						    buf_cnt, droq->max_count);
			droq->refill_count += buf_cnt;
		} else {
			if (info->length <= droq->buffer_size) {
				pkt_len = (u32)info->length;
				nicbuf = droq->recv_buf_list[
					droq->read_idx].buffer;
				pg_info = &droq->recv_buf_list[
					droq->read_idx].pg_info;
				if (recv_buffer_recycle(oct, pg_info))
					pg_info->page = NULL;
				droq->recv_buf_list[droq->read_idx].buffer =
					NULL;

				droq->read_idx = incr_index(droq->read_idx, 1,
							    droq->max_count);
				droq->refill_count++;
			} else {
				nicbuf = octeon_fast_packet_alloc((u32)
								  info->length);
				pkt_len = 0;
				/* nicbuf allocation can fail. We'll handle it
				 * inside the loop.
				 */
				while (pkt_len < info->length) {
					int cpy_len, idx = droq->read_idx;

					cpy_len = ((pkt_len + droq->buffer_size)
						   > info->length) ?
						((u32)info->length - pkt_len) :
						droq->buffer_size;

					if (nicbuf) {
						octeon_fast_packet_next(droq,
									nicbuf,
									cpy_len,
									idx);
						buf = droq->recv_buf_list[
							idx].buffer;
						recv_buffer_fast_free(buf);
						droq->recv_buf_list[idx].buffer
							= NULL;
					} else {
						droq->stats.rx_alloc_failure++;
					}

					pkt_len += cpy_len;
					droq->read_idx =
						incr_index(droq->read_idx, 1,
							   droq->max_count);
					droq->refill_count++;
				}
			}

			if (nicbuf) {
				if (droq->ops.fptr) {
					droq->ops.fptr(oct->octeon_id,
						       nicbuf, pkt_len,
						       rh, &droq->napi,
						       droq->ops.farg);
				} else {
					recv_buffer_free(nicbuf);
				}
			}
		}

		if (droq->refill_count >= droq->refill_threshold) {
			int desc_refilled = octeon_droq_refill(oct, droq);

			if (desc_refilled) {
				/* Flush the droq descriptor data to memory
				 * to be sure that when we update the credits
				 * the data in memory is accurate.
				 */
				wmb();
				writel(desc_refilled, droq->pkts_credit_reg);
			}
		}
	}	/* for each packet */

	droq->stats.pkts_received += pkt;
	droq->stats.bytes_received += total_len;

	retval = pkt;
	if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) {
		octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt));

		droq->stats.dropped_toomany += (pkts_to_process - pkt);
		retval = pkts_to_process;
	}

	atomic_sub(retval, &droq->pkts_pending);

	if (droq->refill_count >= droq->refill_threshold &&
	    readl(droq->pkts_credit_reg) < CN23XX_SLI_DEF_BP) {
		octeon_droq_check_hw_for_pkts(droq);

		/* Make sure there are no pkts_pending on the queue */
		if (!atomic_read(&droq->pkts_pending))
			octeon_schedule_rxq_oom_work(oct, droq);
	}

	return retval;
}
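
/* Process up to budget packets on the queue, then run any dispatch
 * functions queued during processing. Returns 1 if packets remain pending
 * (the caller should reschedule), 0 otherwise.
 */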
0739
0740 int
0741 octeon_droq_process_packets(struct octeon_device *oct,
0742 struct octeon_droq *droq,
0743 u32 budget)
0744 {
0745 u32 pkt_count = 0;
0746 struct list_head *tmp, *tmp2;
0747
0748 octeon_droq_check_hw_for_pkts(droq);
0749 pkt_count = atomic_read(&droq->pkts_pending);
0750
0751 if (!pkt_count)
0752 return 0;
0753
0754 if (pkt_count > budget)
0755 pkt_count = budget;
0756
0757 octeon_droq_fast_process_packets(oct, droq, pkt_count);
0758
0759 list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
0760 struct __dispatch *rdisp = (struct __dispatch *)tmp;
0761
0762 list_del(tmp);
0763 rdisp->disp_fn(rdisp->rinfo,
0764 octeon_get_dispatch_arg
0765 (oct,
0766 (u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
0767 (u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
0768 }
0769
0770
0771 if (atomic_read(&droq->pkts_pending))
0772 return 1;
0773
0774 return 0;
0775 }

/* Utility function to poll for packets. check_hw_for_pkts must be called
 * before calling this routine.
 */
int
octeon_droq_process_poll_pkts(struct octeon_device *oct,
			      struct octeon_droq *droq, u32 budget)
{
	struct list_head *tmp, *tmp2;
	u32 pkts_available = 0, pkts_processed = 0;
	u32 total_pkts_processed = 0;

	if (budget > droq->max_count)
		budget = droq->max_count;

	while (total_pkts_processed < budget) {
		octeon_droq_check_hw_for_pkts(droq);

		pkts_available = min((budget - total_pkts_processed),
				     (u32)(atomic_read(&droq->pkts_pending)));

		if (pkts_available == 0)
			break;

		pkts_processed =
			octeon_droq_fast_process_packets(oct, droq,
							 pkts_available);

		total_pkts_processed += pkts_processed;
	}

	list_for_each_safe(tmp, tmp2, &droq->dispatch_list) {
		struct __dispatch *rdisp = (struct __dispatch *)tmp;

		list_del(tmp);
		rdisp->disp_fn(rdisp->rinfo,
			       octeon_get_dispatch_arg
			       (oct,
				(u16)rdisp->rinfo->recv_pkt->rh.r.opcode,
				(u16)rdisp->rinfo->recv_pkt->rh.r.subcode));
	}

	return total_pkts_processed;
}

/* Enable Pkt Interrupt */
int
octeon_enable_irq(struct octeon_device *oct, u32 q_no)
{
	switch (oct->chip_id) {
	case OCTEON_CN66XX:
	case OCTEON_CN68XX: {
		struct octeon_cn6xxx *cn6xxx =
			(struct octeon_cn6xxx *)oct->chip;
		unsigned long flags;
		u32 value;

		spin_lock_irqsave
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, value);
		value = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
		value |= (1 << q_no);
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, value);

		/* don't bother flushing the enables */

		spin_unlock_irqrestore
			(&cn6xxx->lock_for_droq_int_enb_reg, flags);
	}
	break;
	case OCTEON_CN23XX_PF_VID:
	case OCTEON_CN23XX_VF_VID:
		lio_enable_irq(oct->droq[q_no], oct->instr_queue[q_no]);
		break;
	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown Chip\n", __func__);
		return 1;
	}

	return 0;
}
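
/* Register the receive callbacks for a queue by copying *ops into the droq.
 * A minimal usage sketch (my_rx_handler and my_ctx are hypothetical caller
 * code, not part of this driver):
 *
 *	struct octeon_droq_ops ops = { 0 };
 *
 *	ops.fptr = my_rx_handler;	// invoked for each received packet
 *	ops.farg = my_ctx;
 *	ops.drop_on_max = 1;		// drop pkts beyond the process budget
 *	octeon_register_droq_ops(oct, q_no, &ops);
 */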
int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no,
			     struct octeon_droq_ops *ops)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (!(ops)) {
		dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n",
			__func__);
		return -EINVAL;
	}

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, (oct->num_oqs - 1));
		return -EINVAL;
	}

	droq = oct->droq[q_no];
	memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops));

	return 0;
}
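
/* Clear the receive callbacks previously registered for a queue. */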
int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
{
	struct octeon_config *oct_cfg = NULL;
	struct octeon_droq *droq;

	oct_cfg = octeon_get_conf(oct);

	if (!oct_cfg)
		return -EINVAL;

	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
			__func__, q_no, oct->num_oqs - 1);
		return -EINVAL;
	}

	droq = oct->droq[q_no];

	if (!droq) {
		dev_info(&oct->pci_dev->dev,
			 "Droq id (%d) not available.\n", q_no);
		return 0;
	}

	droq->ops.fptr = NULL;
	droq->ops.farg = NULL;
	droq->ops.drop_on_max = 0;

	return 0;
}
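
/* Allocate a droq structure (NUMA-local when possible) and initialize the
 * queue. Returns 0 on success, 1 if the queue already exists, -1 on
 * allocation or init failure.
 */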
int octeon_create_droq(struct octeon_device *oct,
		       u32 q_no, u32 num_descs,
		       u32 desc_size, void *app_ctx)
{
	struct octeon_droq *droq;
	int numa_node = dev_to_node(&oct->pci_dev->dev);

	if (oct->droq[q_no]) {
		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
			q_no);
		return 1;
	}

	/* Allocate the DS for the new droq. */
	droq = vmalloc_node(sizeof(*droq), numa_node);
	if (!droq)
		droq = vmalloc(sizeof(*droq));
	if (!droq)
		return -1;

	memset(droq, 0, sizeof(struct octeon_droq));

	/* Disable the pkt o/p for this Q */
	octeon_set_droq_pkt_op(oct, q_no, 0);
	oct->droq[q_no] = droq;

	/* Initialize the Droq */
	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
		vfree(oct->droq[q_no]);
		oct->droq[q_no] = NULL;
		return -1;
	}

	oct->num_oqs++;

	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
		oct->num_oqs);

	/* Global Droq register settings are not required here; they are
	 * applied for all 32 Droqs at the same time.
	 */
	return 0;
}