#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dim.h>
#include <linux/slab.h>

#include "dpio.h"
#include "qbman-portal.h"

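/*
 * struct dpaa2_io - the DPIO service object for one QBMan software portal.
 * Wraps the qbman_swp portal together with the locking, notification and
 * Net DIM state needed to share the portal between users.
 */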
struct dpaa2_io {
	struct dpaa2_io_desc dpio_desc;
	struct qbman_swp_desc swp_desc;
	struct qbman_swp *swp;
	struct list_head node;
	/* protect against multiple management commands */
	spinlock_t lock_mgmt_cmd;
	/* protect notifications list */
	spinlock_t lock_notifications;
	struct list_head notifications;
	struct device *dev;

	/* Net DIM */
	struct dim rx_dim;
	/* protect against concurrent Net DIM updates */
	spinlock_t dim_lock;
	u16 event_ctr;
	u64 bytes;
	u64 frames;
};

struct dpaa2_io_store {
	unsigned int max;
	dma_addr_t paddr;
	struct dpaa2_dq *vaddr;
	void *alloced_addr;	/* unaligned value from kmalloc() */
	unsigned int idx;	/* position of the next-to-be-returned entry */
	struct qbman_swp *swp;	/* portal used to issue all storage-related commands */
	struct device *dev;	/* device used for DMA mapping */
};

/* keep a per-cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);

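/*
 * Select the DPIO object affine to @cpu (or to the current cpu when a
 * negative cpu is passed), unless one was already supplied in @d.
 */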
static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
						     int cpu)
{
	if (d)
		return d;

	if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
		return NULL;

	/*
	 * If cpu == DPAA2_IO_ANY_CPU, choose the current cpu, with no
	 * guarantees about potentially being migrated away.
	 */
	if (cpu < 0)
		cpu = raw_smp_processor_id();

	return dpio_by_cpu[cpu];
}

static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
	if (d)
		return d;

	d = service_select_by_cpu(d, -1);
	if (d)
		return d;

	/* No affine portal: round-robin over the registered DPIO objects */
	spin_lock(&dpio_list_lock);
	d = list_entry(dpio_list.next, struct dpaa2_io, node);
	list_del(&d->node);
	list_add_tail(&d->node, &dpio_list);
	spin_unlock(&dpio_list_lock);

	return d;
}

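/**
 * dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
 * @cpu: the cpu id
 *
 * Return the affine dpaa2_io service, or NULL if there is no service affined
 * to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next
 * available service.
 */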
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
	if (cpu == DPAA2_IO_ANY_CPU)
		return service_select(NULL);

	return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);

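/*
 * Net DIM worker: apply the moderation profile chosen by the DIM algorithm
 * as the new interrupt holdoff time for this portal.
 */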
static void dpaa2_io_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);

	dpaa2_io_set_irq_coalescing(d, moder.usec);
	dim->state = DIM_START_MEASURE;
}

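/**
 * dpaa2_io_create() - create a dpaa2_io object.
 * @desc: the dpaa2_io descriptor
 * @dev: the actual DPIO device
 *
 * Activates a "struct dpaa2_io" corresponding to the given config of an
 * actual DPIO object.
 *
 * Return a valid dpaa2_io object for success, or NULL for failure.
 */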
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
				 struct device *dev)
{
	struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
	u32 qman_256_cycles_per_ns;

	if (!obj)
		return NULL;

	/* check if CPU is out of range (-1 means any cpu) */
	if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
		kfree(obj);
		return NULL;
	}

	obj->dpio_desc = *desc;
	obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
	obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
	obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
	obj->swp_desc.qman_version = obj->dpio_desc.qman_version;

	/*
	 * Compute how many 256 QBMAN cycles fit into one ns. This is used
	 * when converting user-provided holdoff values in ns into the QBMAN
	 * internal cycle representation.
	 */
	qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
	obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
	obj->swp = qbman_swp_init(&obj->swp_desc);
	if (!obj->swp) {
		kfree(obj);
		return NULL;
	}

	INIT_LIST_HEAD(&obj->node);
	spin_lock_init(&obj->lock_mgmt_cmd);
	spin_lock_init(&obj->lock_notifications);
	spin_lock_init(&obj->dim_lock);
	INIT_LIST_HEAD(&obj->notifications);

	/* For now only enable DQRR interrupts */
	qbman_swp_interrupt_set_trigger(obj->swp,
					QBMAN_SWP_INTERRUPT_DQRI);
	qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
	if (obj->dpio_desc.receives_notifications)
		qbman_swp_push_set(obj->swp, 0, 1);

	spin_lock(&dpio_list_lock);
	list_add_tail(&obj->node, &dpio_list);
	if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
		dpio_by_cpu[desc->cpu] = obj;
	spin_unlock(&dpio_list_lock);

	obj->dev = dev;

	memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
	INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
	obj->event_ctr = 0;
	obj->bytes = 0;
	obj->frames = 0;

	return obj;
}

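/**
 * dpaa2_io_down() - release the dpaa2_io object.
 * @d: the dpaa2_io object to be released.
 *
 * Removes the object from the per-cpu lookup table and the service list,
 * then frees it.
 */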
void dpaa2_io_down(struct dpaa2_io *d)
{
	spin_lock(&dpio_list_lock);
	dpio_by_cpu[d->dpio_desc.cpu] = NULL;
	list_del(&d->node);
	spin_unlock(&dpio_list_lock);

	kfree(d);
}

#define DPAA_POLL_MAX 32

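/**
 * dpaa2_io_irq() - ISR for DPIO interrupts
 * @obj: the given DPIO object.
 *
 * Drains pending DQRR entries (bounded by DPAA_POLL_MAX) and dispatches
 * state-change notifications to their registered callbacks.
 *
 * Return IRQ_HANDLED for success, or IRQ_NONE if there was no pending
 * interrupt status.
 */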
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
	const struct dpaa2_dq *dq;
	int max = 0;
	struct qbman_swp *swp;
	u32 status;

	obj->event_ctr++;

	swp = obj->swp;
	status = qbman_swp_interrupt_read_status(swp);
	if (!status)
		return IRQ_NONE;

	dq = qbman_swp_dqrr_next(swp);
	while (dq) {
		if (qbman_result_is_SCN(dq)) {
			struct dpaa2_io_notification_ctx *ctx;
			u64 q64;

			q64 = qbman_result_SCN_ctx(dq);
			ctx = (void *)(uintptr_t)q64;
			ctx->cb(ctx);
		} else {
			pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
		}
		qbman_swp_dqrr_consume(swp, dq);
		++max;
		if (max > DPAA_POLL_MAX)
			goto done;
		dq = qbman_swp_dqrr_next(swp);
	}
done:
	qbman_swp_interrupt_clear_status(swp, status);
	qbman_swp_interrupt_set_inhibit(swp, 0);
	return IRQ_HANDLED;
}

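/**
 * dpaa2_io_get_cpu() - get the cpu associated with a given DPIO object
 * @d: the given DPIO object.
 *
 * Return the cpu associated with the DPIO object.
 */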
int dpaa2_io_get_cpu(struct dpaa2_io *d)
{
	return d->dpio_desc.cpu;
}
EXPORT_SYMBOL(dpaa2_io_get_cpu);

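/**
 * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
 *                               notifications on the given DPIO service.
 * @d:   the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests the register
 *
 * Adds @ctx to the notification list of the selected DPIO service and, for
 * CDAN contexts, programs the channel context into the portal. The resulting
 * ctx->dpio_id and ctx->qman64 values are the ones the caller should pass to
 * the MC when attaching the DPAA2 object to the DPIO.
 *
 * Return 0 for success, -ENODEV if no service is available for the desired
 * cpu, or -EINVAL if the device link cannot be created.
 */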
int dpaa2_io_service_register(struct dpaa2_io *d,
			      struct dpaa2_io_notification_ctx *ctx,
			      struct device *dev)
{
	struct device_link *link;
	unsigned long irqflags;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (!d)
		return -ENODEV;

	link = device_link_add(dev, d->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return -EINVAL;

	ctx->dpio_id = d->dpio_desc.dpio_id;
	ctx->qman64 = (u64)(uintptr_t)ctx;
	ctx->dpio_private = d;
	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_add(&ctx->node, &d->notifications);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);

	/* Enable the generation of CDAN notifications */
	if (ctx->is_cdan)
		return qbman_swp_CDAN_set_context_enable(d->swp,
							 (u16)ctx->id,
							 ctx->qman64);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);

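/**
 * dpaa2_io_service_deregister() - The opposite of 'register'.
 * @service: the given DPIO service.
 * @ctx: the notification context.
 * @dev: the device that requests to be deregistered
 *
 * Disables CDAN generation (for CDAN contexts) and removes @ctx from the
 * notification list of the DPIO service it was registered with.
 */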
void dpaa2_io_service_deregister(struct dpaa2_io *service,
				 struct dpaa2_io_notification_ctx *ctx,
				 struct device *dev)
{
	struct dpaa2_io *d = ctx->dpio_private;
	unsigned long irqflags;

	if (ctx->is_cdan)
		qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);

	spin_lock_irqsave(&d->lock_notifications, irqflags);
	list_del(&ctx->node);
	spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);

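/**
 * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
 * @d: the given DPIO service.
 * @ctx: the notification context.
 *
 * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
 * considered "disarmed" and produces no further notifications until it is
 * rearmed with this function.
 *
 * Return 0 for success, or a negative error code.
 */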
int dpaa2_io_service_rearm(struct dpaa2_io *d,
			   struct dpaa2_io_notification_ctx *ctx)
{
	unsigned long irqflags;
	int err;

	d = service_select_by_cpu(d, ctx->desired_cpu);
	if (unlikely(!d))
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	if (ctx->is_cdan)
		err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
	else
		err = qbman_swp_fq_schedule(d->swp, ctx->id);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);

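/**
 * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */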
int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
			     struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_fq(&pd, fqid);

	d = service_select(d);
	if (!d)
		return -ENODEV;
	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL(dpaa2_io_service_pull_fq);

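/**
 * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
 * @d: the given DPIO service.
 * @channelid: the given channel id.
 * @s: the dpaa2_io_store object for the result.
 *
 * Return 0 for success, or error code for failure.
 */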
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
				  struct dpaa2_io_store *s)
{
	struct qbman_pull_desc pd;
	int err;

	qbman_pull_desc_clear(&pd);
	qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
	qbman_pull_desc_set_numframes(&pd, (u8)s->max);
	qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);

	d = service_select(d);
	if (!d)
		return -ENODEV;

	s->swp = d->swp;
	err = qbman_swp_pull(d->swp, &pd);
	if (err)
		s->swp = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);

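/**
 * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for a successful enqueue, -ENODEV if there is no dpio service,
 * or another negative error code if the enqueue ring is not ready.
 */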
int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
				u32 fqid,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

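/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return the number of frames successfully enqueued (which may be less than
 * @nb), or a negative error code such as -ENODEV if there is no dpio service.
 */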
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
					 u32 fqid,
					 const struct dpaa2_fd *fd,
					 int nb)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_fq(&ed, fqid);

	return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);

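/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queues using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptors which are enqueued.
 * @nb: number of frames to be enqueued
 *
 * Return the number of frames successfully enqueued (which may be less than
 * @nb), -ENOMEM if the descriptor array cannot be allocated, or -ENODEV if
 * there is no dpio service.
 */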
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
					      u32 *fqid,
					      const struct dpaa2_fd *fd,
					      int nb)
{
	struct qbman_eq_desc *ed;
	int i, ret;

	ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
	if (!ed)
		return -ENOMEM;

	d = service_select(d);
	if (!d) {
		ret = -ENODEV;
		goto out;
	}

	for (i = 0; i < nb; i++) {
		qbman_eq_desc_clear(&ed[i]);
		qbman_eq_desc_set_no_orp(&ed[i], 0);
		qbman_eq_desc_set_fq(&ed[i], fqid[i]);
	}

	ret = qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
out:
	kfree(ed);
	return ret;
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

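/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a queuing destination.
 * @d: the given DPIO service.
 * @qdid: the given queuing destination id.
 * @prio: the given queuing priority.
 * @qdbin: the given queuing destination bin.
 * @fd: the frame descriptor which is enqueued.
 *
 * Return 0 for a successful enqueue, -ENODEV if there is no dpio service,
 * or another negative error code if the enqueue ring is not ready.
 */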
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
				u32 qdid, u8 prio, u16 qdbin,
				const struct dpaa2_fd *fd)
{
	struct qbman_eq_desc ed;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);

	return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);

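/**
 * dpaa2_io_service_release() - Release buffers to a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffers to be released.
 * @num_buffers: the number of the buffers to be released.
 *
 * Return 0 for success, and negative error code for failure.
 */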
int dpaa2_io_service_release(struct dpaa2_io *d,
			     u16 bpid,
			     const u64 *buffers,
			     unsigned int num_buffers)
{
	struct qbman_release_desc rd;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);

	return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);

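/**
 * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
 * @d: the given DPIO object.
 * @bpid: the buffer pool id.
 * @buffers: the buffer addresses for acquired buffers.
 * @num_buffers: the expected number of the buffers to acquire.
 *
 * Return a negative error code if the command failed, otherwise the number
 * of buffers acquired, which may be less than the number requested (for
 * example zero if the buffer pool is empty).
 */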
int dpaa2_io_service_acquire(struct dpaa2_io *d,
			     u16 bpid,
			     u64 *buffers,
			     unsigned int num_buffers)
{
	unsigned long irqflags;
	int err;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);

	return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

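/**
 * dpaa2_io_store_create() - Create the DMA memory storage for dequeue results.
 * @max_frames: the maximum number of dequeued frames, must be <= 32.
 * @dev:        the device used for DMA mapping/unmapping.
 *
 * The usable storage is "max_frames * sizeof(struct dpaa2_dq)" bytes; the
 * dpaa2_io_store wrapper allows easy iteration over the dequeue results.
 *
 * Return a pointer to the dpaa2_io_store struct for successfully created
 * storage memory, or NULL on error.
 */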
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
					     struct device *dev)
{
	struct dpaa2_io_store *ret;
	size_t size;

	if (!max_frames || (max_frames > 32))
		return NULL;

	ret = kmalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->max = max_frames;
	size = max_frames * sizeof(struct dpaa2_dq) + 64;
	ret->alloced_addr = kzalloc(size, GFP_KERNEL);
	if (!ret->alloced_addr) {
		kfree(ret);
		return NULL;
	}

	/* The extra 64 bytes allocated above allow 64-byte alignment here */
	ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
	ret->paddr = dma_map_single(dev, ret->vaddr,
				    sizeof(struct dpaa2_dq) * max_frames,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, ret->paddr)) {
		kfree(ret->alloced_addr);
		kfree(ret);
		return NULL;
	}

	ret->idx = 0;
	ret->dev = dev;

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);

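/**
 * dpaa2_io_store_destroy() - Frees the DMA memory storage for dequeue
 *                            results.
 * @s: the storage memory to be destroyed.
 */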
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
	dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
			 DMA_FROM_DEVICE);
	kfree(s->alloced_addr);
	kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);

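/**
 * dpaa2_io_store_next() - Determine when the next dequeue result is available.
 * @s: the dpaa2_io_store object.
 * @is_last: indicate whether this is the last frame in the pull command.
 *
 * Walks the store filled in by a previous pull command; *is_last is set when
 * the entry that terminates the pull is reached.
 *
 * Return the next dequeue result, or NULL if no (further) valid result is
 * available yet.
 */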
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
	int match;
	struct dpaa2_dq *ret = &s->vaddr[s->idx];

	match = qbman_result_has_new_result(s->swp, ret);
	if (!match) {
		*is_last = 0;
		return NULL;
	}

	s->idx++;

	if (dpaa2_dq_is_pull_complete(ret)) {
		*is_last = 1;
		s->idx = 0;
		/*
		 * If the terminating entry carries no valid frame, return
		 * NULL to the caller rather than expecting it to check the
		 * VALIDFRAME flag every time.
		 */
		if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
			ret = NULL;
	} else {
		prefetch(&s->vaddr[s->idx]);
		*is_last = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);

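/**
 * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq.
 * @d: the given DPIO object.
 * @fqid: the id of the frame queue to be queried.
 * @fcnt: the queried frame count.
 * @bcnt: the queried byte count.
 *
 * Knowing the FQ count at run-time can be useful in debugging situations;
 * the instantaneous frame and byte counts are returned here.
 *
 * Return 0 for a successful query, and negative error code if the query fails.
 */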
int dpaa2_io_query_fq_count(struct dpaa2_io *d, u32 fqid,
			    u32 *fcnt, u32 *bcnt)
{
	struct qbman_fq_query_np_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_fq_query_state(swp, fqid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*fcnt = qbman_fq_state_frame_count(&state);
	*bcnt = qbman_fq_state_byte_count(&state);

	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_fq_count);

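/**
 * dpaa2_io_query_bp_count() - Query the number of buffers currently in a
 *                             buffer pool.
 * @d: the given DPIO object.
 * @bpid: the index of the buffer pool to be queried.
 * @num: the queried number of buffers in the buffer pool.
 *
 * Return 0 for a successful query, and negative error code if the query fails.
 */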
int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
{
	struct qbman_bp_query_rslt state;
	struct qbman_swp *swp;
	unsigned long irqflags;
	int ret;

	d = service_select(d);
	if (!d)
		return -ENODEV;

	swp = d->swp;
	spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
	ret = qbman_bp_query(swp, bpid, &state);
	spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
	if (ret)
		return ret;
	*num = qbman_bp_info_num_free_bufs(&state);
	return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);

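/**
 * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
 * @d: the given DPIO object
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 *
 * Return 0 for success, or a negative error code on error.
 */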
int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
{
	struct qbman_swp *swp = d->swp;

	return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
					    irq_holdoff);
}
EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);

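/**
 * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
 * @d: the given DPIO object
 * @irq_holdoff: interrupt holdoff (timeout) period in us
 */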
void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
{
	struct qbman_swp *swp = d->swp;

	qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
}
EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);

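/**
 * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
 * @d: the given DPIO object
 * @use_adaptive_rx_coalesce: adaptive coalescing state
 */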
void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
				      int use_adaptive_rx_coalesce)
{
	d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
}
EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);

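/**
 * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
 * @d: the given DPIO object
 *
 * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
 * otherwise.
 */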
int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
{
	return d->swp->use_adaptive_rx_coalesce;
}
EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);

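/**
 * dpaa2_io_update_net_dim() - Update Net DIM
 * @d: the given DPIO object
 * @frames: how many frames have been dequeued by the user since the last call
 * @bytes: how many bytes have been dequeued by the user since the last call
 */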
void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
{
	struct dim_sample dim_sample = {};

	if (!d->swp->use_adaptive_rx_coalesce)
		return;

	spin_lock(&d->dim_lock);

	d->bytes += bytes;
	d->frames += frames;

	dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
	net_dim(&d->rx_dim, dim_sample);

	spin_unlock(&d->dim_lock);
}
EXPORT_SYMBOL(dpaa2_io_update_net_dim);