#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"

static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
        return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
}

static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
{
        return qdisc->children[id] &&
               qdisc->children[id] != NFP_QDISC_UNTRACKED;
}

static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
        return rtnl_dereference(*slot);
}

static void
nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
                        struct nfp_alink_stats *child)
{
        parent->tx_pkts += child->tx_pkts;
        parent->tx_bytes += child->tx_bytes;
        parent->backlog_pkts += child->backlog_pkts;
        parent->backlog_bytes += child->backlog_bytes;
        parent->overlimits += child->overlimits;
        parent->drops += child->drops;
}

static void
nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
                         unsigned int queue)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        unsigned int i;
        int err;

        if (!qdisc->offloaded)
                return;

        for (i = 0; i < qdisc->red.num_bands; i++) {
                err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
                                                &qdisc->red.band[i].stats);
                if (err)
                        nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
                                i, queue, err);

                err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
                                                 &qdisc->red.band[i].xstats);
                if (err)
                        nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
                                i, queue, err);
        }
}

static void
nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
        unsigned int i;

        if (qdisc->type != NFP_QDISC_MQ)
                return;

        for (i = 0; i < alink->total_queues; i++)
                if (nfp_abm_qdisc_child_valid(qdisc, i))
                        nfp_abm_stats_update_red(alink, qdisc->children[i], i);
}

static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
{
        alink->last_stats_update = time_now;
        if (alink->root_qdisc)
                nfp_abm_stats_update_mq(alink, alink->root_qdisc);
}

static void nfp_abm_stats_update(struct nfp_abm_link *alink)
{
        u64 now;

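        /* Rate-limit device reads - stats of non-leaf qdiscs are sums over
         * all their leaves, so a single dump would otherwise re-read the
         * same per-queue counters many times.
         */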
        now = ktime_get();
        if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
                return;

        __nfp_abm_stats_update(alink, now);
}

static void
nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
                              unsigned int start, unsigned int end)
{
        unsigned int i;

        for (i = start; i < end; i++)
                if (nfp_abm_qdisc_child_valid(qdisc, i)) {
                        qdisc->children[i]->use_cnt--;
                        qdisc->children[i] = NULL;
                }
}

static void
nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
        unsigned int i;

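        /* Only complain if the qdisc is still in use; losing offload while
         * being unlinked is expected.
         */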
        if (qdisc->use_cnt)
                nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
                         qdisc->handle);

        if (!nfp_abm_qdisc_is_red(qdisc))
                return;

        for (i = 0; i < qdisc->red.num_bands; i++) {
                qdisc->red.band[i].stats.backlog_pkts = 0;
                qdisc->red.band[i].stats.backlog_bytes = 0;
        }
}

static int
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
                     unsigned int queue, struct nfp_alink_stats *prev_stats,
                     struct nfp_alink_xstats *prev_xstats)
{
        u64 backlog_pkts, backlog_bytes;
        int err;

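        /* Carry the backlog counters over - taking a fresh baseline from
         * the device must not wipe the backlog already reported to the
         * stack.
         */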
        backlog_pkts = prev_stats->backlog_pkts;
        backlog_bytes = prev_stats->backlog_bytes;

        err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
        if (err) {
                nfp_err(alink->abm->app->cpp,
                        "RED stats init (%d, %d) failed with error %d\n",
                        band, queue, err);
                return err;
        }

        err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
        if (err) {
                nfp_err(alink->abm->app->cpp,
                        "RED xstats init (%d, %d) failed with error %d\n",
                        band, queue, err);
                return err;
        }

        prev_stats->backlog_pkts = backlog_pkts;
        prev_stats->backlog_bytes = backlog_bytes;
        return 0;
}

static int
nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
                   unsigned int queue)
{
        unsigned int i;
        int err;

        for (i = 0; i < qdisc->red.num_bands; i++) {
                err = __nfp_abm_stats_init(alink, i, queue,
                                           &qdisc->red.band[i].prev_stats,
                                           &qdisc->red.band[i].prev_xstats);
                if (err)
                        return err;
        }

        return 0;
}

static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
                            unsigned int queue)
{
        bool good_red, good_gred;
        unsigned int i;

        good_red = qdisc->type == NFP_QDISC_RED &&
                   qdisc->params_ok &&
                   qdisc->use_cnt == 1 &&
                   !alink->has_prio &&
                   !qdisc->children[0];
        good_gred = qdisc->type == NFP_QDISC_GRED &&
                    qdisc->params_ok &&
                    qdisc->use_cnt == 1;
        qdisc->offload_mark = good_red || good_gred;

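        /* If offload is only now starting, take a baseline of the stats */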
        if (qdisc->offload_mark && !qdisc->offloaded)
                if (nfp_abm_stats_init(alink, qdisc, queue))
                        qdisc->offload_mark = false;

        if (!qdisc->offload_mark)
                return;

        for (i = 0; i < alink->abm->num_bands; i++) {
                enum nfp_abm_q_action act;

                nfp_abm_ctrl_set_q_lvl(alink, i, queue,
                                       qdisc->red.band[i].threshold);
                act = qdisc->red.band[i].ecn ?
                        NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP;
                nfp_abm_ctrl_set_q_act(alink, i, queue, act);
        }
}

static void
nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
        unsigned int i;

        qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
        if (!qdisc->offload_mark)
                return;

        for (i = 0; i < alink->total_queues; i++) {
                struct nfp_qdisc *child = qdisc->children[i];

                if (!nfp_abm_qdisc_child_valid(qdisc, i))
                        continue;

                nfp_abm_offload_compile_red(alink, child, i);
        }
}

void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
        struct nfp_abm *abm = alink->abm;
        struct radix_tree_iter iter;
        struct nfp_qdisc *qdisc;
        void __rcu **slot;
        size_t i;

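        /* Mark all thresholds for this link's queues as unconfigured */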
        for (i = 0; i < abm->num_bands; i++)
                __bitmap_set(abm->threshold_undef,
                             i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
                             alink->total_queues);

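        /* Clear offload marks before recomputing them */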
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
                qdisc->offload_mark = false;
        }

        if (alink->root_qdisc)
                nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

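        /* Refresh offload status - stop offload for qdiscs which lost it */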
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
                if (!qdisc->offload_mark && qdisc->offloaded)
                        nfp_abm_qdisc_offload_stop(alink, qdisc);
                qdisc->offloaded = qdisc->offload_mark;
        }

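        /* Reset any thresholds left unconfigured */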
        for (i = 0; i < abm->num_thresholds; i++)
                if (test_bit(i, abm->threshold_undef))
                        __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

        __nfp_abm_stats_update(alink, ktime_get());
}

static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
                       struct nfp_qdisc *qdisc)
{
        struct radix_tree_iter iter;
        unsigned int mq_refs = 0;
        void __rcu **slot;

        if (!qdisc->use_cnt)
                return;
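        /* The root MQ going away as part of netdev unregister still holds
         * its reference; that is expected, so skip the accounting below.
         */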
        if (qdisc->type == NFP_QDISC_MQ &&
            qdisc == alink->root_qdisc &&
            netdev->reg_state == NETREG_UNREGISTERING)
                return;

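        /* Count references held by MQ instances and clear their child
         * pointers to this qdisc.
         */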
        radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
                struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
                unsigned int i;

                if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
                        continue;
                for (i = 0; i < mq->num_children; i++)
                        if (mq->children[i] == qdisc) {
                                mq->children[i] = NULL;
                                mq_refs++;
                        }
        }

        WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
             qdisc->use_cnt, mq_refs);
}

static void
nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
                   struct nfp_qdisc *qdisc)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);

        if (!qdisc)
                return;
        nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
        WARN_ON(radix_tree_delete(&alink->qdiscs,
                                  TC_H_MAJ(qdisc->handle)) != qdisc);

        kfree(qdisc->children);
        kfree(qdisc);

        port->tc_offload_cnt--;
}

static struct nfp_qdisc *
nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
                    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
                    unsigned int children)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_qdisc *qdisc;
        int err;

        qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
        if (!qdisc)
                return NULL;

        if (children) {
                qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
                if (!qdisc->children)
                        goto err_free_qdisc;
        }

        qdisc->netdev = netdev;
        qdisc->type = type;
        qdisc->parent_handle = parent_handle;
        qdisc->handle = handle;
        qdisc->num_children = children;

        err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
        if (err) {
                nfp_err(alink->abm->app->cpp,
                        "Qdisc insertion into radix tree failed: %d\n", err);
                goto err_free_child_tbl;
        }

        port->tc_offload_cnt++;
        return qdisc;

err_free_child_tbl:
        kfree(qdisc->children);
err_free_qdisc:
        kfree(qdisc);
        return NULL;
}

static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
        return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}

static int
nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
                      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
                      unsigned int children, struct nfp_qdisc **qdisc)
{
        *qdisc = nfp_abm_qdisc_find(alink, handle);
        if (*qdisc) {
                if (WARN_ON((*qdisc)->type != type))
                        return -EINVAL;
                return 1;
        }

        *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
                                     children);
        return *qdisc ? 0 : -ENOMEM;
}

static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
                      u32 handle)
{
        struct nfp_qdisc *qdisc;

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return;

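        /* Drop the root's extra reference here - a separate root teardown
         * callback may not arrive (e.g. on netdev unregister).
         */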
        if (alink->root_qdisc == qdisc)
                qdisc->use_cnt--;

        nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
        nfp_abm_qdisc_free(netdev, alink, qdisc);

        if (alink->root_qdisc == qdisc) {
                alink->root_qdisc = NULL;
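                /* Destroying the root tears down the whole offload state,
                 * refresh it once the root pointer is cleared.
                 */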
                nfp_abm_qdisc_offload_update(alink);
        }
}

static int
nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
                    unsigned int id)
{
        struct nfp_qdisc *parent, *child;

        parent = nfp_abm_qdisc_find(alink, handle);
        if (!parent)
                return 0;

        if (WARN(id >= parent->num_children,
                 "graft child out of bound %d >= %d\n",
                 id, parent->num_children))
                return -EINVAL;

        nfp_abm_qdisc_unlink_children(parent, id, id + 1);

        child = nfp_abm_qdisc_find(alink, child_handle);
        if (child)
                child->use_cnt++;
        else
                child = NFP_QDISC_UNTRACKED;
        parent->children[id] = child;

        nfp_abm_qdisc_offload_update(alink);

        return 0;
}

static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
                        struct nfp_alink_stats *old,
                        struct gnet_stats_basic_sync *bstats,
                        struct gnet_stats_queue *qstats)
{
        _bstats_update(bstats, new->tx_bytes - old->tx_bytes,
                       new->tx_pkts - old->tx_pkts);
        qstats->qlen += new->backlog_pkts - old->backlog_pkts;
        qstats->backlog += new->backlog_bytes - old->backlog_bytes;
        qstats->overlimits += new->overlimits - old->overlimits;
        qstats->drops += new->drops - old->drops;
}

static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
                            struct nfp_alink_xstats *old,
                            struct red_stats *stats)
{
        stats->forced_mark += new->ecn_marked - old->ecn_marked;
        stats->pdrop += new->pdrop - old->pdrop;
}

static int
nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
                   struct tc_gred_qopt_offload_stats *stats)
{
        struct nfp_qdisc *qdisc;
        unsigned int i;

        nfp_abm_stats_update(alink);

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return -EOPNOTSUPP;

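        /* Even if offload has stopped, report the deltas so the backlog
         * counters can drain back to zero.
         */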
        for (i = 0; i < qdisc->red.num_bands; i++) {
                if (!stats->xstats[i])
                        continue;

                nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
                                        &qdisc->red.band[i].prev_stats,
                                        &stats->bstats[i], &stats->qstats[i]);
                qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;

                nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
                                            &qdisc->red.band[i].prev_xstats,
                                            stats->xstats[i]);
                qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
        }

        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
                          struct tc_gred_qopt_offload *opt)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        struct nfp_abm *abm = alink->abm;
        unsigned int i;

        if (opt->set.grio_on || opt->set.wred_on) {
                nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.dp_def != alink->def_band) {
                nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
                         alink->def_band, opt->parent, opt->handle);
                return false;
        }
        if (opt->set.dp_cnt != abm->num_bands) {
                nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
                         abm->num_bands, opt->parent, opt->handle);
                return false;
        }

        for (i = 0; i < abm->num_bands; i++) {
                struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];

                if (!band->present)
                        return false;
                if (!band->is_ecn && !nfp_abm_has_drop(abm)) {
                        nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->is_ecn && !nfp_abm_has_mark(abm)) {
                        nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->is_harddrop) {
                        nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->min != band->max) {
                        nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
                                 opt->parent, opt->handle, i);
                        return false;
                }
                if (band->min > S32_MAX) {
                        nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
                                 band->min, S32_MAX, opt->parent, opt->handle,
                                 i);
                        return false;
                }
        }

        return true;
}

static int
nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
                     struct tc_gred_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;
        unsigned int i;
        int ret;

        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
                                    opt->handle, 0, &qdisc);
        if (ret < 0)
                return ret;

        qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
        if (qdisc->params_ok) {
                qdisc->red.num_bands = opt->set.dp_cnt;
                for (i = 0; i < qdisc->red.num_bands; i++) {
                        qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn;
                        qdisc->red.band[i].threshold = opt->set.tab[i].min;
                }
        }

        if (qdisc->use_cnt)
                nfp_abm_qdisc_offload_update(alink);

        return 0;
}

int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
                          struct tc_gred_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_GRED_REPLACE:
                return nfp_abm_gred_replace(netdev, alink, opt);
        case TC_GRED_DESTROY:
                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
                return 0;
        case TC_GRED_STATS:
                return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;

        nfp_abm_stats_update(alink);

        qdisc = nfp_abm_qdisc_find(alink, opt->handle);
        if (!qdisc || !qdisc->offloaded)
                return -EOPNOTSUPP;

        nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
                                    &qdisc->red.band[0].prev_xstats,
                                    opt->xstats);
        qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
        return 0;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
                  struct tc_qopt_offload_stats *stats)
{
        struct nfp_qdisc *qdisc;

        nfp_abm_stats_update(alink);

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return -EOPNOTSUPP;

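        /* Even if offload has stopped, report the deltas so the backlog
         * counters can drain back to zero.
         */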
        nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
                                &qdisc->red.band[0].prev_stats,
                                stats->bstats, stats->qstats);
        qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;

        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
                         struct tc_red_qopt_offload *opt)
{
        struct nfp_cpp *cpp = alink->abm->app->cpp;
        struct nfp_abm *abm = alink->abm;

        if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) {
                nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) {
                nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.is_harddrop) {
                nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.min != opt->set.max) {
                nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
                         opt->parent, opt->handle);
                return false;
        }
        if (opt->set.min > NFP_ABM_LVL_INFINITY) {
                nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
                         opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
                         opt->handle);
                return false;
        }

        return true;
}

static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
                    struct tc_red_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;
        int ret;

        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
                                    opt->handle, 1, &qdisc);
        if (ret < 0)
                return ret;

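        /* If limit != 0 the child gets reset */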
        if (opt->set.limit) {
                if (nfp_abm_qdisc_child_valid(qdisc, 0))
                        qdisc->children[0]->use_cnt--;
                qdisc->children[0] = NULL;
        } else {
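                /* A RED qdisc just created without a limit uses noop_qdisc
                 * as its child - mark it as untracked.
                 */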
                if (!ret)
                        qdisc->children[0] = NFP_QDISC_UNTRACKED;
        }

        qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
        if (qdisc->params_ok) {
                qdisc->red.num_bands = 1;
                qdisc->red.band[0].ecn = opt->set.is_ecn;
                qdisc->red.band[0].threshold = opt->set.min;
        }

        if (qdisc->use_cnt == 1)
                nfp_abm_qdisc_offload_update(alink);

        return 0;
}

int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
                         struct tc_red_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_RED_REPLACE:
                return nfp_abm_red_replace(netdev, alink, opt);
        case TC_RED_DESTROY:
                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
                return 0;
        case TC_RED_STATS:
                return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
        case TC_RED_XSTATS:
                return nfp_abm_red_xstats(alink, opt);
        case TC_RED_GRAFT:
                return nfp_abm_qdisc_graft(alink, opt->handle,
                                           opt->child_handle, 0);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
                  struct tc_mq_qopt_offload *opt)
{
        struct nfp_qdisc *qdisc;
        int ret;

        ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
                                    TC_H_ROOT, opt->handle, alink->total_queues,
                                    &qdisc);
        if (ret < 0)
                return ret;

        qdisc->params_ok = true;
        qdisc->offloaded = true;
        nfp_abm_qdisc_offload_update(alink);
        return 0;
}

static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
                 struct tc_qopt_offload_stats *stats)
{
        struct nfp_qdisc *qdisc, *red;
        unsigned int i, j;

        qdisc = nfp_abm_qdisc_find(alink, handle);
        if (!qdisc)
                return -EOPNOTSUPP;

        nfp_abm_stats_update(alink);

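        /* MQ stats are recomputed on every dump by summing the offloaded
         * RED children, so zero the accumulators first.
         */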
        memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
        memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

        for (i = 0; i < qdisc->num_children; i++) {
                if (!nfp_abm_qdisc_child_valid(qdisc, i))
                        continue;

                if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
                        continue;
                red = qdisc->children[i];

                for (j = 0; j < red->red.num_bands; j++) {
                        nfp_abm_stats_propagate(&qdisc->mq.stats,
                                                &red->red.band[j].stats);
                        nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
                                                &red->red.band[j].prev_stats);
                }
        }

        nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
                                stats->bstats, stats->qstats);

        return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
                        struct tc_mq_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_MQ_CREATE:
                return nfp_abm_mq_create(netdev, alink, opt);
        case TC_MQ_DESTROY:
                nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
                return 0;
        case TC_MQ_STATS:
                return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
        case TC_MQ_GRAFT:
                return nfp_abm_qdisc_graft(alink, opt->handle,
                                           opt->graft_params.child_handle,
                                           opt->graft_params.queue);
        default:
                return -EOPNOTSUPP;
        }
}

int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
                       struct tc_root_qopt_offload *opt)
{
        if (opt->ingress)
                return -EOPNOTSUPP;
        if (alink->root_qdisc)
                alink->root_qdisc->use_cnt--;
        alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
        if (alink->root_qdisc)
                alink->root_qdisc->use_cnt++;

        nfp_abm_qdisc_offload_update(alink);

        return 0;
}