/* net/sched/sch_mqprio.c
 *
 * mqprio: a multi-queue priority qdisc that maps skb priorities to traffic
 * classes and traffic classes to ranges of hardware transmit queues, with
 * optional offload of the mapping and shaper parameters to the device.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>

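/* Per-instance private data: one pre-allocated child qdisc per TX queue,
 * plus the offload mode, shaper type and per-traffic-class rate limits
 * passed to the driver when hardware offload is requested.
 */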
struct mqprio_sched {
	struct Qdisc **qdiscs;
	u16 mode;
	u16 shaper;
	int hw_offload;
	u32 flags;
	u64 min_rate[TC_QOPT_MAX_QUEUE];
	u64 max_rate[TC_QOPT_MAX_QUEUE];
};

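/* Release the per-queue child qdiscs and tell the driver (if the config was
 * offloaded) or the stack (if not) to drop the traffic class mapping.
 */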
static void mqprio_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	unsigned int ntx;

	if (priv->qdiscs) {
		for (ntx = 0;
		     ntx < dev->num_tx_queues && priv->qdiscs[ntx];
		     ntx++)
			qdisc_put(priv->qdiscs[ntx]);
		kfree(priv->qdiscs);
	}

	if (priv->hw_offload && dev->netdev_ops->ndo_setup_tc) {
		struct tc_mqprio_qopt_offload mqprio = { { 0 } };

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
		case TC_MQPRIO_MODE_CHANNEL:
			dev->netdev_ops->ndo_setup_tc(dev,
						      TC_SETUP_QDISC_MQPRIO,
						      &mqprio);
			break;
		default:
			return;
		}
	} else {
		netdev_set_num_tc(dev, 0);
	}
}

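/* Validate a struct tc_mqprio_qopt supplied from user space: the number of
 * traffic classes, the priority-to-class map, and (for the software path)
 * that each class's queue range fits within real_num_tx_queues and does not
 * overlap the others. With hardware offload the queue layout is left to the
 * driver to validate.
 */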
static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt)
{
	int i, j;

	/* Verify num_tc is not out of max range */
	if (qopt->num_tc > TC_MAX_QUEUE)
		return -EINVAL;

	/* Verify priority mapping uses valid tcs */
	for (i = 0; i < TC_BITMASK + 1; i++) {
		if (qopt->prio_tc_map[i] >= qopt->num_tc)
			return -EINVAL;
	}

	/* Limit qopt->hw to the maximum supported offload value; drivers may
	 * still reject or adjust the requested offload type later.
	 */
	if (qopt->hw > TC_MQPRIO_HW_OFFLOAD_MAX)
		qopt->hw = TC_MQPRIO_HW_OFFLOAD_MAX;

	/* If hardware offload is requested, the device either populates the
	 * queue counts itself or validates the provided ones, so all that
	 * matters here is that ndo_setup_tc is implemented at all.
	 */
	if (qopt->hw)
		return dev->netdev_ops->ndo_setup_tc ? 0 : -EINVAL;

	for (i = 0; i < qopt->num_tc; i++) {
		unsigned int last = qopt->offset[i] + qopt->count[i];

		/* Verify the queue range lies within the device's real TX
		 * queues; last == real_num_tx_queues means the final queue
		 * is in use.
		 */
		if (qopt->offset[i] >= dev->real_num_tx_queues ||
		    !qopt->count[i] ||
		    last > dev->real_num_tx_queues)
			return -EINVAL;

		/* Verify that the offsets and counts do not overlap */
		for (j = i + 1; j < qopt->num_tc; j++) {
			if (last > qopt->offset[j])
				return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy mqprio_policy[TCA_MQPRIO_MAX + 1] = {
	[TCA_MQPRIO_MODE]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_SHAPER]	= { .len = sizeof(u16) },
	[TCA_MQPRIO_MIN_RATE64]	= { .type = NLA_NESTED },
	[TCA_MQPRIO_MAX_RATE64]	= { .type = NLA_NESTED },
};

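/* Parse the TCA_MQPRIO_* attributes that user space may append after the
 * fixed-size struct tc_mqprio_qopt inside TCA_OPTIONS. If nothing follows
 * the struct, the attribute table is simply zeroed.
 */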
static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

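/* Validate the requested configuration, pre-allocate one child qdisc per TX
 * queue, and then either hand the mapping to the driver (hardware offload)
 * or program the software traffic-class-to-queue mapping on the netdev.
 */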
static int mqprio_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct netdev_queue *dev_queue;
	struct Qdisc *qdisc;
	int i, err = -EOPNOTSUPP;
	struct tc_mqprio_qopt *qopt = NULL;
	struct nlattr *tb[TCA_MQPRIO_MAX + 1];
	struct nlattr *attr;
	int rem;
	int len;

	BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE);
	BUILD_BUG_ON(TC_BITMASK != TC_QOPT_BITMASK);

	if (sch->parent != TC_H_ROOT)
		return -EOPNOTSUPP;

	if (!netif_is_multiqueue(dev))
		return -EOPNOTSUPP;

	/* Make certain we can allocate enough classids to handle the queues */
	if (dev->num_tx_queues >= TC_H_MIN_PRIORITY)
		return -ENOMEM;

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = nla_data(opt);
	if (mqprio_parse_opt(dev, qopt))
		return -EINVAL;

	len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt));
	if (len > 0) {
		err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy,
				 sizeof(*qopt));
		if (err < 0)
			return err;

		if (!qopt->hw)
			return -EINVAL;

		if (tb[TCA_MQPRIO_MODE]) {
			priv->flags |= TC_MQPRIO_F_MODE;
			priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]);
		}

		if (tb[TCA_MQPRIO_SHAPER]) {
			priv->flags |= TC_MQPRIO_F_SHAPER;
			priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]);
		}

		if (tb[TCA_MQPRIO_MIN_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->min_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MIN_RATE;
		}

		if (tb[TCA_MQPRIO_MAX_RATE64]) {
			if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE)
				return -EINVAL;
			i = 0;
			nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64],
					    rem) {
				if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64)
					return -EINVAL;
				if (i >= qopt->num_tc)
					break;
				priv->max_rate[i] = *(u64 *)nla_data(attr);
				i++;
			}
			priv->flags |= TC_MQPRIO_F_MAX_RATE;
		}
	}

	/* Pre-allocate the per-queue child qdiscs; attach can't fail */
	priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
			       GFP_KERNEL);
	if (!priv->qdiscs)
		return -ENOMEM;

	for (i = 0; i < dev->num_tx_queues; i++) {
		dev_queue = netdev_get_tx_queue(dev, i);
		qdisc = qdisc_create_dflt(dev_queue,
					  get_default_qdisc_ops(dev, i),
					  TC_H_MAKE(TC_H_MAJ(sch->handle),
						    TC_H_MIN(i + 1)), extack);
		if (!qdisc)
			return -ENOMEM;

		priv->qdiscs[i] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	/* If the options indicate that hardware should own the queue mapping,
	 * run ndo_setup_tc; otherwise apply the supplied and verified mapping
	 * in software.
	 */
	if (qopt->hw) {
		struct tc_mqprio_qopt_offload mqprio = {.qopt = *qopt};

		switch (priv->mode) {
		case TC_MQPRIO_MODE_DCB:
			if (priv->shaper != TC_MQPRIO_SHAPER_DCB)
				return -EINVAL;
			break;
		case TC_MQPRIO_MODE_CHANNEL:
			mqprio.flags = priv->flags;
			if (priv->flags & TC_MQPRIO_F_MODE)
				mqprio.mode = priv->mode;
			if (priv->flags & TC_MQPRIO_F_SHAPER)
				mqprio.shaper = priv->shaper;
			if (priv->flags & TC_MQPRIO_F_MIN_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.min_rate[i] = priv->min_rate[i];
			if (priv->flags & TC_MQPRIO_F_MAX_RATE)
				for (i = 0; i < mqprio.qopt.num_tc; i++)
					mqprio.max_rate[i] = priv->max_rate[i];
			break;
		default:
			return -EINVAL;
		}
		err = dev->netdev_ops->ndo_setup_tc(dev,
						    TC_SETUP_QDISC_MQPRIO,
						    &mqprio);
		if (err)
			return err;

		priv->hw_offload = mqprio.qopt.hw;
	} else {
		netdev_set_num_tc(dev, qopt->num_tc);
		for (i = 0; i < qopt->num_tc; i++)
			netdev_set_tc_queue(dev, i,
					    qopt->count[i], qopt->offset[i]);
	}

	/* Always use the supplied priority mappings */
	for (i = 0; i < TC_BITMASK + 1; i++)
		netdev_set_prio_tc_map(dev, i, qopt->prio_tc_map[i]);

	sch->flags |= TCQ_F_MQROOT;
	return 0;
}

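/* Attach the pre-allocated child qdiscs to their TX queues once the device
 * is quiesced; after this the array in priv is no longer needed.
 */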
static void mqprio_attach(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct Qdisc *qdisc, *old;
	unsigned int ntx;

	/* Attach underlying qdisc to each TX queue */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = priv->qdiscs[ntx];
		old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
		if (old)
			qdisc_put(old);
		if (ntx < dev->real_num_tx_queues)
			qdisc_hash_add(qdisc, false);
	}
	kfree(priv->qdiscs);
	priv->qdiscs = NULL;
}

static struct netdev_queue *mqprio_queue_get(struct Qdisc *sch,
					     unsigned long cl)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx = cl - 1;

	if (ntx >= dev->num_tx_queues)
		return NULL;
	return netdev_get_tx_queue(dev, ntx);
}

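/* Graft a new child qdisc onto one TX queue, deactivating the device around
 * the swap when it is up so the old qdisc can be replaced safely.
 */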
static int mqprio_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
			struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return -EINVAL;

	if (dev->flags & IFF_UP)
		dev_deactivate(dev);

	*old = dev_graft_qdisc(dev_queue, new);

	if (new)
		new->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;

	if (dev->flags & IFF_UP)
		dev_activate(dev);

	return 0;
}

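/* Dump the per-traffic-class minimum and maximum rates as nested
 * TCA_MQPRIO_MIN_RATE64 / TCA_MQPRIO_MAX_RATE64 attribute lists.
 */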
static int dump_rates(struct mqprio_sched *priv,
		      struct tc_mqprio_qopt *opt, struct sk_buff *skb)
{
	struct nlattr *nest;
	int i;

	if (priv->flags & TC_MQPRIO_F_MIN_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MIN_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MIN_RATE64,
				    sizeof(priv->min_rate[i]),
				    &priv->min_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}

	if (priv->flags & TC_MQPRIO_F_MAX_RATE) {
		nest = nla_nest_start_noflag(skb, TCA_MQPRIO_MAX_RATE64);
		if (!nest)
			goto nla_put_failure;

		for (i = 0; i < opt->num_tc; i++) {
			if (nla_put(skb, TCA_MQPRIO_MAX_RATE64,
				    sizeof(priv->max_rate[i]),
				    &priv->max_rate[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, nest);
	}
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

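/* Refresh the root qdisc's aggregate statistics from all child qdiscs and
 * dump the current traffic class configuration back to user space.
 */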
static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct net_device *dev = qdisc_dev(sch);
	struct mqprio_sched *priv = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *)skb_tail_pointer(skb);
	struct tc_mqprio_qopt opt = { 0 };
	struct Qdisc *qdisc;
	unsigned int ntx, tc;

	sch->q.qlen = 0;
	gnet_stats_basic_sync_init(&sch->bstats);
	memset(&sch->qstats, 0, sizeof(sch->qstats));

	/* Statistics accounting needs to handle all, none, or a mix of locked
	 * and lockless child qdiscs: per-cpu stats are added in-band and the
	 * locking qdisc totals are added under each child's lock.
	 */
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		qdisc = netdev_get_tx_queue(dev, ntx)->qdisc_sleeping;
		spin_lock_bh(qdisc_lock(qdisc));

		gnet_stats_add_basic(&sch->bstats, qdisc->cpu_bstats,
				     &qdisc->bstats, false);
		gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,
				     &qdisc->qstats);
		sch->q.qlen += qdisc_qlen(qdisc);

		spin_unlock_bh(qdisc_lock(qdisc));
	}

	opt.num_tc = netdev_get_num_tc(dev);
	memcpy(opt.prio_tc_map, dev->prio_tc_map, sizeof(opt.prio_tc_map));
	opt.hw = priv->hw_offload;

	for (tc = 0; tc < netdev_get_num_tc(dev); tc++) {
		opt.count[tc] = dev->tc_to_txq[tc].count;
		opt.offset[tc] = dev->tc_to_txq[tc].offset;
	}

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MODE) &&
	    nla_put_u16(skb, TCA_MQPRIO_MODE, priv->mode))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_SHAPER) &&
	    nla_put_u16(skb, TCA_MQPRIO_SHAPER, priv->shaper))
		goto nla_put_failure;

	if ((priv->flags & TC_MQPRIO_F_MIN_RATE ||
	     priv->flags & TC_MQPRIO_F_MAX_RATE) &&
	    (dump_rates(priv, &opt, skb) != 0))
		goto nla_put_failure;

	return nla_nest_end(skb, nla);
nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static struct Qdisc *mqprio_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

	if (!dev_queue)
		return NULL;

	return dev_queue->qdisc_sleeping;
}

static unsigned long mqprio_find(struct Qdisc *sch, u32 classid)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned int ntx = TC_H_MIN(classid);

	/* There are two regions of valid classid values. The first region
	 * has classid values of 1 through dev->num_tx_queues; these are
	 * backed by actual per-queue Qdiscs.
	 */
	if (ntx < TC_H_MIN_PRIORITY)
		return (ntx <= dev->num_tx_queues) ? ntx : 0;

	/* The second region represents the hardware traffic classes,
	 * using classid values of TC_H_MIN_PRIORITY through
	 * TC_H_MIN_PRIORITY + netdev_get_num_tc(dev) - 1.
	 */
	return ((ntx - TC_H_MIN_PRIORITY) < netdev_get_num_tc(dev)) ? ntx : 0;
}

static int mqprio_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	if (cl < TC_H_MIN_PRIORITY) {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);
		struct net_device *dev = qdisc_dev(sch);
		int tc = netdev_txq_to_tc(dev, cl - 1);

		tcm->tcm_parent = (tc < 0) ? 0 :
			TC_H_MAKE(TC_H_MAJ(sch->handle),
				  TC_H_MIN(tc + TC_H_MIN_PRIORITY));
		tcm->tcm_info = dev_queue->qdisc_sleeping->handle;
	} else {
		tcm->tcm_parent = TC_H_ROOT;
		tcm->tcm_info = 0;
	}
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

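/* For traffic-class classes (cl >= TC_H_MIN_PRIORITY) the statistics are the
 * sum over every TX queue mapped to that class; for per-queue classes they
 * come straight from that queue's child qdisc.
 */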
static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				   struct gnet_dump *d)
	__releases(d->lock)
	__acquires(d->lock)
{
	if (cl >= TC_H_MIN_PRIORITY) {
		int i;
		__u32 qlen;
		struct gnet_stats_queue qstats = {0};
		struct gnet_stats_basic_sync bstats;
		struct net_device *dev = qdisc_dev(sch);
		struct netdev_tc_txq tc = dev->tc_to_txq[cl & TC_BITMASK];

		gnet_stats_basic_sync_init(&bstats);

		/* Drop the dump lock here; it is reclaimed before the stats
		 * are copied out. This is required because the d->lock held
		 * here would otherwise nest with the per-queue qdisc locks
		 * taken below.
		 */
		if (d->lock)
			spin_unlock_bh(d->lock);

		for (i = tc.offset; i < tc.offset + tc.count; i++) {
			struct netdev_queue *q = netdev_get_tx_queue(dev, i);
			struct Qdisc *qdisc = rtnl_dereference(q->qdisc);

			spin_lock_bh(qdisc_lock(qdisc));

			gnet_stats_add_basic(&bstats, qdisc->cpu_bstats,
					     &qdisc->bstats, false);
			gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,
					     &qdisc->qstats);
			sch->q.qlen += qdisc_qlen(qdisc);

			spin_unlock_bh(qdisc_lock(qdisc));
		}
		qlen = qdisc_qlen(sch) + qstats.qlen;

		/* Reclaim the dump lock before copying out the stats */
		if (d->lock)
			spin_lock_bh(d->lock);
		if (gnet_stats_copy_basic(d, NULL, &bstats, false) < 0 ||
		    gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)
			return -1;
	} else {
		struct netdev_queue *dev_queue = mqprio_queue_get(sch, cl);

		sch = dev_queue->qdisc_sleeping;
		if (gnet_stats_copy_basic(d, sch->cpu_bstats,
					  &sch->bstats, true) < 0 ||
		    qdisc_qstats_copy(d, sch) < 0)
			return -1;
	}
	return 0;
}

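/* Walk the class space: first the virtual traffic-class classes (offset by
 * TC_H_MIN_PRIORITY), then one class per TX queue.
 */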
static void mqprio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct net_device *dev = qdisc_dev(sch);
	unsigned long ntx;

	if (arg->stop)
		return;

	/* Walk hierarchy with a virtual class per tc */
	arg->count = arg->skip;
	for (ntx = arg->skip; ntx < netdev_get_num_tc(dev); ntx++) {
		if (arg->fn(sch, ntx + TC_H_MIN_PRIORITY, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}

	/* Pad the count and skip over unused traffic classes */
	if (ntx < TC_MAX_QUEUE) {
		arg->count = TC_MAX_QUEUE;
		ntx = TC_MAX_QUEUE;
	}

	/* Reset the offset and walk the remaining per-queue qdiscs */
	for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) {
		if (arg->fn(sch, ntx + 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static struct netdev_queue *mqprio_select_queue(struct Qdisc *sch,
						struct tcmsg *tcm)
{
	return mqprio_queue_get(sch, TC_H_MIN(tcm->tcm_parent));
}

static const struct Qdisc_class_ops mqprio_class_ops = {
	.graft		= mqprio_graft,
	.leaf		= mqprio_leaf,
	.find		= mqprio_find,
	.walk		= mqprio_walk,
	.dump		= mqprio_dump_class,
	.dump_stats	= mqprio_dump_class_stats,
	.select_queue	= mqprio_select_queue,
};

static struct Qdisc_ops mqprio_qdisc_ops __read_mostly = {
	.cl_ops			= &mqprio_class_ops,
	.id			= "mqprio",
	.priv_size		= sizeof(struct mqprio_sched),
	.init			= mqprio_init,
	.destroy		= mqprio_destroy,
	.attach			= mqprio_attach,
	.change_real_num_tx	= mq_change_real_num_tx,
	.dump			= mqprio_dump,
	.owner			= THIS_MODULE,
};

static int __init mqprio_module_init(void)
{
	return register_qdisc(&mqprio_qdisc_ops);
}

static void __exit mqprio_module_exit(void)
{
	unregister_qdisc(&mqprio_qdisc_ops);
}

module_init(mqprio_module_init);
module_exit(mqprio_module_exit);

MODULE_LICENSE("GPL");