// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_ets.c		Enhanced Transmission Selection scheduler
 *
 * Description
 * -----------
 *
 * The Enhanced Transmission Selection scheduler is a classful queuing
 * discipline that merges functionality of PRIO and DRR qdiscs in one
 * scheduler. ETS makes it easy to configure a set of strict and
 * bandwidth-sharing bands to implement the transmission selection
 * described in 802.1Qaz.
 *
 * Although ETS is technically classful, it's not possible to add and
 * remove classes at will. Instead one specifies number of classes, how
 * many are PRIO-like and how many DRR-like, and quanta for the latter.
 */
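/* An illustrative configuration (not taken from this file): two strict
 * bands followed by three DRR bands sharing bandwidth 3:2:1, with
 * priorities mapped to bands through the priomap:
 *
 *   # tc qdisc add dev eth0 root handle 1: ets strict 2 \
 *         quanta 3000 2000 1000 priomap 3 3 2 2 1 1 0 0
 */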
#include <linux/module.h>
#include <net/gen_stats.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/sch_generic.h>

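/* Per-band state. Bands below ets_sched.nstrict are strict-priority and
 * never appear on the active list; the remaining bands are DRR-scheduled
 * and are linked into ets_sched.active through @alist while they hold
 * packets. @deficit is the DRR byte budget remaining in this round.
 */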
struct ets_class {
	struct list_head alist;
	struct Qdisc *qdisc;
	u32 quantum;
	u32 deficit;
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
};

struct ets_sched {
	struct list_head active;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned int nbands;
	unsigned int nstrict;
	u8 prio2band[TC_PRIO_MAX + 1];
	struct ets_class classes[TCQ_ETS_MAX_BANDS];
};

static const struct nla_policy ets_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_NBANDS] = { .type = NLA_U8 },
	[TCA_ETS_NSTRICT] = { .type = NLA_U8 },
	[TCA_ETS_QUANTA] = { .type = NLA_NESTED },
	[TCA_ETS_PRIOMAP] = { .type = NLA_NESTED },
};

static const struct nla_policy ets_priomap_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_PRIOMAP_BAND] = { .type = NLA_U8 },
};

static const struct nla_policy ets_quanta_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
	[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};

static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
			     unsigned int *quantum,
			     struct netlink_ext_ack *extack)
{
	*quantum = nla_get_u32(attr);
	if (!*quantum) {
		NL_SET_ERR_MSG(extack, "ETS quantum cannot be zero");
		return -EINVAL;
	}
	return 0;
}

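/* Class handles passed in from the core are band numbers + 1; band 0 is
 * class 1 and so on. Reject handles outside the configured number of
 * bands instead of indexing past the classes array.
 */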
static struct ets_class *
ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (arg == 0 || arg > q->nbands)
		return NULL;
	return &q->classes[arg - 1];
}

static u32 ets_class_id(struct Qdisc *sch, const struct ets_class *cl)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band = cl - q->classes;

	return TC_H_MAKE(sch->handle, band + 1);
}

static void ets_offload_change(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct ets_sched *q = qdisc_priv(sch);
	struct tc_ets_qopt_offload qopt;
	unsigned int w_psum_prev = 0;
	unsigned int q_psum = 0;
	unsigned int q_sum = 0;
	unsigned int quantum;
	unsigned int w_psum;
	unsigned int weight;
	unsigned int i;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_REPLACE;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.replace_params.bands = q->nbands;
	qopt.replace_params.qstats = &sch->qstats;
	memcpy(&qopt.replace_params.priomap,
	       q->prio2band, sizeof(q->prio2band));

	for (i = 0; i < q->nbands; i++)
		q_sum += q->classes[i].quantum;

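	/* Convert quanta to percentage weights for the offload. Computing
	 * each weight as the difference of successive rounded cumulative
	 * percentages (w_psum) guarantees that the weights sum to exactly
	 * 100 despite integer rounding; strict bands have a zero quantum
	 * and thus get weight 0.
	 */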
	for (i = 0; i < q->nbands; i++) {
		quantum = q->classes[i].quantum;
		q_psum += quantum;
		w_psum = quantum ? q_psum * 100 / q_sum : 0;
		weight = w_psum - w_psum_prev;
		w_psum_prev = w_psum;

		qopt.replace_params.quanta[i] = quantum;
		qopt.replace_params.weights[i] = weight;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_destroy(struct Qdisc *sch)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	qopt.command = TC_ETS_DESTROY;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_ETS, &qopt);
}

static void ets_offload_graft(struct Qdisc *sch, struct Qdisc *new,
			      struct Qdisc *old, unsigned long arg,
			      struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_GRAFT;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.graft_params.band = arg - 1;
	qopt.graft_params.child_handle = new->handle;

	qdisc_offload_graft_helper(dev, sch, new, old, TC_SETUP_QDISC_ETS,
				   &qopt, extack);
}

static int ets_offload_dump(struct Qdisc *sch)
{
	struct tc_ets_qopt_offload qopt;

	qopt.command = TC_ETS_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_ETS, &qopt);
}

static bool ets_class_is_strict(struct ets_sched *q, const struct ets_class *cl)
{
	unsigned int band = cl - q->classes;

	return band < q->nstrict;
}

static int ets_class_change(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, *arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int quantum;
	int err;

	/* Classes can be added and removed only through Qdisc_ops.change
	 * interface.
	 */
	if (!cl) {
		NL_SET_ERR_MSG(extack, "Fine-grained class addition and removal is not supported");
		return -EOPNOTSUPP;
	}

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_class_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_QUANTA_BAND])
		/* Nothing to configure. */
		return 0;

	if (ets_class_is_strict(q, cl)) {
		NL_SET_ERR_MSG(extack, "Strict bands do not have a configurable quantum");
		return -EINVAL;
	}

	err = ets_quantum_parse(sch, tb[TCA_ETS_QUANTA_BAND], &quantum,
				extack);
	if (err)
		return err;

	sch_tree_lock(sch);
	cl->quantum = quantum;
	sch_tree_unlock(sch);

	ets_offload_change(sch);
	return 0;
}

static int ets_class_graft(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old,
			   struct netlink_ext_ack *extack)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					ets_class_id(sch, cl), NULL);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	ets_offload_graft(sch, new, *old, arg, extack);
	return 0;
}

static struct Qdisc *ets_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);

	return cl->qdisc;
}

static unsigned long ets_class_find(struct Qdisc *sch, u32 classid)
{
	unsigned long band = TC_H_MIN(classid);
	struct ets_sched *q = qdisc_priv(sch);

	if (band - 1 >= q->nbands)
		return 0;
	return band;
}

static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);

	/* We get notified about zero-length child Qdiscs as well if they are
	 * offloaded. Those aren't on the active list though, so don't attempt
	 * to remove them.
	 */
	if (!ets_class_is_strict(q, cl) && sch->q.qlen)
		list_del(&cl->alist);
}

static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *nest;

	tcm->tcm_parent = TC_H_ROOT;
	tcm->tcm_handle = ets_class_id(sch, cl);
	tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;
	if (!ets_class_is_strict(q, cl)) {
		if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND, cl->quantum))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int ets_class_dump_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct ets_class *cl = ets_class_from_arg(sch, arg);
	struct Qdisc *cl_q = cl->qdisc;

	if (gnet_stats_copy_basic(d, NULL, &cl_q->bstats, true) < 0 ||
	    qdisc_qstats_copy(d, cl_q) < 0)
		return -1;

	return 0;
}

static void ets_qdisc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct ets_sched *q = qdisc_priv(sch);
	int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->nbands; i++) {
		if (arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static struct tcf_block *
ets_qdisc_tcf_block(struct Qdisc *sch, unsigned long cl,
		    struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);

	if (cl) {
		NL_SET_ERR_MSG(extack, "ETS classid must be zero");
		return NULL;
	}

	return q->block;
}

static unsigned long ets_qdisc_bind_tcf(struct Qdisc *sch, unsigned long parent,
					u32 classid)
{
	return ets_class_find(sch, classid);
}

static void ets_qdisc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
}

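/* Select the target band for a packet: filters attached to the qdisc get
 * the first say; failing that, skb->priority is looked up in the priomap.
 * An out-of-range band falls back to the band that priority 0 maps to.
 */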
static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct ets_sched *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, NULL, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			if (TC_H_MAJ(band))
				band = 0;
			return &q->classes[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	band = TC_H_MIN(band) - 1;
	if (band >= q->nbands)
		return &q->classes[q->prio2band[0]];
	return &q->classes[band];
}

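/* Enqueue into the chosen band's child qdisc. A DRR band that goes from
 * empty to non-empty joins the tail of the active list with a full
 * quantum of deficit.
 */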
static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			     struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	int err = 0;
	bool first;

	cl = ets_classify(skb, sch, &err);
	if (!cl) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first && !ets_class_is_strict(q, cl)) {
		list_add_tail(&cl->alist, &q->active);
		cl->deficit = cl->quantum;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return err;
}

static struct sk_buff *
ets_qdisc_dequeue_skb(struct Qdisc *sch, struct sk_buff *skb)
{
	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	return skb;
}

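/* Strict bands are polled in priority order on every dequeue. Only when
 * all of them are empty does the DRR machinery run: the band at the head
 * of the active list may send as long as its deficit covers the packet
 * at its head; otherwise it is recharged by one quantum and rotated to
 * the tail, per the deficit round robin algorithm.
 */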
static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct ets_class *cl;
	struct sk_buff *skb;
	unsigned int band;
	unsigned int len;

	while (1) {
		for (band = 0; band < q->nstrict; band++) {
			cl = &q->classes[band];
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (skb)
				return ets_qdisc_dequeue_skb(sch, skb);
		}

		if (list_empty(&q->active))
			goto out;

		cl = list_first_entry(&q->active, struct ets_class, alist);
		skb = cl->qdisc->ops->peek(cl->qdisc);
		if (!skb) {
			qdisc_warn_nonwc(__func__, cl->qdisc);
			goto out;
		}

		len = qdisc_pkt_len(skb);
		if (len <= cl->deficit) {
			cl->deficit -= len;
			skb = qdisc_dequeue_peeked(cl->qdisc);
			if (unlikely(!skb))
				goto out;
			if (cl->qdisc->q.qlen == 0)
				list_del(&cl->alist);
			return ets_qdisc_dequeue_skb(sch, skb);
		}

		cl->deficit += cl->quantum;
		list_move_tail(&cl->alist, &q->active);
	}
out:
	return NULL;
}

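/* The priomap is carried as a flat nest of TCA_ETS_PRIOMAP_BAND u8
 * attributes, one per priority, starting at priority 0. Each value names
 * the band that priority maps to and must be below the number of bands.
 */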
static int ets_qdisc_priomap_parse(struct nlattr *priomap_attr,
				   unsigned int nbands, u8 *priomap,
				   struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int prio = 0;
	u8 band;
	int rem;
	int err;

	err = __nla_validate_nested(priomap_attr, TCA_ETS_MAX,
				    ets_priomap_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err)
		return err;

	nla_for_each_nested(attr, priomap_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_PRIOMAP_BAND:
			if (prio > TC_PRIO_MAX) {
				NL_SET_ERR_MSG_MOD(extack, "Too many priorities in ETS priomap");
				return -EINVAL;
			}
			band = nla_get_u8(attr);
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "Invalid band number in ETS priomap");
				return -EINVAL;
			}
			priomap[prio++] = band;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
	}

	return 0;
}

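/* Quanta are likewise a flat nest of TCA_ETS_QUANTA_BAND u32 attributes.
 * They configure consecutive bands starting at the first non-strict one,
 * so the nest may carry at most nbands - nstrict values.
 */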
static int ets_qdisc_quanta_parse(struct Qdisc *sch, struct nlattr *quanta_attr,
				  unsigned int nbands, unsigned int nstrict,
				  unsigned int *quanta,
				  struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int band = nstrict;
	int rem;
	int err;

	err = __nla_validate_nested(quanta_attr, TCA_ETS_MAX,
				    ets_quanta_policy, NL_VALIDATE_STRICT,
				    extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, quanta_attr, rem) {
		switch (nla_type(attr)) {
		case TCA_ETS_QUANTA_BAND:
			if (band >= nbands) {
				NL_SET_ERR_MSG_MOD(extack, "ETS quanta has more values than bands");
				return -EINVAL;
			}
			err = ets_quantum_parse(sch, attr, &quanta[band++],
						extack);
			if (err)
				return err;
			break;
		default:
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
	}

	return 0;
}

static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
			    struct netlink_ext_ack *extack)
{
	unsigned int quanta[TCQ_ETS_MAX_BANDS] = {0};
	struct Qdisc *queues[TCQ_ETS_MAX_BANDS];
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_ETS_MAX + 1];
	unsigned int oldbands = q->nbands;
	u8 priomap[TC_PRIO_MAX + 1];
	unsigned int nstrict = 0;
	unsigned int nbands;
	unsigned int i;
	int err;

	if (!opt) {
		NL_SET_ERR_MSG(extack, "ETS options are required for this operation");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_ETS_MAX, opt, ets_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_ETS_NBANDS]) {
		NL_SET_ERR_MSG_MOD(extack, "Number of bands is a required argument");
		return -EINVAL;
	}
	nbands = nla_get_u8(tb[TCA_ETS_NBANDS]);
	if (nbands < 1 || nbands > TCQ_ETS_MAX_BANDS) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of bands");
		return -EINVAL;
	}

	/* Unless overridden, traffic goes to the last band. */
	memset(priomap, nbands - 1, sizeof(priomap));

	if (tb[TCA_ETS_NSTRICT]) {
		nstrict = nla_get_u8(tb[TCA_ETS_NSTRICT]);
		if (nstrict > nbands) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid number of strict bands");
			return -EINVAL;
		}
	}

	if (tb[TCA_ETS_PRIOMAP]) {
		err = ets_qdisc_priomap_parse(tb[TCA_ETS_PRIOMAP],
					      nbands, priomap, extack);
		if (err)
			return err;
	}

	if (tb[TCA_ETS_QUANTA]) {
		err = ets_qdisc_quanta_parse(sch, tb[TCA_ETS_QUANTA],
					     nbands, nstrict, quanta, extack);
		if (err)
			return err;
	}

	/* If there are more bands than strict + quanta provided, the remaining
	 * ones are ETS with quantum of MTU. Initialize the missing quanta
	 * here.
	 */
	for (i = nstrict; i < nbands; i++) {
		if (!quanta[i])
			quanta[i] = psched_mtu(qdisc_dev(sch));
	}

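	/* Create child qdiscs for any newly added bands before taking the
	 * tree lock, so the commit below cannot fail halfway through.
	 */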
	for (i = oldbands; i < nbands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      ets_class_id(sch, &q->classes[i]),
					      extack);
		if (!queues[i]) {
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

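	/* Commit the new configuration under the tree lock: resize the band
	 * set, move previously strict bands with backlog onto the DRR active
	 * list, and unlink bands that are going away.
	 */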
	sch_tree_lock(sch);

	q->nbands = nbands;
	for (i = nstrict; i < q->nstrict; i++) {
		if (q->classes[i].qdisc->q.qlen) {
			list_add_tail(&q->classes[i].alist, &q->active);
			q->classes[i].deficit = quanta[i];
		}
	}
	for (i = q->nbands; i < oldbands; i++) {
		if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
			list_del(&q->classes[i].alist);
		qdisc_tree_flush_backlog(q->classes[i].qdisc);
	}
	q->nstrict = nstrict;
	memcpy(q->prio2band, priomap, sizeof(priomap));

	for (i = 0; i < q->nbands; i++)
		q->classes[i].quantum = quanta[i];

	for (i = oldbands; i < q->nbands; i++) {
		q->classes[i].qdisc = queues[i];
		if (q->classes[i].qdisc != &noop_qdisc)
			qdisc_hash_add(q->classes[i].qdisc, true);
	}

	sch_tree_unlock(sch);

	ets_offload_change(sch);
	for (i = q->nbands; i < oldbands; i++) {
		qdisc_put(q->classes[i].qdisc);
		q->classes[i].qdisc = NULL;
		q->classes[i].quantum = 0;
		q->classes[i].deficit = 0;
		gnet_stats_basic_sync_init(&q->classes[i].bstats);
		memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));
	}
	return 0;
}

static int ets_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			  struct netlink_ext_ack *extack)
{
	struct ets_sched *q = qdisc_priv(sch);
	int err, i;

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	INIT_LIST_HEAD(&q->active);
	for (i = 0; i < TCQ_ETS_MAX_BANDS; i++)
		INIT_LIST_HEAD(&q->classes[i].alist);

	return ets_qdisc_change(sch, opt, extack);
}

static void ets_qdisc_reset(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	for (band = q->nstrict; band < q->nbands; band++) {
		if (q->classes[band].qdisc->q.qlen)
			list_del(&q->classes[band].alist);
	}
	for (band = 0; band < q->nbands; band++)
		qdisc_reset(q->classes[band].qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void ets_qdisc_destroy(struct Qdisc *sch)
{
	struct ets_sched *q = qdisc_priv(sch);
	int band;

	ets_offload_destroy(sch);
	tcf_block_put(q->block);
	for (band = 0; band < q->nbands; band++)
		qdisc_put(q->classes[band].qdisc);
}

static int ets_qdisc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct ets_sched *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct nlattr *nest;
	int band;
	int prio;
	int err;

	err = ets_offload_dump(sch);
	if (err)
		return err;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (!opts)
		goto nla_err;

	if (nla_put_u8(skb, TCA_ETS_NBANDS, q->nbands))
		goto nla_err;

	if (q->nstrict &&
	    nla_put_u8(skb, TCA_ETS_NSTRICT, q->nstrict))
		goto nla_err;

	if (q->nbands > q->nstrict) {
		nest = nla_nest_start(skb, TCA_ETS_QUANTA);
		if (!nest)
			goto nla_err;

		for (band = q->nstrict; band < q->nbands; band++) {
			if (nla_put_u32(skb, TCA_ETS_QUANTA_BAND,
					q->classes[band].quantum))
				goto nla_err;
		}

		nla_nest_end(skb, nest);
	}

	nest = nla_nest_start(skb, TCA_ETS_PRIOMAP);
	if (!nest)
		goto nla_err;

	for (prio = 0; prio <= TC_PRIO_MAX; prio++) {
		if (nla_put_u8(skb, TCA_ETS_PRIOMAP_BAND, q->prio2band[prio]))
			goto nla_err;
	}

	nla_nest_end(skb, nest);

	return nla_nest_end(skb, opts);

nla_err:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops ets_class_ops = {
	.change = ets_class_change,
	.graft = ets_class_graft,
	.leaf = ets_class_leaf,
	.find = ets_class_find,
	.qlen_notify = ets_class_qlen_notify,
	.dump = ets_class_dump,
	.dump_stats = ets_class_dump_stats,
	.walk = ets_qdisc_walk,
	.tcf_block = ets_qdisc_tcf_block,
	.bind_tcf = ets_qdisc_bind_tcf,
	.unbind_tcf = ets_qdisc_unbind_tcf,
};

static struct Qdisc_ops ets_qdisc_ops __read_mostly = {
	.cl_ops = &ets_class_ops,
	.id = "ets",
	.priv_size = sizeof(struct ets_sched),
	.enqueue = ets_qdisc_enqueue,
	.dequeue = ets_qdisc_dequeue,
	.peek = qdisc_peek_dequeued,
	.change = ets_qdisc_change,
	.init = ets_qdisc_init,
	.reset = ets_qdisc_reset,
	.destroy = ets_qdisc_destroy,
	.dump = ets_qdisc_dump,
	.owner = THIS_MODULE,
};

static int __init ets_init(void)
{
	return register_qdisc(&ets_qdisc_ops);
}

static void __exit ets_exit(void)
{
	unregister_qdisc(&ets_qdisc_ops);
}

module_init(ets_init);
module_exit(ets_exit);
MODULE_LICENSE("GPL");