// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>
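/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by internal or external classifier) on flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority on old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */
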
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

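/* Map an skb to a flow index in [1, flows_cnt]; 0 means drop.
 * skb->priority may select a flow directly; otherwise an attached
 * tc filter, or the flow hash, picks the bucket.
 */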
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

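/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */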
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

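/* add skb to flow queue (tail add) */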
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in the fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amounting to ~64KB of truesize.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	/* Tell codel to increase its signal strength also */
	flow->cvars.count += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

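/* Enqueue path: classify the packet into a flow, timestamp it for CoDel
 * and append it to the flow's FIFO. If the packet limit or the memory
 * limit is then exceeded, batch-drop from the fattest flow.
 */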
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int ret;
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);

	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packets limit to not add a too big cpu spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}
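/* Called from codel_dequeue() to pull one skb from the flow owning
 * @vars; updates per-flow backlog, memory accounting and qdisc counters.
 */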
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

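/* DRR dequeue: serve new_flows first, then old_flows. A flow whose
 * deficit is exhausted gets one quantum added and is rotated to the
 * tail of old_flows; each packet is pulled through codel_dequeue().
 */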
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR] = { .type = NLA_U8 },
	[TCA_FQ_CODEL_CE_THRESHOLD_MASK] = { .type = NLA_U8 },
};

static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	u32 quantum = 0;
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	if (tb[TCA_FQ_CODEL_QUANTUM]) {
		quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
		if (quantum > FQ_CODEL_QUANTUM_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid quantum");
			return -EINVAL;
		}
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR])
		q->cparams.ce_threshold_selector = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR]);
	if (tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK])
		q->cparams.ce_threshold_mask = nla_get_u8(tb[TCA_FQ_CODEL_CE_THRESHOLD_MASK]);

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (quantum)
		q->quantum = quantum;

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20;
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD) {
		if (nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
				codel_time_to_us(q->cparams.ce_threshold)))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_SELECTOR, q->cparams.ce_threshold_selector))
			goto nla_put_failure;
		if (nla_put_u8(skb, TCA_FQ_CODEL_CE_THRESHOLD_MASK, q->cparams.ce_threshold_mask))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = 0;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		= fq_codel_leaf,
	.find		= fq_codel_find,
	.tcf_block	= fq_codel_tcf_block,
	.bind_tcf	= fq_codel_bind,
	.unbind_tcf	= fq_codel_unbind,
	.dump		= fq_codel_dump_class,
	.dump_stats	= fq_codel_dump_class_stats,
	.walk		= fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		= &fq_codel_class_ops,
	.id		= "fq_codel",
	.priv_size	= sizeof(struct fq_codel_sched_data),
	.enqueue	= fq_codel_enqueue,
	.dequeue	= fq_codel_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_codel_init,
	.reset		= fq_codel_reset,
	.destroy	= fq_codel_destroy,
	.change		= fq_codel_change,
	.dump		= fq_codel_dump,
	.dump_stats	= fq_codel_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Fair Queue CoDel discipline");