/*
 * Hierarchical Fair Service Curve (HFSC) packet scheduler.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <asm/div64.h>
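
/*
 * Internal representation of a two-piece linear service curve: the curve
 * runs with slope sm1 for dx time units (gaining dy bytes of service) and
 * with slope sm2 afterwards.  ism1/ism2 are pre-computed inverse slopes
 * used to map an amount of service back to a point in time.  A runtime_sc
 * is the same curve anchored at a current origin (x, y).
 */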
struct internal_sc {
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

/* runtime service curve: an internal_sc anchored at the point (x, y) */
struct runtime_sc {
	u64	x;	/* current starting position on x-axis */
	u64	y;	/* current starting position on y-axis */
	u64	sm1;	/* scaled slope of the 1st segment */
	u64	ism1;	/* scaled inverse-slope of the 1st segment */
	u64	dx;	/* the x-projection of the 1st segment */
	u64	dy;	/* the y-projection of the 1st segment */
	u64	sm2;	/* scaled slope of the 2nd segment */
	u64	ism2;	/* scaled inverse-slope of the 2nd segment */
};

enum hfsc_class_flags {
	HFSC_RSC = 0x1,		/* real-time service curve is valid */
	HFSC_FSC = 0x2,		/* fair (link-sharing) service curve is valid */
	HFSC_USC = 0x4		/* upper-limit service curve is valid */
};

struct hfsc_class {
	struct Qdisc_class_common cl_common;

	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct tcf_proto __rcu *filter_list;	/* filter list */
	struct tcf_block *block;
	unsigned int	filter_cnt;		/* filter count */
	unsigned int	level;			/* class level in hierarchy */

	struct hfsc_sched *sched;		/* scheduler data */
	struct hfsc_class *cl_parent;		/* parent class */
	struct list_head siblings;		/* sibling classes */
	struct list_head children;		/* child classes */
	struct Qdisc	*qdisc;			/* leaf qdisc */

	struct rb_node el_node;		/* qdisc's eligible tree member */
	struct rb_root vt_tree;		/* active children sorted by cl_vt */
	struct rb_node vt_node;		/* parent's vt_tree member */
	struct rb_root cf_tree;		/* active children sorted by cl_f */
	struct rb_node cf_node;		/* parent's cf_tree member */

	u64	cl_total;		/* total work in bytes */
	u64	cl_cumul;		/* cumulative work in bytes done by
					   the real-time criterion */

	u64	cl_d;			/* deadline */
	u64	cl_e;			/* eligible time */
	u64	cl_vt;			/* virtual time */
	u64	cl_f;			/* time when this class will fit for
					   link-sharing, max(myf, cfmin) */
	u64	cl_myf;			/* my fit-time (calculated from this
					   class's own upperlimit curve) */
	u64	cl_cfmin;		/* earliest children's fit-time (used
					   with cl_myf to obtain cl_f) */
	u64	cl_cvtmin;		/* minimal virtual time among the
					   children fit for link-sharing
					   (monotonic within a period) */
	u64	cl_vtadj;		/* intra-period cumulative vt
					   adjustment */
	u64	cl_cvtoff;		/* largest virtual time seen among
					   the children */

	struct internal_sc cl_rsc;	/* internal real-time service curve */
	struct internal_sc cl_fsc;	/* internal fair service curve */
	struct internal_sc cl_usc;	/* internal upperlimit service curve */
	struct runtime_sc cl_deadline;	/* deadline curve */
	struct runtime_sc cl_eligible;	/* eligible curve */
	struct runtime_sc cl_virtual;	/* virtual curve */
	struct runtime_sc cl_ulimit;	/* upperlimit curve */

	u8	cl_flags;		/* which curves are valid */
	u32	cl_vtperiod;		/* vt period sequence number */
	u32	cl_parentperiod;	/* parent's vt period sequence number */
	u32	cl_nactive;		/* number of active children */
};

struct hfsc_sched {
	u16	defcls;				/* default class id */
	struct hfsc_class root;			/* root class */
	struct Qdisc_class_hash clhash;		/* class hash */
	struct rb_root eligible;		/* eligible tree */
	struct qdisc_watchdog watchdog;		/* watchdog timer */
};

#define	HT_INFINITY	0xffffffffffffffffULL	/* infinite time value */
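
/*
 * The eligible tree holds the backlogged classes that have a real-time
 * service curve, ordered by their eligible times (cl_e).  There is one
 * eligible tree per hfsc_sched instance; it drives the real-time
 * criterion in hfsc_dequeue().
 */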
static void
eltree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->sched->eligible.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, el_node);
		if (cl->cl_e >= cl1->cl_e)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->el_node, parent, p);
	rb_insert_color(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->el_node, &cl->sched->eligible);
}

static inline void
eltree_update(struct hfsc_class *cl)
{
	eltree_remove(cl);
	eltree_insert(cl);
}

/* find the class with the minimum deadline among the eligible classes */
static inline struct hfsc_class *
eltree_get_mindl(struct hfsc_sched *q, u64 cur_time)
{
	struct hfsc_class *p, *cl = NULL;
	struct rb_node *n;

	for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, el_node);
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return cl;
}

/* find the class with the minimum eligible time */
static inline struct hfsc_class *
eltree_get_minel(struct hfsc_sched *q)
{
	struct rb_node *n;

	n = rb_first(&q->eligible);
	if (n == NULL)
		return NULL;
	return rb_entry(n, struct hfsc_class, el_node);
}
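
/*
 * Each inner class keeps two rbtrees of its active children: vt_tree is
 * ordered by virtual time (cl_vt) and is used by the link-sharing
 * criterion, while cf_tree is ordered by fit time (cl_f) and tracks the
 * earliest time at which a child may receive link-sharing service.
 */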
static void
vttree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->vt_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, vt_node);
		if (cl->cl_vt >= cl1->cl_vt)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->vt_node, parent, p);
	rb_insert_color(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->vt_node, &cl->cl_parent->vt_tree);
}

static inline void
vttree_update(struct hfsc_class *cl)
{
	vttree_remove(cl);
	vttree_insert(cl);
}

/* find the first child whose fit time has passed */
static inline struct hfsc_class *
vttree_firstfit(struct hfsc_class *cl, u64 cur_time)
{
	struct hfsc_class *p;
	struct rb_node *n;

	for (n = rb_first(&cl->vt_tree); n != NULL; n = rb_next(n)) {
		p = rb_entry(n, struct hfsc_class, vt_node);
		if (p->cl_f <= cur_time)
			return p;
	}
	return NULL;
}

/*
 * get the leaf class with the minimum vt in the hierarchy
 */
static struct hfsc_class *
vttree_get_minvt(struct hfsc_class *cl, u64 cur_time)
{
	/* if root-class's cfmin is bigger than cur_time nothing to do */
	if (cl->cl_cfmin > cur_time)
		return NULL;

	while (cl->level > 0) {
		cl = vttree_firstfit(cl, cur_time);
		if (cl == NULL)
			return NULL;
		/*
		 * update parent's cl_cvtmin.
		 */
		if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
			cl->cl_parent->cl_cvtmin = cl->cl_vt;
	}
	return cl;
}

static void
cftree_insert(struct hfsc_class *cl)
{
	struct rb_node **p = &cl->cl_parent->cf_tree.rb_node;
	struct rb_node *parent = NULL;
	struct hfsc_class *cl1;

	while (*p != NULL) {
		parent = *p;
		cl1 = rb_entry(parent, struct hfsc_class, cf_node);
		if (cl->cl_f >= cl1->cl_f)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->cf_node, parent, p);
	rb_insert_color(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_remove(struct hfsc_class *cl)
{
	rb_erase(&cl->cf_node, &cl->cl_parent->cf_tree);
}

static inline void
cftree_update(struct hfsc_class *cl)
{
	cftree_remove(cl);
	cftree_insert(cl);
}
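
/*
 * Service curve arithmetic.  External curve parameters are m (a rate in
 * bytes per second, as used by struct tc_service_curve here) and d
 * (microseconds); internally they become sm (bytes per psched tick,
 * scaled left by SM_SHIFT), ism (psched ticks per byte, scaled left by
 * ISM_SHIFT) and dx (psched ticks).  The fixed-point scaling keeps enough
 * fractional precision for low rates; an ism of HT_INFINITY encodes a
 * zero slope.
 */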
#define	SM_SHIFT	(30 - PSCHED_SHIFT)
#define	ISM_SHIFT	(8 + PSCHED_SHIFT)

#define	SM_MASK		((1ULL << SM_SHIFT) - 1)
#define	ISM_MASK	((1ULL << ISM_SHIFT) - 1)

static inline u64
seg_x2y(u64 x, u64 sm)
{
	u64 y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into its upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return y;
}

static inline u64
seg_y2x(u64 y, u64 ism)
{
	u64 x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return x;
}

/* convert m (bytes/sec) into sm (bytes/psched tick, scaled by SM_SHIFT) */
static u64
m2sm(u32 m)
{
	u64 sm;

	sm = ((u64)m << SM_SHIFT);
	sm += PSCHED_TICKS_PER_SEC - 1;
	do_div(sm, PSCHED_TICKS_PER_SEC);
	return sm;
}

/* convert m (bytes/sec) into ism (psched ticks/byte, scaled by ISM_SHIFT) */
static u64
m2ism(u32 m)
{
	u64 ism;

	if (m == 0)
		ism = HT_INFINITY;
	else {
		ism = ((u64)PSCHED_TICKS_PER_SEC << ISM_SHIFT);
		ism += m - 1;
		do_div(ism, m);
	}
	return ism;
}

/* convert d (us) into dx (psched ticks) */
static u64
d2dx(u32 d)
{
	u64 dx;

	dx = ((u64)d * PSCHED_TICKS_PER_SEC);
	dx += USEC_PER_SEC - 1;
	do_div(dx, USEC_PER_SEC);
	return dx;
}

/* convert sm (scaled bytes/psched tick) back into m (bytes/sec) */
static u32
sm2m(u64 sm)
{
	u64 m;

	m = (sm * PSCHED_TICKS_PER_SEC) >> SM_SHIFT;
	return (u32)m;
}

/* convert dx (psched ticks) back into d (us) */
static u32
dx2d(u64 dx)
{
	u64 d;

	d = dx * USEC_PER_SEC;
	do_div(d, PSCHED_TICKS_PER_SEC);
	return (u32)d;
}

static void
sc2isc(struct tc_service_curve *sc, struct internal_sc *isc)
{
	isc->sm1  = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx   = d2dx(sc->d);
	isc->dy   = seg_x2y(isc->dx, isc->sm1);
	isc->sm2  = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	rtsc->x	   = x;
	rtsc->y    = y;
	rtsc->sm1  = isc->sm1;
	rtsc->ism1 = isc->ism1;
	rtsc->dx   = isc->dx;
	rtsc->dy   = isc->dy;
	rtsc->sm2  = isc->sm2;
	rtsc->ism2 = isc->ism2;
}
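
/*
 * rtsc_y2x() returns the time at which the runtime curve reaches a given
 * amount of service y; rtsc_x2y() returns the amount of service available
 * by time x.  Both walk the two segments of the curve relative to its
 * current origin (x, y).
 */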
static u64
rtsc_y2x(struct runtime_sc *rtsc, u64 y)
{
	u64 x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return x;
}

static u64
rtsc_x2y(struct runtime_sc *rtsc, u64 x)
{
	u64 y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return y;
}
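
/*
 * Update the runtime service curve in place to the minimum (the lower
 * envelope) of the current curve and a new service curve isc starting at
 * the point (x, y).  The convex case (sm1 <= sm2) only needs the origin
 * moved; the concave case may require computing the intersection of the
 * two curves.
 */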
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u64 x, u64 y)
{
	u64 y1, y2, dx, dy;
	u32 dsm;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
	dx = (y1 - y) << SM_SHIFT;
	dsm = isc->sm1 - isc->sm2;
	do_div(dx, dsm);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
}
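
/*
 * Eligible/deadline bookkeeping for the real-time criterion: cl_e is the
 * time at which the class becomes eligible (the eligible curve evaluated
 * at the work already received, cl_cumul), and cl_d is the deadline by
 * which the head packet of next_len bytes should be sent (the deadline
 * curve evaluated at cl_cumul + next_len).
 */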
static void
init_ed(struct hfsc_class *cl, unsigned int next_len)
{
	u64 cur_time = psched_get_time();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	eltree_update(cl);
}

static inline void
update_d(struct hfsc_class *cl, unsigned int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static inline void
update_cfmin(struct hfsc_class *cl)
{
	struct rb_node *n = rb_first(&cl->cf_tree);
	struct hfsc_class *p;

	if (n == NULL) {
		cl->cl_cfmin = 0;
		return;
	}
	p = rb_entry(n, struct hfsc_class, cf_node);
	cl->cl_cfmin = p->cl_f;
}
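
/*
 * init_vf() activates a class for link sharing and walks up towards the
 * root, activating each ancestor whose subtree was previously idle.  A
 * newly active class derives its virtual time from its siblings (never
 * going backwards within the same parent period), gets its virtual curve
 * updated, and propagates its fit time cl_f = max(cl_myf, cl_cfmin) to
 * the parent.
 */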
static void
init_vf(struct hfsc_class *cl, unsigned int len)
{
	struct hfsc_class *max_cl;
	struct rb_node *n;
	u64 vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			n = rb_last(&cl->cl_parent->vt_tree);
			if (n != NULL) {
				max_cl = rb_entry(n, struct hfsc_class, vt_node);
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * initialize cl_vt to the highest value seen
				 * among the siblings.
				 */
				cl->cl_vt = cl->cl_parent->cl_cvtoff;
				cl->cl_parent->cl_cvtmin = 0;
			}

			/* update the virtual curve */
			rtsc_min(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			vttree_insert(cl);
			cftree_insert(cl);

			if (cl->cl_flags & HFSC_USC) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = psched_get_time();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, &cl->cl_usc, cur_time,
					 cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
						      cl->cl_total);
			}
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
		}
		update_cfmin(cl->cl_parent);
	}
}
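
/*
 * update_vf() is called after a packet has been sent from a class: it
 * charges len bytes of work to the class and all its ancestors,
 * recomputes their virtual times and fit times, and, when the class's
 * subtree has run out of packets (go_passive), removes it from the
 * parent's vt and cf trees while remembering the largest vt seen in
 * cl_cvtoff.
 */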
static void
update_vf(struct hfsc_class *cl, unsigned int len, u64 cur_time)
{
	u64 f;	/* , myf_bound, delta; */
	int go_passive = 0;

	if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC)
		go_passive = 1;

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {
		cl->cl_total += len;

		if (!(cl->cl_flags & HFSC_FSC) || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		/* update vt */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total) + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtoff of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtoff)
				cl->cl_parent->cl_cvtoff = cl->cl_vt;

			/* remove this class from the vt tree */
			vttree_remove(cl);

			cftree_remove(cl);
			update_cfmin(cl->cl_parent);

			continue;
		}

		/* update the vt tree */
		vttree_update(cl);

		/* update f */
		if (cl->cl_flags & HFSC_USC) {
			cl->cl_myf = rtsc_y2x(&cl->cl_ulimit, cl->cl_total);
#if 0
			cl->cl_myf = cl->cl_myfadj + rtsc_y2x(&cl->cl_ulimit,
							      cl->cl_total);
			/*
			 * if myf lags behind the current time by more than
			 * one clock tick, pull myf (and myfadj) forward so
			 * that a rate-limited class does not accumulate
			 * excessive credit.
			 */
			myf_bound = cur_time - PSCHED_JIFFIE2US(1);
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
#endif
		}

		f = max(cl->cl_myf, cl->cl_cfmin);
		if (f != cl->cl_f) {
			cl->cl_f = f;
			cftree_update(cl);
			update_cfmin(cl->cl_parent);
		}
	}
}

static unsigned int
qdisc_peek_len(struct Qdisc *sch)
{
	struct sk_buff *skb;
	unsigned int len;

	skb = sch->ops->peek(sch);
	if (unlikely(skb == NULL)) {
		qdisc_warn_nonwc("qdisc_peek_len", sch);
		return 0;
	}
	len = qdisc_pkt_len(skb);

	return len;
}

static void
hfsc_adjust_levels(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	unsigned int level;

	do {
		level = 0;
		list_for_each_entry(p, &cl->children, siblings) {
			if (p->level >= level)
				level = p->level + 1;
		}
		cl->level = level;
	} while ((cl = cl->cl_parent) != NULL);
}

static inline struct hfsc_class *
hfsc_find_class(u32 classid, struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct hfsc_class, cl_common);
}

static void
hfsc_change_rsc(struct hfsc_class *cl, struct tc_service_curve *rsc,
		u64 cur_time)
{
	sc2isc(rsc, &cl->cl_rsc);
	rtsc_init(&cl->cl_deadline, &cl->cl_rsc, cur_time, cl->cl_cumul);
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc.sm1 <= cl->cl_rsc.sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}
	cl->cl_flags |= HFSC_RSC;
}

static void
hfsc_change_fsc(struct hfsc_class *cl, struct tc_service_curve *fsc)
{
	sc2isc(fsc, &cl->cl_fsc);
	rtsc_init(&cl->cl_virtual, &cl->cl_fsc, cl->cl_vt, cl->cl_total);
	cl->cl_flags |= HFSC_FSC;
}

static void
hfsc_change_usc(struct hfsc_class *cl, struct tc_service_curve *usc,
		u64 cur_time)
{
	sc2isc(usc, &cl->cl_usc);
	rtsc_init(&cl->cl_ulimit, &cl->cl_usc, cur_time, cl->cl_total);
	cl->cl_flags |= HFSC_USC;
}

static const struct nla_policy hfsc_policy[TCA_HFSC_MAX + 1] = {
	[TCA_HFSC_RSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_FSC]	= { .len = sizeof(struct tc_service_curve) },
	[TCA_HFSC_USC]	= { .len = sizeof(struct tc_service_curve) },
};

static int
hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
		  struct nlattr **tca, unsigned long *arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)*arg;
	struct hfsc_class *parent = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HFSC_MAX + 1];
	struct tc_service_curve *rsc = NULL, *fsc = NULL, *usc = NULL;
	u64 cur_time;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_HFSC_MAX, opt, hfsc_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_HFSC_RSC]) {
		rsc = nla_data(tb[TCA_HFSC_RSC]);
		if (rsc->m1 == 0 && rsc->m2 == 0)
			rsc = NULL;
	}

	if (tb[TCA_HFSC_FSC]) {
		fsc = nla_data(tb[TCA_HFSC_FSC]);
		if (fsc->m1 == 0 && fsc->m2 == 0)
			fsc = NULL;
	}

	if (tb[TCA_HFSC_USC]) {
		usc = nla_data(tb[TCA_HFSC_USC]);
		if (usc->m1 == 0 && usc->m2 == 0)
			usc = NULL;
	}

	if (cl != NULL) {
		int old_flags;

		if (parentid) {
			if (cl->cl_parent &&
			    cl->cl_parent->cl_common.classid != parentid)
				return -EINVAL;
			if (cl->cl_parent == NULL && parentid != TC_H_ROOT)
				return -EINVAL;
		}
		cur_time = psched_get_time();

		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    true,
						    tca[TCA_RATE]);
			if (err)
				return err;
		}

		sch_tree_lock(sch);
		old_flags = cl->cl_flags;

		if (rsc != NULL)
			hfsc_change_rsc(cl, rsc, cur_time);
		if (fsc != NULL)
			hfsc_change_fsc(cl, fsc);
		if (usc != NULL)
			hfsc_change_usc(cl, usc, cur_time);

		if (cl->qdisc->q.qlen != 0) {
			int len = qdisc_peek_len(cl->qdisc);

			if (cl->cl_flags & HFSC_RSC) {
				if (old_flags & HFSC_RSC)
					update_ed(cl, len);
				else
					init_ed(cl, len);
			}

			if (cl->cl_flags & HFSC_FSC) {
				if (old_flags & HFSC_FSC)
					update_vf(cl, 0, cur_time);
				else
					init_vf(cl, len);
			}
		}
		sch_tree_unlock(sch);

		return 0;
	}

	if (parentid == TC_H_ROOT)
		return -EEXIST;

	parent = &q->root;
	if (parentid) {
		parent = hfsc_find_class(parentid, sch);
		if (parent == NULL)
			return -ENOENT;
	}

	if (classid == 0 || TC_H_MAJ(classid ^ sch->handle) != 0)
		return -EINVAL;
	if (hfsc_find_class(classid, sch))
		return -EEXIST;

	if (rsc == NULL && fsc == NULL)
		return -EINVAL;

	cl = kzalloc(sizeof(struct hfsc_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
	if (err) {
		kfree(cl);
		return err;
	}

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
					NULL, true, tca[TCA_RATE]);
		if (err) {
			tcf_block_put(cl->block);
			kfree(cl);
			return err;
		}
	}

	if (rsc != NULL)
		hfsc_change_rsc(cl, rsc, 0);
	if (fsc != NULL)
		hfsc_change_fsc(cl, fsc);
	if (usc != NULL)
		hfsc_change_usc(cl, usc, 0);

	cl->cl_common.classid = classid;
	cl->sched     = q;
	cl->cl_parent = parent;
	cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				      classid, NULL);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;
	else
		qdisc_hash_add(cl->qdisc, true);
	INIT_LIST_HEAD(&cl->children);
	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;

	sch_tree_lock(sch);
	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
	list_add_tail(&cl->siblings, &parent->children);
	if (parent->level == 0)
		qdisc_purge_queue(parent->qdisc);
	hfsc_adjust_levels(parent);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;
}

static void
hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
	struct hfsc_sched *q = qdisc_priv(sch);

	tcf_block_put(cl->block);
	qdisc_put(cl->qdisc);
	gen_kill_estimator(&cl->rate_est);
	if (cl != &q->root)
		kfree(cl);
}

static int
hfsc_delete_class(struct Qdisc *sch, unsigned long arg,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root)
		return -EBUSY;

	sch_tree_lock(sch);

	list_del(&cl->siblings);
	hfsc_adjust_levels(cl->cl_parent);

	qdisc_purge_queue(cl->qdisc);
	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);

	sch_tree_unlock(sch);

	hfsc_destroy_class(sch, cl);
	return 0;
}
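
/*
 * Classification order: a leaf class whose classid matches skb->priority
 * is used directly; otherwise the filter chains are walked from the root
 * downwards (a filter may only point to a class deeper in the hierarchy);
 * if that fails, the default class (q->defcls) is tried.
 */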
static struct hfsc_class *
hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *head, *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0 &&
	    (cl = hfsc_find_class(skb->priority, sch)) != NULL)
		if (cl->level == 0)
			return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	head = &q->root;
	tcf = rcu_dereference_bh(q->root.filter_list);
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct hfsc_class *)res.class;
		if (!cl) {
			cl = hfsc_find_class(res.classid, sch);
			if (!cl)
				break; /* filter selected invalid classid */
			if (cl->level >= head->level)
				break; /* filter may only point downwards */
		}

		if (cl->level == 0)
			return cl; /* hit leaf class */

		/* apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
		head = cl;
	}

	/* classification failed, try default class */
	cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (cl == NULL || cl->level > 0)
		return NULL;

	return cl;
}

static int
hfsc_graft_class(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		 struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level > 0)
		return -EINVAL;
	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					cl->cl_common.classid, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *
hfsc_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl->level == 0)
		return cl->qdisc;

	return NULL;
}

static void
hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	/*
	 * update_vf(cl, 0, 0) deactivates the class for link sharing and
	 * removes it from the vt tree; the eligible tree entry must be
	 * removed explicitly.
	 */
	update_vf(cl, 0, 0);
	if (cl->cl_flags & HFSC_RSC)
		eltree_remove(cl);
}

static unsigned long
hfsc_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)hfsc_find_class(classid, sch);
}

static unsigned long
hfsc_bind_tcf(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	struct hfsc_class *p = (struct hfsc_class *)parent;
	struct hfsc_class *cl = hfsc_find_class(classid, sch);

	if (cl != NULL) {
		if (p != NULL && p->level <= cl->level)
			return 0;
		cl->filter_cnt++;
	}

	return (unsigned long)cl;
}

static void
hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	cl->filter_cnt--;
}

static struct tcf_block *hfsc_tcf_block(struct Qdisc *sch, unsigned long arg,
					struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl = (struct hfsc_class *)arg;

	if (cl == NULL)
		cl = &q->root;

	return cl->block;
}

static int
hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
{
	struct tc_service_curve tsc;

	tsc.m1 = sm2m(sc->sm1);
	tsc.d  = dx2d(sc->dx);
	tsc.m2 = sm2m(sc->sm2);
	if (nla_put(skb, attr, sizeof(tsc), &tsc))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_curves(struct sk_buff *skb, struct hfsc_class *cl)
{
	if ((cl->cl_flags & HFSC_RSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_RSC, &cl->cl_rsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_FSC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_FSC, &cl->cl_fsc) < 0))
		goto nla_put_failure;

	if ((cl->cl_flags & HFSC_USC) &&
	    (hfsc_dump_sc(skb, TCA_HFSC_USC, &cl->cl_usc) < 0))
		goto nla_put_failure;

	return skb->len;

 nla_put_failure:
	return -1;
}

static int
hfsc_dump_class(struct Qdisc *sch, unsigned long arg, struct sk_buff *skb,
		struct tcmsg *tcm)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->cl_common.classid :
					  TC_H_ROOT;
	tcm->tcm_handle = cl->cl_common.classid;
	if (cl->level == 0)
		tcm->tcm_info = cl->qdisc->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (hfsc_dump_curves(skb, cl) < 0)
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

 nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int
hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
		      struct gnet_dump *d)
{
	struct hfsc_class *cl = (struct hfsc_class *)arg;
	struct tc_hfsc_stats xstats;
	__u32 qlen;

	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
	xstats.level  = cl->level;
	xstats.period = cl->cl_vtperiod;
	xstats.work   = cl->cl_total;
	xstats.rtwork = cl->cl_cumul;

	if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void
hfsc_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i],
				     cl_common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static void
hfsc_schedule_watchdog(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	u64 next_time = 0;

	cl = eltree_get_minel(q);
	if (cl)
		next_time = cl->cl_e;
	if (q->root.cl_cfmin != 0) {
		if (next_time == 0 || next_time > q->root.cl_cfmin)
			next_time = q->root.cl_cfmin;
	}
	if (next_time)
		qdisc_watchdog_schedule(&q->watchdog, next_time);
}

static int
hfsc_init_qdisc(struct Qdisc *sch, struct nlattr *opt,
		struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	q->defcls = qopt->defcls;
	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	q->eligible = RB_ROOT;

	err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack);
	if (err)
		return err;

	gnet_stats_basic_sync_init(&q->root.bstats);
	q->root.cl_common.classid = sch->handle;
	q->root.sched = q;
	q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  sch->handle, NULL);
	if (q->root.qdisc == NULL)
		q->root.qdisc = &noop_qdisc;
	else
		qdisc_hash_add(q->root.qdisc, true);
	INIT_LIST_HEAD(&q->root.children);
	q->root.vt_tree = RB_ROOT;
	q->root.cf_tree = RB_ROOT;

	qdisc_class_hash_insert(&q->clhash, &q->root.cl_common);
	qdisc_class_hash_grow(sch, &q->clhash);

	return 0;
}

static int
hfsc_change_qdisc(struct Qdisc *sch, struct nlattr *opt,
		  struct netlink_ext_ack *extack)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct tc_hfsc_qopt *qopt;

	if (opt == NULL || nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	sch_tree_lock(sch);
	q->defcls = qopt->defcls;
	sch_tree_unlock(sch);

	return 0;
}

static void
hfsc_reset_class(struct hfsc_class *cl)
{
	cl->cl_total        = 0;
	cl->cl_cumul        = 0;
	cl->cl_d            = 0;
	cl->cl_e            = 0;
	cl->cl_vt           = 0;
	cl->cl_vtadj        = 0;
	cl->cl_cvtmin       = 0;
	cl->cl_cvtoff       = 0;
	cl->cl_vtperiod     = 0;
	cl->cl_parentperiod = 0;
	cl->cl_f            = 0;
	cl->cl_myf          = 0;
	cl->cl_cfmin        = 0;
	cl->cl_nactive      = 0;

	cl->vt_tree = RB_ROOT;
	cl->cf_tree = RB_ROOT;
	qdisc_reset(cl->qdisc);

	if (cl->cl_flags & HFSC_RSC)
		rtsc_init(&cl->cl_deadline, &cl->cl_rsc, 0, 0);
	if (cl->cl_flags & HFSC_FSC)
		rtsc_init(&cl->cl_virtual, &cl->cl_fsc, 0, 0);
	if (cl->cl_flags & HFSC_USC)
		rtsc_init(&cl->cl_ulimit, &cl->cl_usc, 0, 0);
}

static void
hfsc_reset_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode)
			hfsc_reset_class(cl);
	}
	q->eligible = RB_ROOT;
	qdisc_watchdog_cancel(&q->watchdog);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void
hfsc_destroy_qdisc(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct hfsc_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  cl_common.hnode)
			hfsc_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	qdisc_watchdog_cancel(&q->watchdog);
}

static int
hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_hfsc_qopt qopt;

	qopt.defcls = q->defcls;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;
	return skb->len;

 nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
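
/*
 * Enqueue: classify the packet, hand it to the class's own qdisc and, if
 * the class just became backlogged, initialise its eligible/deadline
 * state (real-time curve) and activate it for link sharing (fair curve).
 */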
static int
hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct hfsc_class *cl;
	int err;
	bool first;

	cl = hfsc_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}

	first = !cl->qdisc->q.qlen;
	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	if (first) {
		if (cl->cl_flags & HFSC_RSC)
			init_ed(cl, len);
		if (cl->cl_flags & HFSC_FSC)
			init_vf(cl, len);
		/*
		 * If this is the first packet, isolate the head so an
		 * eventual head drop before the first dequeue has no
		 * chance to invalidate the deadline computed above.
		 */
		if (cl->cl_flags & HFSC_RSC)
			cl->qdisc->ops->peek(cl->qdisc);

	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
}
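
/*
 * Dequeue: the real-time criterion is served first: among the classes
 * that are already eligible at cur_time, pick the one with the smallest
 * deadline.  Only such service is charged to cl_cumul.  If no class is
 * eligible, fall back to the link-sharing criterion and pick the leaf
 * with the minimum virtual time that also fits (cl_f <= cur_time); if
 * nothing fits either, arm the watchdog for the next eligible/fit time.
 */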
static struct sk_buff *
hfsc_dequeue(struct Qdisc *sch)
{
	struct hfsc_sched *q = qdisc_priv(sch);
	struct hfsc_class *cl;
	struct sk_buff *skb;
	u64 cur_time;
	unsigned int next_len;
	int realtime = 0;

	if (sch->q.qlen == 0)
		return NULL;

	cur_time = psched_get_time();

	/*
	 * if there are eligible classes, use real-time criteria.
	 * find the class with the minimum deadline among
	 * the eligible classes.
	 */
	cl = eltree_get_mindl(q, cur_time);
	if (cl) {
		realtime = 1;
	} else {
		/*
		 * use link-sharing criteria
		 * get the class with the minimum vt in the hierarchy
		 */
		cl = vttree_get_minvt(&q->root, cur_time);
		if (cl == NULL) {
			qdisc_qstats_overlimit(sch);
			hfsc_schedule_watchdog(sch);
			return NULL;
		}
	}

	skb = qdisc_dequeue_peeked(cl->qdisc);
	if (skb == NULL) {
		qdisc_warn_nonwc("HFSC", cl->qdisc);
		return NULL;
	}

	bstats_update(&cl->bstats, skb);
	update_vf(cl, qdisc_pkt_len(skb), cur_time);
	if (realtime)
		cl->cl_cumul += qdisc_pkt_len(skb);

	if (cl->cl_flags & HFSC_RSC) {
		if (cl->qdisc->q.qlen != 0) {
			/* update eligible time and deadline */
			next_len = qdisc_peek_len(cl->qdisc);
			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		} else {
			/* the class becomes passive */
			eltree_remove(cl);
		}
	}

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	return skb;
}

static const struct Qdisc_class_ops hfsc_class_ops = {
	.change		= hfsc_change_class,
	.delete		= hfsc_delete_class,
	.graft		= hfsc_graft_class,
	.leaf		= hfsc_class_leaf,
	.qlen_notify	= hfsc_qlen_notify,
	.find		= hfsc_search_class,
	.bind_tcf	= hfsc_bind_tcf,
	.unbind_tcf	= hfsc_unbind_tcf,
	.tcf_block	= hfsc_tcf_block,
	.dump		= hfsc_dump_class,
	.dump_stats	= hfsc_dump_class_stats,
	.walk		= hfsc_walk
};

static struct Qdisc_ops hfsc_qdisc_ops __read_mostly = {
	.id		= "hfsc",
	.init		= hfsc_init_qdisc,
	.change		= hfsc_change_qdisc,
	.reset		= hfsc_reset_qdisc,
	.destroy	= hfsc_destroy_qdisc,
	.dump		= hfsc_dump_qdisc,
	.enqueue	= hfsc_enqueue,
	.dequeue	= hfsc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.cl_ops		= &hfsc_class_ops,
	.priv_size	= sizeof(struct hfsc_sched),
	.owner		= THIS_MODULE
};

static int __init
hfsc_init(void)
{
	return register_qdisc(&hfsc_qdisc_ops);
}

static void __exit
hfsc_cleanup(void)
{
	unregister_qdisc(&hfsc_qdisc_ops);
}

MODULE_LICENSE("GPL");
module_init(hfsc_init);
module_exit(hfsc_cleanup);