// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c	  Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek
 * Copyright (c) 2011 Eric Dumazet
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wfeng/blue/blue.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS	(1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK	(SFB_NUMBUCKETS - 1)
#define SFB_LEVELS	(32 / SFB_BUCKET_SHIFT) /* L */
#define SFB_MAX_PROB	0xFFFF /* maximum marking probability (Q0.16) */

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
	u16	qlen;	/* length of virtual queue */
	u16	p_mark;	/* marking probability */
};

/* We use a double buffering right before hash change
 * (Section 4.4 of SFB reference : moving hash functions)
 */
struct sfb_bins {
	siphash_key_t	  perturbation; /* siphash key */
	struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};

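/* Sizing note (a back-of-the-envelope check, not from the original source):
 * each bucket is 4 bytes (two u16s), so one slot holds
 * SFB_LEVELS * SFB_NUMBUCKETS = 8 * 16 = 128 buckets = 512 bytes, and the
 * two double-buffered slots below cost about 1 KB plus two siphash keys -
 * small enough to keep per-qdisc.
 */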
struct sfb_sched_data {
	struct Qdisc	*qdisc;
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	unsigned long	rehash_interval;
	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
	u32		max;
	u32		bin_size;	/* maximum queue length per bin */
	u32		increment;	/* d1 */
	u32		decrement;	/* d2 */
	u32		limit;		/* HARD maximal queue length */
	u32		penalty_rate;
	u32		penalty_burst;
	u32		tokens_avail;
	unsigned long	rehash_time;
	unsigned long	token_time;

	u8		slot;		/* current active bins (0 or 1) */
	bool		double_buffering;
	struct	sfb_bins bins[2];

	struct {
		u32	earlydrop;
		u32	penaltydrop;
		u32	bucketdrop;
		u32	queuedrop;
		u32	childdrop;	/* drops in child qdisc */
		u32	marked;		/* ECN mark */
	} stats;
};

/* Each queued skb might be hashed on one or two bins
 * We store in skb_cb the two hash values.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
	u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
	return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
	return sfb_skb_cb(skb)->hashes[slot];
}

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0).
 * Additions saturate at SFB_MAX_PROB, subtractions at 0.
 */
static u32 prob_plus(u32 p1, u32 p2)
{
	u32 res = p1 + p2;

	return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
	return p1 > p2 ? p1 - p2 : 0;
}

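/* Worked example (illustrative, using the module defaults further below):
 * increment = (SFB_MAX_PROB + 500) / 1000 = 66, i.e. roughly 0.1% per event
 * in Q0.16. A bucket whose virtual queue stays at or above bin_size for 100
 * consecutive packets ramps p_mark by about 100 * 66 = 6600, a marking
 * probability near 10%; after roughly 1000 such packets prob_plus()
 * saturates p_mark at SFB_MAX_PROB (~1.0).
 */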
static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen < 0xFFFF)
			b[hash].qlen++;
		b += SFB_NUMBUCKETS;
	}
}

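/* How the hash is consumed (illustrative example, not from the original
 * source): with SFB_BUCKET_SHIFT = 4, a 32-bit hash such as 0x1234ABCD is
 * split into 8 nibbles, one per level: bucket 0xD at level 0, 0xC at
 * level 1, 0xB at level 2, ... 0x1 at level 7. A packet thus touches
 * exactly one bucket per level, and only a flow colliding with it in ALL
 * 8 levels shares its entire bucket set.
 */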
static void increment_qlen(const struct sfb_skb_cb *cb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = cb->hashes[0];
	if (sfbhash)
		increment_one_qlen(sfbhash, 0, q);

	sfbhash = cb->hashes[1];
	if (sfbhash)
		increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
			       struct sfb_sched_data *q)
{
	int i;
	struct sfb_bucket *b = &q->bins[slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b[hash].qlen > 0)
			b[hash].qlen--;
		b += SFB_NUMBUCKETS;
	}
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
	u32 sfbhash;

	sfbhash = sfb_hash(skb, 0);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 0, q);

	sfbhash = sfb_hash(skb, 1);
	if (sfbhash)
		decrement_one_qlen(sfbhash, 1, q);
}

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
	b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
	memset(&q->bins, 0, sizeof(q->bins));
}

/* compute max qlen, max p_mark, and avg p_mark */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
	int i;
	u32 qlen = 0, prob = 0, totalpm = 0;
	const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

	for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
		if (qlen < b->qlen)
			qlen = b->qlen;
		totalpm += b->p_mark;
		if (prob < b->p_mark)
			prob = b->p_mark;
		b++;
	}
	*prob_r = prob;
	*avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
	return qlen;
}

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
	get_random_bytes(&q->bins[slot].perturbation,
			 sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
	sfb_init_perturbation(q->slot, q);
	q->slot ^= 1;
	q->double_buffering = false;
}

/* Non elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
	if (q->penalty_rate == 0 || q->penalty_burst == 0)
		return true;

	if (q->tokens_avail < 1) {
		unsigned long age = min(10UL * HZ, jiffies - q->token_time);

		q->tokens_avail = (age * q->penalty_rate) / HZ;
		if (q->tokens_avail > q->penalty_burst)
			q->tokens_avail = q->penalty_burst;
		q->token_time = jiffies;
		if (q->tokens_avail < 1)
			return true;
	}

	q->tokens_avail--;
	return false;
}

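/* Token-bucket arithmetic, worked through with the defaults further below
 * (penalty_rate = 10, penalty_burst = 20); the numbers are illustrative:
 * after 0.5 s of silence, age = HZ/2 and tokens_avail = 10/2 = 5, so the
 * next 5 penalized packets pass (sfb_rate_limit() returns false) and the
 * 6th is dropped. An idle period of 2 s or more refills to the burst cap
 * of 20 tokens; elapsed time is capped at 10 s before conversion.
 */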
static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
			 int *qerr, u32 *salt)
{
	struct tcf_result res;
	int result;

	result = tcf_classify(skb, NULL, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		*salt = TC_H_MIN(res.classid);
		return true;
	}
	return false;
}

static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	unsigned int len = qdisc_pkt_len(skb);
	struct Qdisc *child = q->qdisc;
	struct tcf_proto *fl;
	struct sfb_skb_cb cb;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, sfbhash;
	u32 slot = q->slot;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		qdisc_qstats_overlimit(sch);
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			q->double_buffering = true;
		}
	}

	fl = rcu_dereference_bh(q->filter_list);
	if (fl) {
		u32 salt;

		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, fl, &ret, &salt))
			goto other_drop;
		sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
	} else {
		sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
	}

	/* A zero hash is reserved to mean "slot unused" in skb_cb */
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		qdisc_qstats_overlimit(sch);
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			sfbhash = skb_get_hash_perturb(skb,
						       &q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			qdisc_qstats_overlimit(sch);
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}

	r = prandom_u32() & SFB_MAX_PROB;

	if (unlikely(r < p_min)) {
		if (unlikely(p_min > SFB_MAX_PROB / 2)) {
			/* Marking probability is above 1/2, so ECN marking
			 * alone can no longer slow this flow down: drop
			 * outright with probability (p_min - 1/2) * 2,
			 * which reaches 1.0 as p_min approaches 1.0.
			 */
			if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
				q->stats.earlydrop++;
				goto drop;
			}
		}
		if (INET_ECN_set_ce(skb)) {
			q->stats.marked++;
		} else {
			q->stats.earlydrop++;
			goto drop;
		}
	}

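	/* Numeric sanity check (illustrative): with p_min = 0.75 in Q0.16
	 * (49151), r < p_min for ~75% of packets; of all packets,
	 * (p_min - 1/2) * 2 = 50% are dropped early, ~25% draw
	 * r in [1/2, 3/4) and are ECN-marked (or dropped if not
	 * ECN-capable), and the remaining ~25% are enqueued untouched.
	 */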
enqueue:
	memcpy(&cb, sfb_skb_cb(skb), sizeof(cb));
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
		increment_qlen(&cb, q);
	} else if (net_xmit_drop_count(ret)) {
		q->stats.childdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
other_drop:
	if (ret & __NET_XMIT_BYPASS)
		qdisc_qstats_drop(sch);
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	struct sk_buff *skb;

	skb = child->dequeue(q->qdisc);

	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		decrement_qlen(skb, q);
	}

	return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return a dropped skb */

static void sfb_reset(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
	[TCA_SFB_PARMS]	= { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
	.rehash_interval = 600 * MSEC_PER_SEC,
	.warmup_time = 60 * MSEC_PER_SEC,
	.limit = 0,
	.max = 25,
	.bin_size = 20,
	.increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
	.decrement = (SFB_MAX_PROB + 3000) / 6000,
	.penalty_rate = 10,
	.penalty_burst = 20,
};

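/* Typical configuration from user space (a sketch; exact option names are
 * those of the tc-sfb(8) front end, which maps them onto tc_sfb_qopt):
 *
 *   tc qdisc add dev eth0 root sfb
 *   tc qdisc change dev eth0 root sfb limit 1000 max 25 penalty_rate 10
 *
 * With .limit = 0 in the defaults above, sfb_change() falls back to the
 * device's tx_queue_len for the hard queue limit.
 */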
static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child, *old;
	struct nlattr *tb[TCA_SFB_MAX + 1];
	const struct tc_sfb_qopt *ctl = &sfb_default_ops;
	u32 limit;
	int err;

	if (opt) {
		err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
						  sfb_policy, NULL);
		if (err < 0)
			return -EINVAL;

		if (tb[TCA_SFB_PARMS] == NULL)
			return -EINVAL;

		ctl = nla_data(tb[TCA_SFB_PARMS]);
	}

	limit = ctl->limit;
	if (limit == 0)
		limit = qdisc_dev(sch)->tx_queue_len;

	child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
	if (IS_ERR(child))
		return PTR_ERR(child);

	if (child != &noop_qdisc)
		qdisc_hash_add(child, true);
	sch_tree_lock(sch);

	qdisc_purge_queue(q->qdisc);
	old = q->qdisc;
	q->qdisc = child;

	q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
	q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
	q->rehash_time = jiffies;
	q->limit = limit;
	q->increment = ctl->increment;
	q->decrement = ctl->decrement;
	q->max = ctl->max;
	q->bin_size = ctl->bin_size;
	q->penalty_rate = ctl->penalty_rate;
	q->penalty_burst = ctl->penalty_burst;
	q->tokens_avail = ctl->penalty_burst;
	q->token_time = jiffies;

	q->slot = 0;
	q->double_buffering = false;
	sfb_zero_all_buckets(q);
	sfb_init_perturbation(0, q);
	sfb_init_perturbation(1, q);

	sch_tree_unlock(sch);
	qdisc_put(old);

	return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	int err;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	q->qdisc = &noop_qdisc;
	return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;
	struct tc_sfb_qopt opt = {
		.rehash_interval = jiffies_to_msecs(q->rehash_interval),
		.warmup_time = jiffies_to_msecs(q->warmup_time),
		.limit = q->limit,
		.max = q->max,
		.bin_size = q->bin_size,
		.increment = q->increment,
		.decrement = q->decrement,
		.penalty_rate = q->penalty_rate,
		.penalty_burst = q->penalty_burst,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct tc_sfb_xstats st = {
		.earlydrop = q->stats.earlydrop,
		.penaltydrop = q->stats.penaltydrop,
		.bucketdrop = q->stats.bucketdrop,
		.queuedrop = q->stats.queuedrop,
		.childdrop = q->stats.childdrop,
		.marked = q->stats.marked,
	};

	st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg,
			    struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
		      struct netlink_ext_ack *extack)
{
	return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfb_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}


static const struct Qdisc_class_ops sfb_class_ops = {
	.graft		= sfb_graft,
	.leaf		= sfb_leaf,
	.find		= sfb_find,
	.change		= sfb_change_class,
	.delete		= sfb_delete,
	.walk		= sfb_walk,
	.tcf_block	= sfb_tcf_block,
	.bind_tcf	= sfb_bind,
	.unbind_tcf	= sfb_unbind,
	.dump		= sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
	.id		= "sfb",
	.priv_size	= sizeof(struct sfb_sched_data),
	.cl_ops		= &sfb_class_ops,
	.enqueue	= sfb_enqueue,
	.dequeue	= sfb_dequeue,
	.peek		= sfb_peek,
	.init		= sfb_init,
	.reset		= sfb_reset,
	.destroy	= sfb_destroy,
	.change		= sfb_change,
	.dump		= sfb_dump,
	.dump_stats	= sfb_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init sfb_module_init(void)
{
	return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
	unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");