/*
 * net/sched/sch_generic.c	Generic packet scheduler routines:
 * default qdisc handling, per-device transmit queue activation and
 * deactivation, the built-in noop/noqueue/pfifo_fast qdiscs, and
 * qdisc allocation/teardown helpers.
 */

0010 #include <linux/bitops.h>
0011 #include <linux/module.h>
0012 #include <linux/types.h>
0013 #include <linux/kernel.h>
0014 #include <linux/sched.h>
0015 #include <linux/string.h>
0016 #include <linux/errno.h>
0017 #include <linux/netdevice.h>
0018 #include <linux/skbuff.h>
0019 #include <linux/rtnetlink.h>
0020 #include <linux/init.h>
0021 #include <linux/rcupdate.h>
0022 #include <linux/list.h>
0023 #include <linux/slab.h>
0024 #include <linux/if_vlan.h>
0025 #include <linux/skb_array.h>
0026 #include <linux/if_macvlan.h>
0027 #include <net/sch_generic.h>
0028 #include <net/pkt_sched.h>
0029 #include <net/dst.h>
0030 #include <trace/events/qdisc.h>
0031 #include <trace/events/net.h>
0032 #include <net/xfrm.h>
0033
/* Qdisc to use by default */
0035 const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
0036 EXPORT_SYMBOL(default_qdisc_ops);
0037
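/* Called after a dequeue found the tx queue frozen or stopped: clear
 * __QDISC_STATE_MISSED, then re-set it if the queue has become ready again
 * (so the pending run is not lost), or mark the qdisc as DRAINING otherwise.
 */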
0038 static void qdisc_maybe_clear_missed(struct Qdisc *q,
0039 const struct netdev_queue *txq)
0040 {
0041 clear_bit(__QDISC_STATE_MISSED, &q->state);
/* Make sure the netif_xmit_frozen_or_stopped() check below
 * happens after clearing __QDISC_STATE_MISSED.
 */
0046 smp_mb__after_atomic();
/* Check netif_xmit_frozen_or_stopped() again so that a
 * __QDISC_STATE_MISSED set by netif_tx_wake_queue() rescheduling
 * net_tx_action() is not lost: re-set MISSED if the queue was
 * woken up meanwhile, otherwise mark the qdisc as draining.
 */
0053 if (!netif_xmit_frozen_or_stopped(txq))
0054 set_bit(__QDISC_STATE_MISSED, &q->state);
0055 else
0056 set_bit(__QDISC_STATE_DRAINING, &q->state);
0057 }
0058
/* Main transmission queue.
 *
 * Modifications to data participating in scheduling must be protected with
 * the qdisc_lock(qdisc) spinlock:
 * - enqueue and dequeue are serialized via the qdisc root lock
 * - updates to the qdisc tree and tree walking are done under the RTNL mutex
 */

0070 #define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
0071
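/* skb_bad_txq holds packets whose tx queue was stopped when they were
 * dequeued. Pull the head packet back out once its queue can accept traffic
 * again; return SKB_XOFF_MAGIC if that queue is still frozen or stopped.
 */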
0072 static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
0073 {
0074 const struct netdev_queue *txq = q->dev_queue;
0075 spinlock_t *lock = NULL;
0076 struct sk_buff *skb;
0077
0078 if (q->flags & TCQ_F_NOLOCK) {
0079 lock = qdisc_lock(q);
0080 spin_lock(lock);
0081 }
0082
0083 skb = skb_peek(&q->skb_bad_txq);
0084 if (skb) {
/* Re-check the skb's own tx queue before dequeueing it. */
0086 txq = skb_get_tx_queue(txq->dev, skb);
0087 if (!netif_xmit_frozen_or_stopped(txq)) {
0088 skb = __skb_dequeue(&q->skb_bad_txq);
0089 if (qdisc_is_percpu_stats(q)) {
0090 qdisc_qstats_cpu_backlog_dec(q, skb);
0091 qdisc_qstats_cpu_qlen_dec(q);
0092 } else {
0093 qdisc_qstats_backlog_dec(q, skb);
0094 q->q.qlen--;
0095 }
0096 } else {
0097 skb = SKB_XOFF_MAGIC;
0098 qdisc_maybe_clear_missed(q, txq);
0099 }
0100 }
0101
0102 if (lock)
0103 spin_unlock(lock);
0104
0105 return skb;
0106 }
0107
0108 static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
0109 {
0110 struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
0111
0112 if (unlikely(skb))
0113 skb = __skb_dequeue_bad_txq(q);
0114
0115 return skb;
0116 }
0117
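/* Park an skb whose tx queue is currently stopped on the qdisc's bad_txq
 * list, keeping it accounted in the backlog/qlen statistics.
 */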
0118 static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
0119 struct sk_buff *skb)
0120 {
0121 spinlock_t *lock = NULL;
0122
0123 if (q->flags & TCQ_F_NOLOCK) {
0124 lock = qdisc_lock(q);
0125 spin_lock(lock);
0126 }
0127
0128 __skb_queue_tail(&q->skb_bad_txq, skb);
0129
0130 if (qdisc_is_percpu_stats(q)) {
0131 qdisc_qstats_cpu_backlog_inc(q, skb);
0132 qdisc_qstats_cpu_qlen_inc(q);
0133 } else {
0134 qdisc_qstats_backlog_inc(q, skb);
0135 q->q.qlen++;
0136 }
0137
0138 if (lock)
0139 spin_unlock(lock);
0140 }
0141
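/* Put a (possibly chained) skb back onto q->gso_skb after a failed transmit
 * and make sure the qdisc is run again later.
 */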
0142 static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
0143 {
0144 spinlock_t *lock = NULL;
0145
0146 if (q->flags & TCQ_F_NOLOCK) {
0147 lock = qdisc_lock(q);
0148 spin_lock(lock);
0149 }
0150
0151 while (skb) {
0152 struct sk_buff *next = skb->next;
0153
0154 __skb_queue_tail(&q->gso_skb, skb);
0155
/* The requeued skb still counts towards backlog/qlen. */
0157 if (qdisc_is_percpu_stats(q)) {
0158 qdisc_qstats_cpu_requeues_inc(q);
0159 qdisc_qstats_cpu_backlog_inc(q, skb);
0160 qdisc_qstats_cpu_qlen_inc(q);
0161 } else {
0162 q->qstats.requeues++;
0163 qdisc_qstats_backlog_inc(q, skb);
0164 q->q.qlen++;
0165 }
0166
0167 skb = next;
0168 }
0169
0170 if (lock) {
0171 spin_unlock(lock);
0172 set_bit(__QDISC_STATE_MISSED, &q->state);
0173 } else {
0174 __netif_schedule(q);
0175 }
0176 }
0177
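/* Try to bulk additional packets behind skb, up to the byte budget the tx
 * queue advertises via qdisc_avail_bulklimit() (BQL), so one driver call can
 * transmit several packets.
 */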
0178 static void try_bulk_dequeue_skb(struct Qdisc *q,
0179 struct sk_buff *skb,
0180 const struct netdev_queue *txq,
0181 int *packets)
0182 {
0183 int bytelimit = qdisc_avail_bulklimit(txq) - skb->len;
0184
0185 while (bytelimit > 0) {
0186 struct sk_buff *nskb = q->dequeue(q);
0187
0188 if (!nskb)
0189 break;
0190
0191 bytelimit -= nskb->len;
0192 skb->next = nskb;
0193 skb = nskb;
0194 (*packets)++;
0195 }
0196 skb_mark_not_on_list(skb);
0197 }
0198
/* Slower variant of try_bulk_dequeue_skb(): only chain up skbs that map to
 * the same tx queue; a packet for a different queue ends the bulk and is
 * parked on skb_bad_txq.
 */
0202 static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
0203 struct sk_buff *skb,
0204 int *packets)
0205 {
0206 int mapping = skb_get_queue_mapping(skb);
0207 struct sk_buff *nskb;
0208 int cnt = 0;
0209
0210 do {
0211 nskb = q->dequeue(q);
0212 if (!nskb)
0213 break;
0214 if (unlikely(skb_get_queue_mapping(nskb) != mapping)) {
0215 qdisc_enqueue_skb_bad_txq(q, nskb);
0216 break;
0217 }
0218 skb->next = nskb;
0219 skb = nskb;
0220 } while (++cnt < 8);
0221 (*packets) += cnt;
0222 skb_mark_not_on_list(skb);
0223 }
0224
/* Note that dequeue_skb() can return an skb list (via skb->next).
 * A requeued skb (via q->gso_skb) can also be an skb list.
 */
0228 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
0229 int *packets)
0230 {
0231 const struct netdev_queue *txq = q->dev_queue;
0232 struct sk_buff *skb = NULL;
0233
0234 *packets = 1;
0235 if (unlikely(!skb_queue_empty(&q->gso_skb))) {
0236 spinlock_t *lock = NULL;
0237
0238 if (q->flags & TCQ_F_NOLOCK) {
0239 lock = qdisc_lock(q);
0240 spin_lock(lock);
0241 }
0242
0243 skb = skb_peek(&q->gso_skb);
0244
/* skb may be NULL if another CPU pulled it off gso_skb
 * between the empty check and taking the lock.
 */
0248 if (!skb) {
0249 if (lock)
0250 spin_unlock(lock);
0251 goto validate;
0252 }
0253
/* skbs sitting on gso_skb have already been validated */
0255 *validate = false;
0256 if (xfrm_offload(skb))
0257 *validate = true;
0258
0259 txq = skb_get_tx_queue(txq->dev, skb);
0260 if (!netif_xmit_frozen_or_stopped(txq)) {
0261 skb = __skb_dequeue(&q->gso_skb);
0262 if (qdisc_is_percpu_stats(q)) {
0263 qdisc_qstats_cpu_backlog_dec(q, skb);
0264 qdisc_qstats_cpu_qlen_dec(q);
0265 } else {
0266 qdisc_qstats_backlog_dec(q, skb);
0267 q->q.qlen--;
0268 }
0269 } else {
0270 skb = NULL;
0271 qdisc_maybe_clear_missed(q, txq);
0272 }
0273 if (lock)
0274 spin_unlock(lock);
0275 goto trace;
0276 }
0277 validate:
0278 *validate = true;
0279
0280 if ((q->flags & TCQ_F_ONETXQUEUE) &&
0281 netif_xmit_frozen_or_stopped(txq)) {
0282 qdisc_maybe_clear_missed(q, txq);
0283 return skb;
0284 }
0285
0286 skb = qdisc_dequeue_skb_bad_txq(q);
0287 if (unlikely(skb)) {
0288 if (skb == SKB_XOFF_MAGIC)
0289 return NULL;
0290 goto bulk;
0291 }
0292 skb = q->dequeue(q);
0293 if (skb) {
0294 bulk:
0295 if (qdisc_may_bulk(q))
0296 try_bulk_dequeue_skb(q, skb, txq, packets);
0297 else
0298 try_bulk_dequeue_skb_slow(q, skb, packets);
0299 }
0300 trace:
0301 trace_qdisc_dequeue(q, txq, *packets, skb);
0302 return skb;
0303 }
0304
/*
 * Transmit possibly several skbs and handle the return status as required.
 * The owning qdisc's running state guarantees that only one CPU can execute
 * this function at a time.
 *
 * Returns to the caller:
 *	false - the skb was requeued (queue frozen or driver busy), back off
 *	true  - feel free to send more packets
 */
0314 bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
0315 struct net_device *dev, struct netdev_queue *txq,
0316 spinlock_t *root_lock, bool validate)
0317 {
0318 int ret = NETDEV_TX_BUSY;
0319 bool again = false;
0320
/* Release the qdisc root lock while transmitting. */
0322 if (root_lock)
0323 spin_unlock(root_lock);
0324
/* Note that skb validation (GSO, checksum, ...) happens outside the locks. */
0326 if (validate)
0327 skb = validate_xmit_skb_list(skb, dev, &again);
0328
0329 #ifdef CONFIG_XFRM_OFFLOAD
0330 if (unlikely(again)) {
0331 if (root_lock)
0332 spin_lock(root_lock);
0333
0334 dev_requeue_skb(skb, q);
0335 return false;
0336 }
0337 #endif
0338
0339 if (likely(skb)) {
0340 HARD_TX_LOCK(dev, txq, smp_processor_id());
0341 if (!netif_xmit_frozen_or_stopped(txq))
0342 skb = dev_hard_start_xmit(skb, dev, txq, &ret);
0343 else
0344 qdisc_maybe_clear_missed(q, txq);
0345
0346 HARD_TX_UNLOCK(dev, txq);
0347 } else {
0348 if (root_lock)
0349 spin_lock(root_lock);
0350 return true;
0351 }
0352
0353 if (root_lock)
0354 spin_lock(root_lock);
0355
0356 if (!dev_xmit_complete(ret)) {
/* Driver returned NETDEV_TX_BUSY (or an unexpected error) - requeue skb */
0358 if (unlikely(ret != NETDEV_TX_BUSY))
0359 net_warn_ratelimited("BUG %s code %d qlen %d\n",
0360 dev->name, ret, q->q.qlen);
0361
0362 dev_requeue_skb(skb, q);
0363 return false;
0364 }
0365
0366 return true;
0367 }
0368
/*
 * Dequeue one packet (or a bulk of packets) from the qdisc and hand it to
 * the driver.
 *
 * The qdisc running state guarantees that only one CPU processes this qdisc
 * at a time; for locked qdiscs, qdisc_lock(q) additionally serializes queue
 * accesses, while netif_tx_lock serializes accesses to the device driver.
 * qdisc_lock(q) and netif_tx_lock are mutually exclusive: if one is grabbed,
 * the other must be free.
 *
 * Note that this procedure can also end up being called via a watchdog timer.
 *
 * Returns to the caller:
 *	false - queue is empty or the device backed off
 *	true  - queue is not empty, keep going
 */
0388 static inline bool qdisc_restart(struct Qdisc *q, int *packets)
0389 {
0390 spinlock_t *root_lock = NULL;
0391 struct netdev_queue *txq;
0392 struct net_device *dev;
0393 struct sk_buff *skb;
0394 bool validate;
0395
/* Dequeue packet */
0397 skb = dequeue_skb(q, &validate, packets);
0398 if (unlikely(!skb))
0399 return false;
0400
0401 if (!(q->flags & TCQ_F_NOLOCK))
0402 root_lock = qdisc_lock(q);
0403
0404 dev = qdisc_dev(q);
0405 txq = skb_get_tx_queue(dev, skb);
0406
0407 return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
0408 }
0409
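/* Drain the qdisc until it is empty or the dev_tx_weight quota is used up;
 * when the quota runs out, defer the remaining work instead of hogging the
 * CPU.
 */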
0410 void __qdisc_run(struct Qdisc *q)
0411 {
0412 int quota = READ_ONCE(dev_tx_weight);
0413 int packets;
0414
0415 while (qdisc_restart(q, &packets)) {
0416 quota -= packets;
0417 if (quota <= 0) {
0418 if (q->flags & TCQ_F_NOLOCK)
0419 set_bit(__QDISC_STATE_MISSED, &q->state);
0420 else
0421 __netif_schedule(q);
0422
0423 break;
0424 }
0425 }
0426 }
0427
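/* Return the most recent trans_start timestamp across all tx queues of the
 * device.
 */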
0428 unsigned long dev_trans_start(struct net_device *dev)
0429 {
0430 unsigned long res = READ_ONCE(netdev_get_tx_queue(dev, 0)->trans_start);
0431 unsigned long val;
0432 unsigned int i;
0433
0434 for (i = 1; i < dev->num_tx_queues; i++) {
0435 val = READ_ONCE(netdev_get_tx_queue(dev, i)->trans_start);
0436 if (val && time_after(val, res))
0437 res = val;
0438 }
0439
0440 return res;
0441 }
0442 EXPORT_SYMBOL(dev_trans_start);
0443
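/* Freeze every tx queue of the device; used by netif_tx_lock() and around
 * ndo_tx_timeout() calls.
 */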
0444 static void netif_freeze_queues(struct net_device *dev)
0445 {
0446 unsigned int i;
0447 int cpu;
0448
0449 cpu = smp_processor_id();
0450 for (i = 0; i < dev->num_tx_queues; i++) {
0451 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
0452
/* We are the only thread of execution doing a freeze, but we
 * still have to grab the _xmit_lock in order to synchronize
 * with threads which are in the ->hard_start_xmit() handler
 * and have already checked the frozen bit.
 */
0459 __netif_tx_lock(txq, cpu);
0460 set_bit(__QUEUE_STATE_FROZEN, &txq->state);
0461 __netif_tx_unlock(txq);
0462 }
0463 }
0464
0465 void netif_tx_lock(struct net_device *dev)
0466 {
0467 spin_lock(&dev->tx_global_lock);
0468 netif_freeze_queues(dev);
0469 }
0470 EXPORT_SYMBOL(netif_tx_lock);
0471
0472 static void netif_unfreeze_queues(struct net_device *dev)
0473 {
0474 unsigned int i;
0475
0476 for (i = 0; i < dev->num_tx_queues; i++) {
0477 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
0478
/* No need to grab the _xmit_lock here: if the queue is not
 * stopped for another reason, we force a schedule.
 */
0483 clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
0484 netif_schedule_queue(txq);
0485 }
0486 }
0487
0488 void netif_tx_unlock(struct net_device *dev)
0489 {
0490 netif_unfreeze_queues(dev);
0491 spin_unlock(&dev->tx_global_lock);
0492 }
0493 EXPORT_SYMBOL(netif_tx_unlock);
0494
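/* Per-device transmit watchdog: if any stopped tx queue has seen no progress
 * within dev->watchdog_timeo, count a timeout on that queue and let the
 * driver recover via ndo_tx_timeout(), then re-arm the timer.
 */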
0495 static void dev_watchdog(struct timer_list *t)
0496 {
0497 struct net_device *dev = from_timer(dev, t, watchdog_timer);
0498 bool release = true;
0499
0500 spin_lock(&dev->tx_global_lock);
0501 if (!qdisc_tx_is_noop(dev)) {
0502 if (netif_device_present(dev) &&
0503 netif_running(dev) &&
0504 netif_carrier_ok(dev)) {
0505 int some_queue_timedout = 0;
0506 unsigned int i;
0507 unsigned long trans_start;
0508
0509 for (i = 0; i < dev->num_tx_queues; i++) {
0510 struct netdev_queue *txq;
0511
0512 txq = netdev_get_tx_queue(dev, i);
0513 trans_start = READ_ONCE(txq->trans_start);
0514 if (netif_xmit_stopped(txq) &&
0515 time_after(jiffies, (trans_start +
0516 dev->watchdog_timeo))) {
0517 some_queue_timedout = 1;
0518 atomic_long_inc(&txq->trans_timeout);
0519 break;
0520 }
0521 }
0522
0523 if (unlikely(some_queue_timedout)) {
0524 trace_net_dev_xmit_timeout(dev, i);
0525 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
0526 dev->name, netdev_drivername(dev), i);
0527 netif_freeze_queues(dev);
0528 dev->netdev_ops->ndo_tx_timeout(dev, i);
0529 netif_unfreeze_queues(dev);
0530 }
0531 if (!mod_timer(&dev->watchdog_timer,
0532 round_jiffies(jiffies +
0533 dev->watchdog_timeo)))
0534 release = false;
0535 }
0536 }
0537 spin_unlock(&dev->tx_global_lock);
0538
0539 if (release)
0540 netdev_put(dev, &dev->watchdog_dev_tracker);
0541 }
0542
0543 void __netdev_watchdog_up(struct net_device *dev)
0544 {
0545 if (dev->netdev_ops->ndo_tx_timeout) {
0546 if (dev->watchdog_timeo <= 0)
0547 dev->watchdog_timeo = 5*HZ;
0548 if (!mod_timer(&dev->watchdog_timer,
0549 round_jiffies(jiffies + dev->watchdog_timeo)))
0550 netdev_hold(dev, &dev->watchdog_dev_tracker,
0551 GFP_ATOMIC);
0552 }
0553 }
0554 EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
0555
0556 static void dev_watchdog_up(struct net_device *dev)
0557 {
0558 __netdev_watchdog_up(dev);
0559 }
0560
0561 static void dev_watchdog_down(struct net_device *dev)
0562 {
0563 netif_tx_lock_bh(dev);
0564 if (del_timer(&dev->watchdog_timer))
0565 netdev_put(dev, &dev->watchdog_dev_tracker);
0566 netif_tx_unlock_bh(dev);
0567 }
0568
/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
0575 void netif_carrier_on(struct net_device *dev)
0576 {
0577 if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
0578 if (dev->reg_state == NETREG_UNINITIALIZED)
0579 return;
0580 atomic_inc(&dev->carrier_up_count);
0581 linkwatch_fire_event(dev);
0582 if (netif_running(dev))
0583 __netdev_watchdog_up(dev);
0584 }
0585 }
0586 EXPORT_SYMBOL(netif_carrier_on);
0587
/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
0594 void netif_carrier_off(struct net_device *dev)
0595 {
0596 if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
0597 if (dev->reg_state == NETREG_UNINITIALIZED)
0598 return;
0599 atomic_inc(&dev->carrier_down_count);
0600 linkwatch_fire_event(dev);
0601 }
0602 }
0603 EXPORT_SYMBOL(netif_carrier_off);
0604
/**
 *	netif_carrier_event - report carrier state event
 *	@dev: network device
 *
 *	Device has detected a carrier event but its carrier state wasn't
 *	changed. Used by drivers that query carrier state asynchronously so
 *	that link flaps are not missed if the link recovers before it is
 *	queried.
 */
0613 void netif_carrier_event(struct net_device *dev)
0614 {
0615 if (dev->reg_state == NETREG_UNINITIALIZED)
0616 return;
0617 atomic_inc(&dev->carrier_up_count);
0618 atomic_inc(&dev->carrier_down_count);
0619 linkwatch_fire_event(dev);
0620 }
0621 EXPORT_SYMBOL_GPL(netif_carrier_event);
0622
/* "noop" scheduler: drops every packet at enqueue time and never dequeues
 * anything. Used as a placeholder whenever a device has no real qdisc
 * attached (e.g. while it is down or being dismantled).
 */
0627
0628 static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
0629 struct sk_buff **to_free)
0630 {
0631 __qdisc_drop(skb, to_free);
0632 return NET_XMIT_CN;
0633 }
0634
0635 static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
0636 {
0637 return NULL;
0638 }
0639
0640 struct Qdisc_ops noop_qdisc_ops __read_mostly = {
0641 .id = "noop",
0642 .priv_size = 0,
0643 .enqueue = noop_enqueue,
0644 .dequeue = noop_dequeue,
0645 .peek = noop_dequeue,
0646 .owner = THIS_MODULE,
0647 };
0648
0649 static struct netdev_queue noop_netdev_queue = {
0650 RCU_POINTER_INITIALIZER(qdisc, &noop_qdisc),
0651 .qdisc_sleeping = &noop_qdisc,
0652 };
0653
0654 struct Qdisc noop_qdisc = {
0655 .enqueue = noop_enqueue,
0656 .dequeue = noop_dequeue,
0657 .flags = TCQ_F_BUILTIN,
0658 .ops = &noop_qdisc_ops,
0659 .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
0660 .dev_queue = &noop_netdev_queue,
0661 .busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
0662 .gso_skb = {
0663 .next = (struct sk_buff *)&noop_qdisc.gso_skb,
0664 .prev = (struct sk_buff *)&noop_qdisc.gso_skb,
0665 .qlen = 0,
0666 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.gso_skb.lock),
0667 },
0668 .skb_bad_txq = {
0669 .next = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
0670 .prev = (struct sk_buff *)&noop_qdisc.skb_bad_txq,
0671 .qlen = 0,
0672 .lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.skb_bad_txq.lock),
0673 },
0674 };
0675 EXPORT_SYMBOL(noop_qdisc);
0676
0677 static int noqueue_init(struct Qdisc *qdisc, struct nlattr *opt,
0678 struct netlink_ext_ack *extack)
0679 {
/* register_qdisc() assigns a default enqueue of noop_enqueue if this
 * is unset, but __dev_queue_xmit() only treats the qdisc as "noqueue"
 * when ->enqueue is NULL - so clear it here.
 */
0683 qdisc->enqueue = NULL;
0684 return 0;
0685 }
0686
0687 struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
0688 .id = "noqueue",
0689 .priv_size = 0,
0690 .init = noqueue_init,
0691 .enqueue = noop_enqueue,
0692 .dequeue = noop_dequeue,
0693 .peek = noop_dequeue,
0694 .owner = THIS_MODULE,
0695 };
0696
0697 static const u8 prio2band[TC_PRIO_MAX + 1] = {
0698 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
0699 };
0700
/* 3-band FIFO queue: old style, but a bit faster than the generic
 * prio+fifo combination.
 */
0704
0705 #define PFIFO_FAST_BANDS 3
0706
/*
 * Private data for a pfifo_fast scheduler: one lockless skb_array ring per
 * priority band.
 */
0711 struct pfifo_fast_priv {
0712 struct skb_array q[PFIFO_FAST_BANDS];
0713 };
0714
0715 static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
0716 int band)
0717 {
0718 return &priv->q[band];
0719 }
0720
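/* Map the skb priority to one of the three bands and push the skb onto that
 * band's lockless ring; drop the packet if the ring is full.
 */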
0721 static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
0722 struct sk_buff **to_free)
0723 {
0724 int band = prio2band[skb->priority & TC_PRIO_MAX];
0725 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
0726 struct skb_array *q = band2list(priv, band);
0727 unsigned int pkt_len = qdisc_pkt_len(skb);
0728 int err;
0729
0730 err = skb_array_produce(q, skb);
0731
0732 if (unlikely(err)) {
0733 if (qdisc_is_percpu_stats(qdisc))
0734 return qdisc_drop_cpu(skb, qdisc, to_free);
0735 else
0736 return qdisc_drop(skb, qdisc, to_free);
0737 }
0738
0739 qdisc_update_stats_at_enqueue(qdisc, pkt_len);
0740 return NET_XMIT_SUCCESS;
0741 }
0742
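/* Scan the bands in priority order and consume the first available skb.
 * If nothing was found but the qdisc is flagged non-empty, clear the
 * MISSED/DRAINING bits and retry once.
 */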
0743 static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
0744 {
0745 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
0746 struct sk_buff *skb = NULL;
0747 bool need_retry = true;
0748 int band;
0749
0750 retry:
0751 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
0752 struct skb_array *q = band2list(priv, band);
0753
0754 if (__skb_array_empty(q))
0755 continue;
0756
0757 skb = __skb_array_consume(q);
0758 }
0759 if (likely(skb)) {
0760 qdisc_update_stats_at_dequeue(qdisc, skb);
0761 } else if (need_retry &&
0762 READ_ONCE(qdisc->state) & QDISC_STATE_NON_EMPTY) {
/* Delay clearing __QDISC_STATE_MISSED here to reduce the
 * overhead of the second spin_trylock() in qdisc_run_begin()
 * and of the __netif_schedule() call in qdisc_run_end().
 */
0768 clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
0769 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
0770
/* Make sure the retried dequeue happens after clearing
 * the MISSED/DRAINING bits.
 */
0774 smp_mb__after_atomic();
0775
0776 need_retry = false;
0777
0778 goto retry;
0779 }
0780
0781 return skb;
0782 }
0783
0784 static struct sk_buff *pfifo_fast_peek(struct Qdisc *qdisc)
0785 {
0786 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
0787 struct sk_buff *skb = NULL;
0788 int band;
0789
0790 for (band = 0; band < PFIFO_FAST_BANDS && !skb; band++) {
0791 struct skb_array *q = band2list(priv, band);
0792
0793 skb = __skb_array_peek(q);
0794 }
0795
0796 return skb;
0797 }
0798
0799 static void pfifo_fast_reset(struct Qdisc *qdisc)
0800 {
0801 int i, band;
0802 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
0803
0804 for (band = 0; band < PFIFO_FAST_BANDS; band++) {
0805 struct skb_array *q = band2list(priv, band);
0806 struct sk_buff *skb;
0807
/* A NULL ring is possible if the destroy path runs because
 * skb_array_init() failed in pfifo_fast_init().
 */
0811 if (!q->ring.queue)
0812 continue;
0813
0814 while ((skb = __skb_array_consume(q)) != NULL)
0815 kfree_skb(skb);
0816 }
0817
0818 if (qdisc_is_percpu_stats(qdisc)) {
0819 for_each_possible_cpu(i) {
0820 struct gnet_stats_queue *q;
0821
0822 q = per_cpu_ptr(qdisc->cpu_qstats, i);
0823 q->backlog = 0;
0824 q->qlen = 0;
0825 }
0826 }
0827 }
0828
0829 static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
0830 {
0831 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
0832
0833 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
0834 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
0835 goto nla_put_failure;
0836 return skb->len;
0837
0838 nla_put_failure:
0839 return -1;
0840 }
0841
0842 static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt,
0843 struct netlink_ext_ack *extack)
0844 {
0845 unsigned int qlen = qdisc_dev(qdisc)->tx_queue_len;
0846 struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
0847 int prio;
0848
/* guard against zero length rings */
0850 if (!qlen)
0851 return -EINVAL;
0852
0853 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
0854 struct skb_array *q = band2list(priv, prio);
0855 int err;
0856
0857 err = skb_array_init(q, qlen, GFP_KERNEL);
0858 if (err)
0859 return -ENOMEM;
0860 }
0861
/* Can bypass the queue discipline */
0863 qdisc->flags |= TCQ_F_CAN_BYPASS;
0864 return 0;
0865 }
0866
0867 static void pfifo_fast_destroy(struct Qdisc *sch)
0868 {
0869 struct pfifo_fast_priv *priv = qdisc_priv(sch);
0870 int prio;
0871
0872 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
0873 struct skb_array *q = band2list(priv, prio);
0874
/* A NULL ring is possible if the destroy path runs because
 * skb_array_init() failed in pfifo_fast_init().
 */
0878 if (!q->ring.queue)
0879 continue;
0880
/* Destroy the ring; no need to kfree_skb() anything because
 * pfifo_fast_reset() has already done that work.
 */
0883 ptr_ring_cleanup(&q->ring, NULL);
0884 }
0885 }
0886
0887 static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
0888 unsigned int new_len)
0889 {
0890 struct pfifo_fast_priv *priv = qdisc_priv(sch);
0891 struct skb_array *bands[PFIFO_FAST_BANDS];
0892 int prio;
0893
0894 for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
0895 struct skb_array *q = band2list(priv, prio);
0896
0897 bands[prio] = q;
0898 }
0899
0900 return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
0901 GFP_KERNEL);
0902 }
0903
0904 struct Qdisc_ops pfifo_fast_ops __read_mostly = {
0905 .id = "pfifo_fast",
0906 .priv_size = sizeof(struct pfifo_fast_priv),
0907 .enqueue = pfifo_fast_enqueue,
0908 .dequeue = pfifo_fast_dequeue,
0909 .peek = pfifo_fast_peek,
0910 .init = pfifo_fast_init,
0911 .destroy = pfifo_fast_destroy,
0912 .reset = pfifo_fast_reset,
0913 .dump = pfifo_fast_dump,
0914 .change_tx_queue_len = pfifo_fast_change_tx_queue_len,
0915 .owner = THIS_MODULE,
0916 .static_flags = TCQ_F_NOLOCK | TCQ_F_CPUSTATS,
0917 };
0918 EXPORT_SYMBOL(pfifo_fast_ops);
0919
0920 static struct lock_class_key qdisc_tx_busylock;
0921
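/* Allocate and minimally initialize a qdisc on the NUMA node of its tx
 * queue; per-CPU statistics are allocated when the ops request
 * TCQ_F_CPUSTATS.
 */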
0922 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
0923 const struct Qdisc_ops *ops,
0924 struct netlink_ext_ack *extack)
0925 {
0926 struct Qdisc *sch;
0927 unsigned int size = sizeof(*sch) + ops->priv_size;
0928 int err = -ENOBUFS;
0929 struct net_device *dev;
0930
0931 if (!dev_queue) {
0932 NL_SET_ERR_MSG(extack, "No device queue given");
0933 err = -EINVAL;
0934 goto errout;
0935 }
0936
0937 dev = dev_queue->dev;
0938 sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
0939
0940 if (!sch)
0941 goto errout;
0942 __skb_queue_head_init(&sch->gso_skb);
0943 __skb_queue_head_init(&sch->skb_bad_txq);
0944 qdisc_skb_head_init(&sch->q);
0945 gnet_stats_basic_sync_init(&sch->bstats);
0946 spin_lock_init(&sch->q.lock);
0947
0948 if (ops->static_flags & TCQ_F_CPUSTATS) {
0949 sch->cpu_bstats =
0950 netdev_alloc_pcpu_stats(struct gnet_stats_basic_sync);
0951 if (!sch->cpu_bstats)
0952 goto errout1;
0953
0954 sch->cpu_qstats = alloc_percpu(struct gnet_stats_queue);
0955 if (!sch->cpu_qstats) {
0956 free_percpu(sch->cpu_bstats);
0957 goto errout1;
0958 }
0959 }
0960
0961 spin_lock_init(&sch->busylock);
0962 lockdep_set_class(&sch->busylock,
0963 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
0964
/* seqlock has the same scope as busylock, for NOLOCK qdiscs */
0966 spin_lock_init(&sch->seqlock);
0967 lockdep_set_class(&sch->seqlock,
0968 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
0969
0970 sch->ops = ops;
0971 sch->flags = ops->static_flags;
0972 sch->enqueue = ops->enqueue;
0973 sch->dequeue = ops->dequeue;
0974 sch->dev_queue = dev_queue;
0975 netdev_hold(dev, &sch->dev_tracker, GFP_KERNEL);
0976 refcount_set(&sch->refcnt, 1);
0977
0978 return sch;
0979 errout1:
0980 kfree(sch);
0981 errout:
0982 return ERR_PTR(err);
0983 }
0984
0985 struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
0986 const struct Qdisc_ops *ops,
0987 unsigned int parentid,
0988 struct netlink_ext_ack *extack)
0989 {
0990 struct Qdisc *sch;
0991
0992 if (!try_module_get(ops->owner)) {
0993 NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
0994 return NULL;
0995 }
0996
0997 sch = qdisc_alloc(dev_queue, ops, extack);
0998 if (IS_ERR(sch)) {
0999 module_put(ops->owner);
1000 return NULL;
1001 }
1002 sch->parent = parentid;
1003
1004 if (!ops->init || ops->init(sch, NULL, extack) == 0) {
1005 trace_qdisc_create(ops, dev_queue->dev, parentid);
1006 return sch;
1007 }
1008
1009 qdisc_put(sch);
1010 return NULL;
1011 }
1012 EXPORT_SYMBOL(qdisc_create_dflt);
1013
/* Called under qdisc_lock(qdisc) and BH! */
1015
1016 void qdisc_reset(struct Qdisc *qdisc)
1017 {
1018 const struct Qdisc_ops *ops = qdisc->ops;
1019
1020 trace_qdisc_reset(qdisc);
1021
1022 if (ops->reset)
1023 ops->reset(qdisc);
1024
1025 __skb_queue_purge(&qdisc->gso_skb);
1026 __skb_queue_purge(&qdisc->skb_bad_txq);
1027
1028 qdisc->q.qlen = 0;
1029 qdisc->qstats.backlog = 0;
1030 }
1031 EXPORT_SYMBOL(qdisc_reset);
1032
1033 void qdisc_free(struct Qdisc *qdisc)
1034 {
1035 if (qdisc_is_percpu_stats(qdisc)) {
1036 free_percpu(qdisc->cpu_bstats);
1037 free_percpu(qdisc->cpu_qstats);
1038 }
1039
1040 kfree(qdisc);
1041 }
1042
1043 static void qdisc_free_cb(struct rcu_head *head)
1044 {
1045 struct Qdisc *q = container_of(head, struct Qdisc, rcu);
1046
1047 qdisc_free(q);
1048 }
1049
1050 static void qdisc_destroy(struct Qdisc *qdisc)
1051 {
1052 const struct Qdisc_ops *ops = qdisc->ops;
1053
1054 #ifdef CONFIG_NET_SCHED
1055 qdisc_hash_del(qdisc);
1056
1057 qdisc_put_stab(rtnl_dereference(qdisc->stab));
1058 #endif
1059 gen_kill_estimator(&qdisc->rate_est);
1060
1061 qdisc_reset(qdisc);
1062
1063 if (ops->destroy)
1064 ops->destroy(qdisc);
1065
1066 module_put(ops->owner);
1067 netdev_put(qdisc_dev(qdisc), &qdisc->dev_tracker);
1068
1069 trace_qdisc_destroy(qdisc);
1070
1071 call_rcu(&qdisc->rcu, qdisc_free_cb);
1072 }
1073
1074 void qdisc_put(struct Qdisc *qdisc)
1075 {
1076 if (!qdisc)
1077 return;
1078
1079 if (qdisc->flags & TCQ_F_BUILTIN ||
1080 !refcount_dec_and_test(&qdisc->refcnt))
1081 return;
1082
1083 qdisc_destroy(qdisc);
1084 }
1085 EXPORT_SYMBOL(qdisc_put);
1086
/* Version of qdisc_put() that may be called with the RTNL mutex unlocked.
 * Intended as an optimization: the RTNL lock is only taken if the qdisc
 * reference counter actually reaches zero.
 */
1092 void qdisc_put_unlocked(struct Qdisc *qdisc)
1093 {
1094 if (qdisc->flags & TCQ_F_BUILTIN ||
1095 !refcount_dec_and_rtnl_lock(&qdisc->refcnt))
1096 return;
1097
1098 qdisc_destroy(qdisc);
1099 rtnl_unlock();
1100 }
1101 EXPORT_SYMBOL(qdisc_put_unlocked);
1102
/* Attach a toplevel qdisc to a device queue. */
1104 struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
1105 struct Qdisc *qdisc)
1106 {
1107 struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
1108 spinlock_t *root_lock;
1109
1110 root_lock = qdisc_lock(oqdisc);
1111 spin_lock_bh(root_lock);
1112
/* ... park the new qdisc as the sleeping one and let traffic hit
 * noop_qdisc until the device is (re)activated.
 */
1114 if (qdisc == NULL)
1115 qdisc = &noop_qdisc;
1116 dev_queue->qdisc_sleeping = qdisc;
1117 rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
1118
1119 spin_unlock_bh(root_lock);
1120
1121 return oqdisc;
1122 }
1123 EXPORT_SYMBOL(dev_graft_qdisc);
1124
1125 static void shutdown_scheduler_queue(struct net_device *dev,
1126 struct netdev_queue *dev_queue,
1127 void *_qdisc_default)
1128 {
1129 struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1130 struct Qdisc *qdisc_default = _qdisc_default;
1131
1132 if (qdisc) {
1133 rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1134 dev_queue->qdisc_sleeping = qdisc_default;
1135
1136 qdisc_put(qdisc);
1137 }
1138 }
1139
1140 static void attach_one_default_qdisc(struct net_device *dev,
1141 struct netdev_queue *dev_queue,
1142 void *_unused)
1143 {
1144 struct Qdisc *qdisc;
1145 const struct Qdisc_ops *ops = default_qdisc_ops;
1146
1147 if (dev->priv_flags & IFF_NO_QUEUE)
1148 ops = &noqueue_qdisc_ops;
1149 else if(dev->type == ARPHRD_CAN)
1150 ops = &pfifo_fast_ops;
1151
1152 qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
1153 if (!qdisc)
1154 return;
1155
1156 if (!netif_is_multiqueue(dev))
1157 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1158 dev_queue->qdisc_sleeping = qdisc;
1159 }
1160
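/* Attach default qdiscs: a per-queue default (or noqueue) for single-queue
 * and IFF_NO_QUEUE devices, otherwise an mq root qdisc that attaches one
 * child per tx queue.
 */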
1161 static void attach_default_qdiscs(struct net_device *dev)
1162 {
1163 struct netdev_queue *txq;
1164 struct Qdisc *qdisc;
1165
1166 txq = netdev_get_tx_queue(dev, 0);
1167
1168 if (!netif_is_multiqueue(dev) ||
1169 dev->priv_flags & IFF_NO_QUEUE) {
1170 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1171 qdisc = txq->qdisc_sleeping;
1172 rcu_assign_pointer(dev->qdisc, qdisc);
1173 qdisc_refcount_inc(qdisc);
1174 } else {
1175 qdisc = qdisc_create_dflt(txq, &mq_qdisc_ops, TC_H_ROOT, NULL);
1176 if (qdisc) {
1177 rcu_assign_pointer(dev->qdisc, qdisc);
1178 qdisc->ops->attach(qdisc);
1179 }
1180 }
1181 qdisc = rtnl_dereference(dev->qdisc);
1182
/* Detect default qdisc setup/init failure and fall back to "noqueue" */
1184 if (qdisc == &noop_qdisc) {
1185 netdev_warn(dev, "default qdisc (%s) fail, fallback to %s\n",
1186 default_qdisc_ops->id, noqueue_qdisc_ops.id);
1187 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1188 dev->priv_flags |= IFF_NO_QUEUE;
1189 netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);
1190 qdisc = txq->qdisc_sleeping;
1191 rcu_assign_pointer(dev->qdisc, qdisc);
1192 qdisc_refcount_inc(qdisc);
1193 dev->priv_flags ^= IFF_NO_QUEUE;
1194 }
1195
1196 #ifdef CONFIG_NET_SCHED
1197 if (qdisc != &noop_qdisc)
1198 qdisc_hash_add(qdisc, false);
1199 #endif
1200 }
1201
1202 static void transition_one_qdisc(struct net_device *dev,
1203 struct netdev_queue *dev_queue,
1204 void *_need_watchdog)
1205 {
1206 struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
1207 int *need_watchdog_p = _need_watchdog;
1208
1209 if (!(new_qdisc->flags & TCQ_F_BUILTIN))
1210 clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
1211
1212 rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
1213 if (need_watchdog_p) {
1214 WRITE_ONCE(dev_queue->trans_start, 0);
1215 *need_watchdog_p = 1;
1216 }
1217 }
1218
1219 void dev_activate(struct net_device *dev)
1220 {
1221 int need_watchdog;
1222
/* No queueing discipline is attached to the device yet:
 * create the default one for devices which need queueing,
 * or noqueue_qdisc for virtual interfaces.
 */
1227
1228 if (rtnl_dereference(dev->qdisc) == &noop_qdisc)
1229 attach_default_qdiscs(dev);
1230
1231 if (!netif_carrier_ok(dev))
/* Delay activation until the next carrier-on event */
1233 return;
1234
1235 need_watchdog = 0;
1236 netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
1237 if (dev_ingress_queue(dev))
1238 transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);
1239
1240 if (need_watchdog) {
1241 netif_trans_update(dev);
1242 dev_watchdog_up(dev);
1243 }
1244 }
1245 EXPORT_SYMBOL(dev_activate);
1246
1247 static void qdisc_deactivate(struct Qdisc *qdisc)
1248 {
1249 if (qdisc->flags & TCQ_F_BUILTIN)
1250 return;
1251
1252 set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
1253 }
1254
1255 static void dev_deactivate_queue(struct net_device *dev,
1256 struct netdev_queue *dev_queue,
1257 void *_qdisc_default)
1258 {
1259 struct Qdisc *qdisc_default = _qdisc_default;
1260 struct Qdisc *qdisc;
1261
1262 qdisc = rtnl_dereference(dev_queue->qdisc);
1263 if (qdisc) {
1264 qdisc_deactivate(qdisc);
1265 rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
1266 }
1267 }
1268
1269 static void dev_reset_queue(struct net_device *dev,
1270 struct netdev_queue *dev_queue,
1271 void *_unused)
1272 {
1273 struct Qdisc *qdisc;
1274 bool nolock;
1275
1276 qdisc = dev_queue->qdisc_sleeping;
1277 if (!qdisc)
1278 return;
1279
1280 nolock = qdisc->flags & TCQ_F_NOLOCK;
1281
1282 if (nolock)
1283 spin_lock_bh(&qdisc->seqlock);
1284 spin_lock_bh(qdisc_lock(qdisc));
1285
1286 qdisc_reset(qdisc);
1287
1288 spin_unlock_bh(qdisc_lock(qdisc));
1289 if (nolock) {
1290 clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
1291 clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
1292 spin_unlock_bh(&qdisc->seqlock);
1293 }
1294 }
1295
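/* Return true if any tx queue's sleeping qdisc is still running or
 * scheduled; dev_deactivate_many() uses this to wait for qdiscs to quiesce.
 */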
1296 static bool some_qdisc_is_busy(struct net_device *dev)
1297 {
1298 unsigned int i;
1299
1300 for (i = 0; i < dev->num_tx_queues; i++) {
1301 struct netdev_queue *dev_queue;
1302 spinlock_t *root_lock;
1303 struct Qdisc *q;
1304 int val;
1305
1306 dev_queue = netdev_get_tx_queue(dev, i);
1307 q = dev_queue->qdisc_sleeping;
1308
1309 root_lock = qdisc_lock(q);
1310 spin_lock_bh(root_lock);
1311
1312 val = (qdisc_is_running(q) ||
1313 test_bit(__QDISC_STATE_SCHED, &q->state));
1314
1315 spin_unlock_bh(root_lock);
1316
1317 if (val)
1318 return true;
1319 }
1320 return false;
1321 }
1322
/**
 *	dev_deactivate_many - deactivate transmissions on several devices
 *	@head: list of devices to deactivate
 *
 *	This function returns only when all outstanding transmissions
 *	have completed, unless all devices are in the dismantle phase.
 */
1330 void dev_deactivate_many(struct list_head *head)
1331 {
1332 struct net_device *dev;
1333
1334 list_for_each_entry(dev, head, close_list) {
1335 netdev_for_each_tx_queue(dev, dev_deactivate_queue,
1336 &noop_qdisc);
1337 if (dev_ingress_queue(dev))
1338 dev_deactivate_queue(dev, dev_ingress_queue(dev),
1339 &noop_qdisc);
1340
1341 dev_watchdog_down(dev);
1342 }
1343
/* Wait for outstanding qdisc-less dev_queue_xmit calls and
 * outstanding qdisc enqueuing calls before resetting the qdiscs.
 */
1349 synchronize_net();
1350
1351 list_for_each_entry(dev, head, close_list) {
1352 netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
1353
1354 if (dev_ingress_queue(dev))
1355 dev_reset_queue(dev, dev_ingress_queue(dev), NULL);
1356 }
1357
/* Wait for outstanding qdisc_run calls. */
1359 list_for_each_entry(dev, head, close_list) {
1360 while (some_qdisc_is_busy(dev)) {
/* wait_event() would avoid this sleep-loop but would
 * require expensive checks in the fast paths of packet
 * processing which isn't worth it.
 */
1365 schedule_timeout_uninterruptible(1);
1366 }
1367 }
1368 }
1369
1370 void dev_deactivate(struct net_device *dev)
1371 {
1372 LIST_HEAD(single);
1373
1374 list_add(&dev->close_list, &single);
1375 dev_deactivate_many(&single);
1376 list_del(&single);
1377 }
1378 EXPORT_SYMBOL(dev_deactivate);
1379
1380 static int qdisc_change_tx_queue_len(struct net_device *dev,
1381 struct netdev_queue *dev_queue)
1382 {
1383 struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
1384 const struct Qdisc_ops *ops = qdisc->ops;
1385
1386 if (ops->change_tx_queue_len)
1387 return ops->change_tx_queue_len(qdisc, dev->tx_queue_len);
1388 return 0;
1389 }
1390
1391 void dev_qdisc_change_real_num_tx(struct net_device *dev,
1392 unsigned int new_real_tx)
1393 {
1394 struct Qdisc *qdisc = rtnl_dereference(dev->qdisc);
1395
1396 if (qdisc->ops->change_real_num_tx)
1397 qdisc->ops->change_real_num_tx(qdisc, new_real_tx);
1398 }
1399
1400 void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx)
1401 {
1402 #ifdef CONFIG_NET_SCHED
1403 struct net_device *dev = qdisc_dev(sch);
1404 struct Qdisc *qdisc;
1405 unsigned int i;
1406
1407 for (i = new_real_tx; i < dev->real_num_tx_queues; i++) {
1408 qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
/* Only update the default qdiscs we created;
 * qdiscs with handles are always hashed.
 */
1412 if (qdisc != &noop_qdisc && !qdisc->handle)
1413 qdisc_hash_del(qdisc);
1414 }
1415 for (i = dev->real_num_tx_queues; i < new_real_tx; i++) {
1416 qdisc = netdev_get_tx_queue(dev, i)->qdisc_sleeping;
1417 if (qdisc != &noop_qdisc && !qdisc->handle)
1418 qdisc_hash_add(qdisc, false);
1419 }
1420 #endif
1421 }
1422 EXPORT_SYMBOL(mq_change_real_num_tx);
1423
1424 int dev_qdisc_change_tx_queue_len(struct net_device *dev)
1425 {
1426 bool up = dev->flags & IFF_UP;
1427 unsigned int i;
1428 int ret = 0;
1429
1430 if (up)
1431 dev_deactivate(dev);
1432
1433 for (i = 0; i < dev->num_tx_queues; i++) {
1434 ret = qdisc_change_tx_queue_len(dev, &dev->_tx[i]);
1435
/* Note: changes already applied to earlier queues are not reverted on failure. */
1437 if (ret)
1438 break;
1439 }
1440
1441 if (up)
1442 dev_activate(dev);
1443 return ret;
1444 }
1445
1446 static void dev_init_scheduler_queue(struct net_device *dev,
1447 struct netdev_queue *dev_queue,
1448 void *_qdisc)
1449 {
1450 struct Qdisc *qdisc = _qdisc;
1451
1452 rcu_assign_pointer(dev_queue->qdisc, qdisc);
1453 dev_queue->qdisc_sleeping = qdisc;
1454 }
1455
1456 void dev_init_scheduler(struct net_device *dev)
1457 {
1458 rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1459 netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
1460 if (dev_ingress_queue(dev))
1461 dev_init_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1462
1463 timer_setup(&dev->watchdog_timer, dev_watchdog, 0);
1464 }
1465
1466 void dev_shutdown(struct net_device *dev)
1467 {
1468 netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
1469 if (dev_ingress_queue(dev))
1470 shutdown_scheduler_queue(dev, dev_ingress_queue(dev), &noop_qdisc);
1471 qdisc_put(rtnl_dereference(dev->qdisc));
1472 rcu_assign_pointer(dev->qdisc, &noop_qdisc);
1473
1474 WARN_ON(timer_pending(&dev->watchdog_timer));
1475 }
1476
/* psched_ratecfg_precompute__() - precompute mult/shift for reciprocal division
 * @rate:  rate (in bytes or packets per second) to divide by
 * @mult:  resulting multiplier
 * @shift: resulting shift
 *
 * The point is to replace a 64-bit divide in the fast path by a multiply
 * and a shift.  The straightforward formula for a transmit time would be:
 *
 *	time_in_ns = (NSEC_PER_SEC * len) / rate
 *
 * Instead, mult and shift are precomputed so that:
 *
 *	time_in_ns = (len * mult) >> shift
 *
 * The loop below grows mult for the highest possible precision while
 * keeping mult within 32 bits and the scaled factor within 64 bits.
 */
1500 static void psched_ratecfg_precompute__(u64 rate, u32 *mult, u8 *shift)
1501 {
1502 u64 factor = NSEC_PER_SEC;
1503
1504 *mult = 1;
1505 *shift = 0;
1506
1507 if (rate <= 0)
1508 return;
1509
1510 for (;;) {
1511 *mult = div64_u64(factor, rate);
1512 if (*mult & (1U << 31) || factor & (1ULL << 63))
1513 break;
1514 factor <<= 1;
1515 (*shift)++;
1516 }
1517 }
1518
1519 void psched_ratecfg_precompute(struct psched_ratecfg *r,
1520 const struct tc_ratespec *conf,
1521 u64 rate64)
1522 {
1523 memset(r, 0, sizeof(*r));
1524 r->overhead = conf->overhead;
1525 r->mpu = conf->mpu;
1526 r->rate_bytes_ps = max_t(u64, conf->rate, rate64);
1527 r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
1528 psched_ratecfg_precompute__(r->rate_bytes_ps, &r->mult, &r->shift);
1529 }
1530 EXPORT_SYMBOL(psched_ratecfg_precompute);
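/* Usage sketch (an illustration, mirroring what psched_l2t_ns() in
 * <net/pkt_sched.h> does): with r precomputed above, a shaper converts a
 * packet length into its transmission time without a 64-bit divide:
 *
 *	len += r->overhead;
 *	time_ns = ((u64)len * r->mult) >> r->shift;
 *
 * which approximates len * NSEC_PER_SEC / r->rate_bytes_ps.
 */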
1531
1532 void psched_ppscfg_precompute(struct psched_pktrate *r, u64 pktrate64)
1533 {
1534 r->rate_pkts_ps = pktrate64;
1535 psched_ratecfg_precompute__(r->rate_pkts_ps, &r->mult, &r->shift);
1536 }
1537 EXPORT_SYMBOL(psched_ppscfg_precompute);
1538
1539 void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
1540 struct tcf_proto *tp_head)
1541 {
/* The miniq pointer swap is serialized by the caller (filter chain
 * lock / RTNL); the chain cannot be dereferenced directly here
 * because tp_head may be NULL.
 */
1545 struct mini_Qdisc *miniq_old =
1546 rcu_dereference_protected(*miniqp->p_miniq, 1);
1547 struct mini_Qdisc *miniq;
1548
1549 if (!tp_head) {
1550 RCU_INIT_POINTER(*miniqp->p_miniq, NULL);
1551 } else {
1552 miniq = miniq_old != &miniqp->miniq1 ?
1553 &miniqp->miniq1 : &miniqp->miniq2;
/* We need to make sure that readers won't see the miniq
 * we are about to modify, so ensure that at least one RCU
 * grace period has elapsed since the miniq was last made
 * inactive.
 */
1560 if (IS_ENABLED(CONFIG_PREEMPT_RT))
1561 cond_synchronize_rcu(miniq->rcu_state);
1562 else if (!poll_state_synchronize_rcu(miniq->rcu_state))
1563 synchronize_rcu_expedited();
1564
1565 miniq->filter_list = tp_head;
1566 rcu_assign_pointer(*miniqp->p_miniq, miniq);
1567 }
1568
1569 if (miniq_old)
/* Counterpart of the RCU sync above: block any potential new
 * user of miniq_old until all readers have stopped seeing it.
 */
1574 miniq_old->rcu_state = start_poll_synchronize_rcu();
1575 }
1576 EXPORT_SYMBOL(mini_qdisc_pair_swap);
1577
1578 void mini_qdisc_pair_block_init(struct mini_Qdisc_pair *miniqp,
1579 struct tcf_block *block)
1580 {
1581 miniqp->miniq1.block = block;
1582 miniqp->miniq2.block = block;
1583 }
1584 EXPORT_SYMBOL(mini_qdisc_pair_block_init);
1585
1586 void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
1587 struct mini_Qdisc __rcu **p_miniq)
1588 {
1589 miniqp->miniq1.cpu_bstats = qdisc->cpu_bstats;
1590 miniqp->miniq1.cpu_qstats = qdisc->cpu_qstats;
1591 miniqp->miniq2.cpu_bstats = qdisc->cpu_bstats;
1592 miniqp->miniq2.cpu_qstats = qdisc->cpu_qstats;
1593 miniqp->miniq1.rcu_state = get_state_synchronize_rcu();
1594 miniqp->miniq2.rcu_state = miniqp->miniq1.rcu_state;
1595 miniqp->p_miniq = p_miniq;
1596 }
1597 EXPORT_SYMBOL(mini_qdisc_pair_init);