// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/compat.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <linux/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
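
/*
 * Periodic housekeeping: every EQL_DEFAULT_RESCHED_IVAL jiffies, drain
 * each live slave's queued-byte count by its per-interval allowance
 * (priority_Bps), and unlink any slave whose device has gone down.
 */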
static void eql_timer(struct timer_list *t)
{
	equalizer_t *eql = from_timer(eql, t, timer);
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open		= eql_open,
	.ndo_stop		= eql_close,
	.ndo_siocdevprivate	= eql_siocdevprivate,
	.ndo_start_xmit		= eql_slave_xmit,
};

static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev = dev;

	dev->netdev_ops = &eql_netdev_ops;
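
	/*
	 * A load-balancing master is not an Ethernet device, so pick
	 * sensible serial-link defaults: the EQL default MTU, a
	 * SLIP-style link type, and a short transmit queue.
	 */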
	dev->mtu		= EQL_DEFAULT_MTU;	/* 576, per if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;			/* hand packets off fast */
	netif_keep_dst(dev);
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
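
	/* XXX: we should force VJ compression off on the slaves
	 * automatically, instead of just warning the user. */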
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES;

	add_timer(&eql->timer);

	return 0;
}
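
/* Unlink a slave and drop its device reference; queue->lock must be held. */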
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	netdev_put(slave->dev, &slave->dev_tracker);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
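
	/*
	 * The timer has to be stopped first, before we start hacking away
	 * at the data structure it scans on every tick.
	 */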
	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
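
/*
 * Dispatch the EQL private ioctls.  The read-only queries
 * (EQL_GETMASTRCFG, EQL_GETSLAVECFG) are open to everyone; everything
 * else needs CAP_NET_ADMIN.  Compat (32-bit userspace on a 64-bit
 * kernel) callers are rejected: some of the config structures embed
 * native longs, so their layout differs between the two ABIs.
 */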
static int eql_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
			      void __user *data, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall())
		return -EOPNOTSUPP;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, data);
	default:
		return -EOPNOTSUPP;
	}
}
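
/*
 * Hypothetical userspace sketch (not part of this driver) of how these
 * commands are typically driven through any AF_INET socket "fd"; the
 * slave name "ppp0" and the 57600 bits-per-second priority are example
 * values:
 *
 *	slaving_request_t srq = { .slave_name = "ppp0", .priority = 57600 };
 *	struct ifreq ifr;
 *
 *	strcpy(ifr.ifr_name, "eql");
 *	ifr.ifr_data = (char *)&srq;
 *	if (ioctl(fd, EQL_ENSLAVE, &ifr) < 0)
 *		perror("EQL_ENSLAVE");
 */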
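
/*
 * Pick the least-loaded live slave; queue->lock must be held.  The load
 * metric starts from a mid-range bias, (~0UL - ~0UL / 2), so subtracting
 * priority_Bps cannot wrap: a slave scores better the higher its
 * configured rate and the fewer bytes it already has queued.  Slaves
 * whose device has gone down are reaped on the way through.
 */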
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Update best_slave whenever a lighter load is found. */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, e.g. it was taken down. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
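
/*
 * ndo_start_xmit for the master: pick the best slave, charge it for
 * skb->len queued bytes, and pass the skb down to that device.  With no
 * usable slave the packet is dropped and counted in tx_dropped.
 */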
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}
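
/*
 *	Private ioctl functions
 */

/* queue->lock must be held */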
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}
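
/* queue->lock must be held */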
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		netdev_hold(slave->dev, &slave->dev_tracker, GFP_ATOMIC);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}
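
/*
 * EQL_ENSLAVE: attach a slave to a running master.  The master must be
 * up, and the candidate may be neither a master nor already a slave.
 * srq.priority is the slave's link speed in bits per second.
 */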
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof(slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			slave_t *s = kzalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}
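
/* EQL_EMANCIPATE: detach a slave, looked up by name, from the master. */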
static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof(slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);

		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
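
/* EQL_GETSLAVECFG: report a slave's configured priority to userspace. */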
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof(slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof(slave_config_t)))
		ret = -EFAULT;

	return ret;
}
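
/* EQL_SETSLAVECFG: update a slave's priority (bits per second). */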
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof(slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
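
/* EQL_GETMASTRCFG: report the master's min/max slave counts. */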
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof(master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
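
/* EQL_SETMASTRCFG: set the master's min/max slave counts. */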
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof(master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Equalizer load-balancer for serial network interfaces");