// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common framework for low-level network console, dump, and debugger code
 *
 * Sep 8 2003  Matt Mackall <mpm@selenic.com>
 *
 * based on the netconsole code from:
 *
 * Copyright (C) 2001  Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2002  Red Hat, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/inet.h>
#include <linux/interrupt.h>
#include <linux/netpoll.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/if_vlan.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ip6_checksum.h>
#include <asm/unaligned.h>
#include <trace/events/napi.h>
#include <linux/kconfig.h>

/*
 * We maintain a small pool of fully-sized skbs, to make sure the
 * message gets out even in extreme OOM conditions.
 */

#define MAX_UDP_CHUNK 1460
#define MAX_SKBS 32

static struct sk_buff_head skb_pool;

DEFINE_STATIC_SRCU(netpoll_srcu);

#define USEC_PER_POLL	50

#define MAX_SKB_SIZE							\
	(sizeof(struct ethhdr) +					\
	 sizeof(struct iphdr) +						\
	 sizeof(struct udphdr) +					\
	 MAX_UDP_CHUNK)
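
/* Worked size check (assuming the usual header sizes: ethhdr 14 bytes,
 * minimal iphdr 20, udphdr 8): MAX_SKB_SIZE = 14 + 20 + 8 + 1460 = 1502,
 * so each pool skb can carry one maximal netpoll UDP chunk plus headers.
 */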

static void zap_completion_queue(void);

static unsigned int carrier_timeout = 4;
module_param(carrier_timeout, uint, 0644);

#define np_info(np, fmt, ...)				\
	pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_err(np, fmt, ...)				\
	pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
#define np_notice(np, fmt, ...)				\
	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)

static netdev_tx_t netpoll_start_xmit(struct sk_buff *skb,
				      struct net_device *dev,
				      struct netdev_queue *txq)
{
	netdev_tx_t status = NETDEV_TX_OK;
	netdev_features_t features;

	features = netif_skb_features(skb);

	if (skb_vlan_tag_present(skb) &&
	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			/* __vlan_hwaccel_push_inside() consumed the skb on
			 * failure (OOM), so there is nothing left to free;
			 * treat it as a completed (dropped) transmit.
			 */
			goto out;
		}
	}

	status = netdev_start_xmit(skb, dev, txq, false);

out:
	return status;
}

static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		struct netdev_queue *txq;
		unsigned int q_index;

		if (!netif_device_present(dev) || !netif_running(dev)) {
			kfree_skb(skb);
			continue;
		}

		local_irq_save(flags);
		/* check if skb->queue_mapping is still valid */
		q_index = skb_get_queue_mapping(skb);
		if (unlikely(q_index >= dev->real_num_tx_queues)) {
			q_index = q_index % dev->real_num_tx_queues;
			skb_set_queue_mapping(skb, q_index);
		}
		txq = netdev_get_tx_queue(dev, q_index);
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) {
			skb_queue_head(&npinfo->txq, skb);
			HARD_TX_UNLOCK(dev, txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		HARD_TX_UNLOCK(dev, txq);
		local_irq_restore(flags);
	}
}

static void poll_one_napi(struct napi_struct *napi)
{
	int work;

	/* If we set this bit but see that it has already been set,
	 * that indicates that napi has been disabled and we need
	 * to abort this operation
	 */
	if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
		return;

	/* We explicitly pass the polling call a budget of 0 to
	 * indicate that we are clearing the Tx path only.
	 */
	work = napi->poll(napi, 0);
	WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);
	trace_napi_poll(napi, work, 0);

	clear_bit(NAPI_STATE_NPSVC, &napi->state);
}

static void poll_napi(struct net_device *dev)
{
	struct napi_struct *napi;
	int cpu = smp_processor_id();

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
			poll_one_napi(napi);
			smp_store_release(&napi->poll_owner, -1);
		}
	}
}

void netpoll_poll_dev(struct net_device *dev)
{
	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
	const struct net_device_ops *ops;

	/* Don't do any rx activity if the dev_lock mutex is held
	 * the dev_open/close paths use this to block netpoll activity
	 * while changing device state
	 */
	if (!ni || down_trylock(&ni->dev_lock))
		return;

	if (!netif_running(dev)) {
		up(&ni->dev_lock);
		return;
	}

	ops = dev->netdev_ops;
	if (ops->ndo_poll_controller)
		ops->ndo_poll_controller(dev);

	poll_napi(dev);

	up(&ni->dev_lock);

	zap_completion_queue();
}
EXPORT_SYMBOL(netpoll_poll_dev);

void netpoll_poll_disable(struct net_device *dev)
{
	struct netpoll_info *ni;
	int idx;

	might_sleep();
	idx = srcu_read_lock(&netpoll_srcu);
	ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
	if (ni)
		down(&ni->dev_lock);
	srcu_read_unlock(&netpoll_srcu, idx);
}
EXPORT_SYMBOL(netpoll_poll_disable);

void netpoll_poll_enable(struct net_device *dev)
{
	struct netpoll_info *ni;

	rcu_read_lock();
	ni = rcu_dereference(dev->npinfo);
	if (ni)
		up(&ni->dev_lock);
	rcu_read_unlock();
}
EXPORT_SYMBOL(netpoll_poll_enable);
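
/* Pairing sketch (an assumption about the typical caller, modeled on how the
 * core device open/close paths bracket state changes; not code from this
 * file):
 *
 *	netpoll_poll_disable(dev);	// wait out and block netpoll polling
 *	... stop or reconfigure the device ...
 *	netpoll_poll_enable(dev);	// let netpoll_poll_dev() run again
 *
 * The semaphore taken in netpoll_poll_disable() is the same ni->dev_lock that
 * netpoll_poll_dev() try-acquires, so polling is excluded for the whole
 * bracket.
 */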

static void refill_skbs(void)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&skb_pool.lock, flags);
	while (skb_pool.qlen < MAX_SKBS) {
		skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
		if (!skb)
			break;

		__skb_queue_tail(&skb_pool, skb);
	}
	spin_unlock_irqrestore(&skb_pool.lock, flags);
}

static void zap_completion_queue(void)
{
	unsigned long flags;
	struct softnet_data *sd = &get_cpu_var(softnet_data);

	if (sd->completion_queue) {
		struct sk_buff *clist;

		local_irq_save(flags);
		clist = sd->completion_queue;
		sd->completion_queue = NULL;
		local_irq_restore(flags);

		while (clist != NULL) {
			struct sk_buff *skb = clist;
			clist = clist->next;
			if (!skb_irq_freeable(skb)) {
				refcount_set(&skb->users, 1);
				dev_kfree_skb_any(skb); /* put this one back */
			} else {
				__kfree_skb(skb);
			}
		}
	}

	put_cpu_var(softnet_data);
}

static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
{
	int count = 0;
	struct sk_buff *skb;

	zap_completion_queue();
	refill_skbs();
repeat:

	skb = alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		skb = skb_dequeue(&skb_pool);

	if (!skb) {
		if (++count < 10) {
			netpoll_poll_dev(np->dev);
			goto repeat;
		}
		return NULL;
	}

	refcount_set(&skb->users, 1);
	skb_reserve(skb, reserve);
	return skb;
}

static int netpoll_owner_active(struct net_device *dev)
{
	struct napi_struct *napi;

	list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
		if (napi->poll_owner == smp_processor_id())
			return 1;
	}
	return 0;
}

/* call with IRQ disabled */
static netdev_tx_t __netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	netdev_tx_t status = NETDEV_TX_BUSY;
	struct net_device *dev;
	unsigned long tries;
	/* It is up to the caller to keep npinfo alive. */
	struct netpoll_info *npinfo;

	lockdep_assert_irqs_disabled();

	dev = np->dev;
	npinfo = rcu_dereference_bh(dev->npinfo);

	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
		dev_kfree_skb_irq(skb);
		return NET_XMIT_DROP;
	}

	/* don't get messages out of order, and no recursion */
	if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
		struct netdev_queue *txq;

		txq = netdev_core_pick_tx(dev, skb, NULL);

		/* try until next clock tick */
		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
		     tries > 0; --tries) {
			if (HARD_TX_TRYLOCK(dev, txq)) {
				if (!netif_xmit_stopped(txq))
					status = netpoll_start_xmit(skb, dev, txq);

				HARD_TX_UNLOCK(dev, txq);

				if (dev_xmit_complete(status))
					break;

			}

			/* tickle device maybe there is some cleanup */
			netpoll_poll_dev(np->dev);

			udelay(USEC_PER_POLL);
		}

		WARN_ONCE(!irqs_disabled(),
			  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pS)\n",
			  dev->name, dev->netdev_ops->ndo_start_xmit);

	}

	if (!dev_xmit_complete(status)) {
		skb_queue_tail(&npinfo->txq, skb);
		schedule_delayed_work(&npinfo->tx_work, 0);
	}
	return NETDEV_TX_OK;
}

netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	unsigned long flags;
	netdev_tx_t ret;

	if (unlikely(!np)) {
		dev_kfree_skb_irq(skb);
		ret = NET_XMIT_DROP;
	} else {
		local_irq_save(flags);
		ret = __netpoll_send_skb(np, skb);
		local_irq_restore(flags);
	}
	return ret;
}
EXPORT_SYMBOL(netpoll_send_skb);

void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
{
	int total_len, ip_len, udp_len;
	struct sk_buff *skb;
	struct udphdr *udph;
	struct iphdr *iph;
	struct ethhdr *eth;
	static atomic_t ip_ident;
	struct ipv6hdr *ip6h;

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		WARN_ON_ONCE(!irqs_disabled());

	udp_len = len + sizeof(*udph);
	if (np->ipv6)
		ip_len = udp_len + sizeof(*ip6h);
	else
		ip_len = udp_len + sizeof(*iph);

	total_len = ip_len + LL_RESERVED_SPACE(np->dev);

	skb = find_skb(np, total_len + np->dev->needed_tailroom,
		       total_len - len);
	if (!skb)
		return;

	skb_copy_to_linear_data(skb, msg, len);
	skb_put(skb, len);

	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = htons(np->local_port);
	udph->dest = htons(np->remote_port);
	udph->len = htons(udp_len);

	if (np->ipv6) {
		udph->check = 0;
		udph->check = csum_ipv6_magic(&np->local_ip.in6,
					      &np->remote_ip.in6,
					      udp_len, IPPROTO_UDP,
					      csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*ip6h));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);

		/* ip6h->version = 6; ip6h->priority = 0; */
		*(unsigned char *)ip6h = 0x60;
		ip6h->flow_lbl[0] = 0;
		ip6h->flow_lbl[1] = 0;
		ip6h->flow_lbl[2] = 0;

		ip6h->payload_len = htons(sizeof(struct udphdr) + len);
		ip6h->nexthdr = IPPROTO_UDP;
		ip6h->hop_limit = 32;
		ip6h->saddr = np->local_ip.in6;
		ip6h->daddr = np->remote_ip.in6;

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
	} else {
		udph->check = 0;
		udph->check = csum_tcpudp_magic(np->local_ip.ip,
						np->remote_ip.ip,
						udp_len, IPPROTO_UDP,
						csum_partial(udph, udp_len, 0));
		if (udph->check == 0)
			udph->check = CSUM_MANGLED_0;

		skb_push(skb, sizeof(*iph));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);

		/* iph->version = 4; iph->ihl = 5; */
		*(unsigned char *)iph = 0x45;
		iph->tos      = 0;
		put_unaligned(htons(ip_len), &(iph->tot_len));
		iph->id       = htons(atomic_inc_return(&ip_ident));
		iph->frag_off = 0;
		iph->ttl      = 64;
		iph->protocol = IPPROTO_UDP;
		iph->check    = 0;
		put_unaligned(np->local_ip.ip, &(iph->saddr));
		put_unaligned(np->remote_ip.ip, &(iph->daddr));
		iph->check    = ip_fast_csum((unsigned char *)iph, iph->ihl);

		eth = skb_push(skb, ETH_HLEN);
		skb_reset_mac_header(skb);
		skb->protocol = eth->h_proto = htons(ETH_P_IP);
	}

	ether_addr_copy(eth->h_source, np->dev->dev_addr);
	ether_addr_copy(eth->h_dest, np->remote_mac);

	skb->dev = np->dev;

	netpoll_send_skb(np, skb);
}
EXPORT_SYMBOL(netpoll_send_udp);
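
/* Usage sketch (hypothetical caller, in the style of netconsole's write path;
 * values illustrative). netpoll_send_udp() expects to run with IRQs disabled
 * on non-RT kernels, hence the bracketing here:
 *
 *	unsigned long flags;
 *	const char *msg = "log line\n";
 *
 *	local_irq_save(flags);
 *	netpoll_send_udp(np, msg, strlen(msg));
 *	local_irq_restore(flags);
 *
 * Callers are expected to keep each payload within MAX_UDP_CHUNK so that the
 * emergency skbs from the MAX_SKB_SIZE pool can always hold a full message.
 */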

void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);

/* Returns 0 for an IPv4 address, 1 for IPv6, and -1 if the string parses as
 * neither (or as IPv6 on a kernel built without CONFIG_IPV6).
 */
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		return -1;
#endif
	}
	return -1;
}

int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur = opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	if (*cur != '@') {
		/* local port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	if (*cur != '/') {
		/* local IP */
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
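
/* Format sketch of the string accepted by netpoll_parse_options(), as implied
 * by the parser above (the same syntax netconsole documents):
 *
 *	[src_port]@[src_ip]/[dev],[dst_port]@<dst_ip>/[dst_mac]
 *
 * For example (illustrative values):
 *
 *	6665@10.0.0.1/eth0,6666@10.0.0.2/00:11:22:33:44:55
 *
 * Empty fields keep their defaults; the remote IP is the only mandatory part.
 */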

int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);

	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		refcount_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo);
			if (err)
				goto free_npinfo;
		}
	} else {
		npinfo = rtnl_dereference(ndev->npinfo);
		refcount_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);

int netpoll_setup(struct netpoll *np)
{
	struct net_device *ndev = NULL;
	struct in_device *in_dev;
	int err;

	rtnl_lock();
	if (np->dev_name[0]) {
		struct net *net = current->nsproxy->net_ns;

		ndev = __dev_get_by_name(net, np->dev_name);
	}
	if (!ndev) {
		np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
		err = -ENODEV;
		goto unlock;
	}
	dev_hold(ndev);

	if (netdev_master_upper_dev_get(ndev)) {
		np_err(np, "%s is a slave device, aborting\n", np->dev_name);
		err = -EBUSY;
		goto put;
	}

	if (!netif_running(ndev)) {
		unsigned long atmost, atleast;

		np_info(np, "device %s not up yet, forcing it\n", np->dev_name);

		err = dev_open(ndev, NULL);

		if (err) {
			np_err(np, "failed to open %s\n", ndev->name);
			goto put;
		}

		rtnl_unlock();
		atleast = jiffies + HZ/10;
		atmost = jiffies + carrier_timeout * HZ;
		while (!netif_carrier_ok(ndev)) {
			if (time_after(jiffies, atmost)) {
				np_notice(np, "timeout waiting for carrier\n");
				break;
			}
			msleep(1);
		}

		/* If carrier appears to come up instantly, we don't
		 * trust it and pause so that we don't pump all our
		 * queued console messages into the bitbucket.
		 */
		if (time_before(jiffies, atleast)) {
			np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
			msleep(4000);
		}
		rtnl_lock();
	}

	if (!np->local_ip.ip) {
		if (!np->ipv6) {
			const struct in_ifaddr *ifa;

			in_dev = __in_dev_get_rtnl(ndev);
			if (!in_dev)
				goto put_noaddr;

			ifa = rtnl_dereference(in_dev->ifa_list);
			if (!ifa) {
put_noaddr:
				np_err(np, "no IP address for %s, aborting\n",
				       np->dev_name);
				err = -EDESTADDRREQ;
				goto put;
			}

			np->local_ip.ip = ifa->ifa_local;
			np_info(np, "local IP %pI4\n", &np->local_ip.ip);
		} else {
#if IS_ENABLED(CONFIG_IPV6)
			struct inet6_dev *idev;

			err = -EDESTADDRREQ;
			idev = __in6_dev_get(ndev);
			if (idev) {
				struct inet6_ifaddr *ifp;

				read_lock_bh(&idev->lock);
				list_for_each_entry(ifp, &idev->addr_list, if_list) {
					if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
					    !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
						continue;
					np->local_ip.in6 = ifp->addr;
					err = 0;
					break;
				}
				read_unlock_bh(&idev->lock);
			}
			if (err) {
				np_err(np, "no IPv6 address for %s, aborting\n",
				       np->dev_name);
				goto put;
			} else
				np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
#else
			np_err(np, "IPv6 is not supported %s, aborting\n",
			       np->dev_name);
			err = -EINVAL;
			goto put;
#endif
		}
	}

	/* fill up the skb queue */
	refill_skbs();

	err = __netpoll_setup(np, ndev);
	if (err)
		goto put;
	netdev_tracker_alloc(ndev, &np->dev_tracker, GFP_KERNEL);
	rtnl_unlock();
	return 0;

put:
	dev_put(ndev);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(netpoll_setup);
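
/* End-to-end lifecycle sketch (hypothetical caller in the style of
 * netconsole; error handling trimmed, values illustrative):
 *
 *	static struct netpoll np = {
 *		.name = "netpoll_demo",
 *	};
 *	char opt[] = "6665@/eth0,6666@10.0.0.2/";
 *
 *	if (netpoll_parse_options(&np, opt))	// fills ports/IPs/dev_name
 *		return -EINVAL;
 *	if (netpoll_setup(&np))			// binds np to the device
 *		return -ENODEV;
 *
 *	netpoll_send_udp(&np, buf, len);	// with IRQs disabled, as above
 *	...
 *	netpoll_cleanup(&np);			// drop npinfo ref, release dev
 */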

static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);

static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);

	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}

void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;

	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	synchronize_srcu(&netpoll_srcu);

	if (refcount_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		RCU_INIT_POINTER(np->dev->npinfo, NULL);
		call_rcu(&npinfo->rcu, rcu_cleanup_netpoll_info);
	} else
		RCU_INIT_POINTER(np->dev->npinfo, NULL);
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);

void __netpoll_free(struct netpoll *np)
{
	ASSERT_RTNL();

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu();
	__netpoll_cleanup(np);
	kfree(np);
}
EXPORT_SYMBOL_GPL(__netpoll_free);

void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	netdev_put(np->dev, &np->dev_tracker);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);