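/* Network-device interface routines for the Xen network backend
 * (xen-netback): netdev allocation, per-queue setup/teardown and the
 * hooks wired into struct net_device_ops.
 */
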
#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32

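/* Number of bytes allowed on the internal guest Rx queue. */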
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

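/* Set SKBFL_ZEROCOPY_ENABLE on the skb and account for it as an in-flight
 * zerocopy packet; the counter is dropped again by
 * xenvif_skb_zerocopy_complete() once the zerocopy callback has run.
 */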
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->flags |= SKBFL_ZEROCOPY_ENABLE;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

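	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */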
	wake_up(&queue->dealloc_wq);
}

static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

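	/* The vif has been marked as rogue (disabled): report no work so
	 * NAPI deschedules this queue; the interface is turned off later
	 * from thread context.
	 */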
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

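		/* If the queue is rate-limited, it is rescheduled from the
		 * credit timer callback instead.
		 */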
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

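	/* If no queues are set up internally, always return 0: the packet
	 * is going to be dropped in xenvif_start_xmit() anyway.
	 */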
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
				dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

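	/* Drop the packet if queues are not set up. This handler runs
	 * inside an RCU read-side section, so the lock-free read of
	 * num_queues is safe here.
	 */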
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

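	/* Obtain the queue to be used to transmit this packet. */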
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

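	/* Drop the packet if the queue is not fully set up. */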
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

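	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer, otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */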
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

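	/* Aggregate the per-queue counters. */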
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
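	/* Counters for zerocopy handling of guest TX packets. */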
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	static const u8 dummy_addr[ETH_ALEN] = {
		0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
	};
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);

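	/* Allocate a netdev with the maximum supported number of queues.
	 * When the frontend selects the desired number it is updated via
	 * netif_set_real_num_*_queues().
	 */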
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

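	/* Start out with no queues. */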
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

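	/* Initialise a dummy MAC address (FE:FF:FF:FF:FF:FF), the
	 * numerically largest non-broadcast address, so that it is not
	 * picked up by an Ethernet bridge for STP purposes.
	 */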
	eth_hw_addr_set(dev, dummy_addr);

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

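	/* Pages backing the grant mappings of frontend TX buffers; with
	 * ballooning enabled these do not consume real memory.
	 */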
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	struct xenbus_device *xendev = xenvif_to_xenbus_device(vif);
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xendev, &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(xendev, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xendev, vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct xenbus_device *dev = xenvif_to_xenbus_device(queue->vif);
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       NAPI_POLL_WEIGHT);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;

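	/* Take a reference to the task so it is not freed if the thread
	 * function returns before kthread_stop() is called.
	 */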
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
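		/* feature-split-event-channels == 0 */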
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
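		/* feature-split-event-channels == 1 */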
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			dev, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev);
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

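/* Reverse the relevant parts of xenvif_init_queue(): used for queue
 * teardown from xenvif_free() and on the error handling paths of the
 * xenbus connect code.
 */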
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}