/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any caller outside the RX thread must wake the RX thread
 * with xenvif_kick_thread() afterwards to avoid a race with putting
 * the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
                                       const struct sk_buff *skb)
{
    unsigned int needed = 0;

    if (skb) {
        needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
        if (skb_is_gso(skb))
            needed++;
        if (skb->sw_hash)
            needed++;
    }

    WRITE_ONCE(queue->rx_slots_needed, needed);
}

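/*
 * Check whether the frontend has posted enough request slots for the
 * packet at the head of the rx_queue.  If not, arm req_event so the
 * frontend sends an event when it produces more requests, then re-check
 * req_prod to close the race against requests posted before the event
 * was armed.
 */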
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
    RING_IDX prod, cons;
    unsigned int needed;

    needed = READ_ONCE(queue->rx_slots_needed);
    if (!needed)
        return false;

    do {
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;

        if (prod - cons >= needed)
            return true;

        queue->rx.sring->req_event = prod + 1;

        /* Make sure event is visible before we check prod
         * again.
         */
        mb();
    } while (queue->rx.sring->req_prod != prod);

    return false;
}

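/*
 * Queue an skb for transmission to the frontend.  Called on the
 * transmit path, outside the RX thread.  If the internal queue already
 * holds rx_queue_max bytes the skb is dropped and the matching netdev
 * TX queue is stopped until xenvif_rx_dequeue() drains the backlog
 * below the limit again.
 */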
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
    unsigned long flags;

    spin_lock_irqsave(&queue->rx_queue.lock, flags);

    if (queue->rx_queue_len >= queue->rx_queue_max) {
        struct net_device *dev = queue->vif->dev;

        netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
        kfree_skb(skb);
        queue->vif->dev->stats.rx_dropped++;
    } else {
        if (skb_queue_empty(&queue->rx_queue))
            xenvif_update_needed_slots(queue, skb);

        __skb_queue_tail(&queue->rx_queue, skb);

        queue->rx_queue_len += skb->len;
    }

    spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

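/*
 * Remove the skb at the head of the rx_queue, recompute the slots
 * needed for the new head and wake the netdev TX queue once the
 * backlog drops back below rx_queue_max.
 */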
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    spin_lock_irq(&queue->rx_queue.lock);

    skb = __skb_dequeue(&queue->rx_queue);
    if (skb) {
        xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

        queue->rx_queue_len -= skb->len;
        if (queue->rx_queue_len < queue->rx_queue_max) {
            struct netdev_queue *txq;

            txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
            netif_tx_wake_queue(txq);
        }
    }

    spin_unlock_irq(&queue->rx_queue.lock);

    return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    while ((skb = xenvif_rx_dequeue(queue)) != NULL)
        kfree_skb(skb);
}

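/*
 * Drop packets that have sat in the rx_queue past their expiry time
 * (XENVIF_RX_CB(skb)->expires, set by the caller that queued them), so
 * an unresponsive frontend cannot pin skbs and foreign pages forever.
 */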
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
    struct sk_buff *skb;

    for (;;) {
        skb = skb_peek(&queue->rx_queue);
        if (!skb)
            break;
        if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
            break;
        xenvif_rx_dequeue(queue);
        kfree_skb(skb);
        queue->vif->dev->stats.rx_dropped++;
    }
}

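/*
 * Issue the batched grant copies, propagate any copy failure into the
 * matching response's status, push the responses to the shared ring,
 * notify the frontend if required and free the completed skbs.
 */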
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
    unsigned int i;
    int notify;

    gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

    for (i = 0; i < queue->rx_copy.num; i++) {
        struct gnttab_copy *op;

        op = &queue->rx_copy.op[i];

        /* If the copy failed, overwrite the status field in
         * the corresponding response.
         */
        if (unlikely(op->status != GNTST_okay)) {
            struct xen_netif_rx_response *rsp;

            rsp = RING_GET_RESPONSE(&queue->rx,
                                    queue->rx_copy.idx[i]);
            rsp->status = op->status;
        }
    }

    queue->rx_copy.num = 0;

    /* Push responses for all completed packets. */
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
    if (notify)
        notify_remote_via_irq(queue->rx_irq);

    __skb_queue_purge(queue->rx_copy.completed);
}

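/*
 * Append one grant copy operation copying len bytes at data into the
 * guest page granted by req->gref at the given offset.  Backend data
 * that sits in a foreign page (grant-mapped from another domain) is
 * copied by grant reference rather than by frame number.  A full batch
 * is flushed before the new op is added.
 */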
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
                               struct xen_netif_rx_request *req,
                               unsigned int offset, void *data, size_t len)
{
    struct gnttab_copy *op;
    struct page *page;
    struct xen_page_foreign *foreign;

    if (queue->rx_copy.num == COPY_BATCH_SIZE)
        xenvif_rx_copy_flush(queue);

    op = &queue->rx_copy.op[queue->rx_copy.num];

    page = virt_to_page(data);

    op->flags = GNTCOPY_dest_gref;

    foreign = xen_page_foreign(page);
    if (foreign) {
        op->source.domid = foreign->domid;
        op->source.u.ref = foreign->gref;
        op->flags |= GNTCOPY_source_gref;
    } else {
        op->source.u.gmfn = virt_to_gfn(data);
        op->source.domid  = DOMID_SELF;
    }

    op->source.offset = xen_offset_in_page(data);
    op->dest.u.ref    = req->gref;
    op->dest.domid    = queue->vif->domid;
    op->dest.offset   = offset;
    op->len           = len;

    queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
    queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
    if (skb_is_gso(skb)) {
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            return XEN_NETIF_GSO_TYPE_TCPV4;
        else
            return XEN_NETIF_GSO_TYPE_TCPV6;
    }
    return XEN_NETIF_GSO_TYPE_NONE;
}

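/* Per-packet state while an skb is being copied into ring slots. */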
struct xenvif_pkt_state {
    struct sk_buff *skb;
    size_t remaining_len;
    struct sk_buff *frag_iter;
    int frag; /* frag == -1 => frag_iter->head */
    unsigned int frag_offset;
    struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
    unsigned int extra_count;
    unsigned int slot;
};

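/*
 * Start on the next queued skb: dequeue it, reset the packet state and
 * prepare any extra info segments (GSO metadata, XDP headroom and the
 * packet hash) that will be emitted in extra slots after the first
 * data slot.
 */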
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
    struct sk_buff *skb;
    unsigned int gso_type;

    skb = xenvif_rx_dequeue(queue);

    queue->stats.tx_bytes += skb->len;
    queue->stats.tx_packets++;

    /* Reset packet state. */
    memset(pkt, 0, sizeof(struct xenvif_pkt_state));

    pkt->skb = skb;
    pkt->frag_iter = skb;
    pkt->remaining_len = skb->len;
    pkt->frag = -1;

    gso_type = xenvif_gso_type(skb);
    if ((1 << gso_type) & queue->vif->gso_mask) {
        struct xen_netif_extra_info *extra;

        extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

        extra->u.gso.type = gso_type;
        extra->u.gso.size = skb_shinfo(skb)->gso_size;
        extra->u.gso.pad = 0;
        extra->u.gso.features = 0;
        extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
        extra->flags = 0;

        pkt->extra_count++;
    }

    if (queue->vif->xdp_headroom) {
        struct xen_netif_extra_info *extra;

        extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

        memset(extra, 0, sizeof(struct xen_netif_extra_info));
        extra->u.xdp.headroom = queue->vif->xdp_headroom;
        extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
        extra->flags = 0;

        pkt->extra_count++;
    }

    if (skb->sw_hash) {
        struct xen_netif_extra_info *extra;

        extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

        extra->u.hash.algorithm =
            XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

        if (skb->l4_hash)
            extra->u.hash.type =
                skb->protocol == htons(ETH_P_IP) ?
                _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
                _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
        else
            extra->u.hash.type =
                skb->protocol == htons(ETH_P_IP) ?
                _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
                _XEN_NETIF_CTRL_HASH_TYPE_IPV6;

        *(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

        extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
        extra->flags = 0;

        pkt->extra_count++;
    }
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
                               struct xenvif_pkt_state *pkt)
{
    /* All responses are ready to be pushed. */
    queue->rx.rsp_prod_pvt = queue->rx.req_cons;

    __skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
    struct sk_buff *frag_iter = pkt->frag_iter;
    unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

    pkt->frag++;
    pkt->frag_offset = 0;

    if (pkt->frag >= nr_frags) {
        if (frag_iter == pkt->skb)
            pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
        else
            pkt->frag_iter = frag_iter->next;

        pkt->frag = -1;
    }
}

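/*
 * Return the next contiguous chunk of the current fragment.  The chunk
 * is limited both by the space left in the current ring slot (a
 * XEN_PAGE_SIZE page, already filled up to offset) and by the page
 * boundary of the source data, since a single grant copy cannot cross
 * a page.
 */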
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 unsigned int offset, void **data,
                                 size_t *len)
{
    struct sk_buff *frag_iter = pkt->frag_iter;
    void *frag_data;
    size_t frag_len, chunk_len;

    BUG_ON(!frag_iter);

    if (pkt->frag == -1) {
        frag_data = frag_iter->data;
        frag_len = skb_headlen(frag_iter);
    } else {
        skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

        frag_data = skb_frag_address(frag);
        frag_len = skb_frag_size(frag);
    }

    frag_data += pkt->frag_offset;
    frag_len -= pkt->frag_offset;

    chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
    chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
                                         xen_offset_in_page(frag_data));

    pkt->frag_offset += chunk_len;

    /* Advance to next frag? */
    if (frag_len == chunk_len)
        xenvif_rx_next_frag(pkt);

    *data = frag_data;
    *len = chunk_len;
}

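/*
 * Fill one ring slot with packet data, adding grant copy ops until the
 * slot's page is full or the packet is exhausted.  The first slot also
 * carries the checksum flags and signals any extra info slots;
 * XEN_NETRXF_more_data is set while further slots are still to come.
 */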
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
                                struct xenvif_pkt_state *pkt,
                                struct xen_netif_rx_request *req,
                                struct xen_netif_rx_response *rsp)
{
    unsigned int offset = queue->vif->xdp_headroom;
    unsigned int flags;

    do {
        size_t len;
        void *data;

        xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
        xenvif_rx_copy_add(queue, req, offset, data, len);

        offset += len;
        pkt->remaining_len -= len;

    } while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

    if (pkt->remaining_len > 0)
        flags = XEN_NETRXF_more_data;
    else
        flags = 0;

    if (pkt->slot == 0) {
        struct sk_buff *skb = pkt->skb;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
            flags |= XEN_NETRXF_csum_blank |
                     XEN_NETRXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
            flags |= XEN_NETRXF_data_validated;

        if (pkt->extra_count != 0)
            flags |= XEN_NETRXF_extra_info;
    }

    rsp->offset = 0;
    rsp->flags = flags;
    rsp->id = req->id;
    rsp->status = (s16)offset;
}

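/*
 * Emit one pending extra info segment, overlaying the response slot
 * with a struct xen_netif_extra_info.  XEN_NETIF_EXTRA_FLAG_MORE is
 * set while further extras remain.
 */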
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
                                 struct xenvif_pkt_state *pkt,
                                 struct xen_netif_rx_request *req,
                                 struct xen_netif_rx_response *rsp)
{
    struct xen_netif_extra_info *extra = (void *)rsp;
    unsigned int i;

    pkt->extra_count--;

    for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
        if (pkt->extras[i].type) {
            *extra = pkt->extras[i];

            if (pkt->extra_count != 0)
                extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

            pkt->extras[i].type = 0;
            return;
        }
    }
    BUG();
}

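/*
 * Transmit one skb to the frontend, consuming one request/response
 * pair per slot until all data and extra segments have been written.
 */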
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
    struct xenvif_pkt_state pkt;

    xenvif_rx_next_skb(queue, &pkt);

    queue->last_rx_time = jiffies;

    do {
        struct xen_netif_rx_request *req;
        struct xen_netif_rx_response *rsp;

        req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
        rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

        /* Extras must go after the first data slot */
        if (pkt.slot != 0 && pkt.extra_count != 0)
            xenvif_rx_extra_slot(queue, &pkt, req, rsp);
        else
            xenvif_rx_data_slot(queue, &pkt, req, rsp);

        queue->rx.req_cons++;
        pkt.slot++;
    } while (pkt.remaining_len > 0 || pkt.extra_count != 0);

    xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

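/*
 * Main receive work loop: copy queued packets into the frontend's ring
 * while packets and ring slots are available, bounded by RX_BATCH_SIZE
 * per invocation, then flush the copies and push the responses.
 */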
static void xenvif_rx_action(struct xenvif_queue *queue)
{
    struct sk_buff_head completed_skbs;
    unsigned int work_done = 0;

    __skb_queue_head_init(&completed_skbs);
    queue->rx_copy.completed = &completed_skbs;

    while (xenvif_rx_ring_slots_available(queue) &&
           !skb_queue_empty(&queue->rx_queue) &&
           work_done < RX_BATCH_SIZE) {
        xenvif_rx_skb(queue);
        work_done++;
    }

    /* Flush any pending copies and complete all skbs. */
    xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
    RING_IDX prod, cons;

    prod = queue->rx.sring->req_prod;
    cons = queue->rx.req_cons;

    return prod - cons;
}

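/*
 * Stall detection: the queue counts as stalled when fewer slots than
 * the head-of-queue packet needs have been available for longer than
 * stall_timeout since the last transmit, and as ready again once
 * enough slots appear.
 */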
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
    unsigned int needed = READ_ONCE(queue->rx_slots_needed);

    return !queue->stalled &&
        xenvif_rx_queue_slots(queue) < needed &&
        time_after(jiffies,
                   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
    unsigned int needed = READ_ONCE(queue->rx_slots_needed);

    return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

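/*
 * True if the RX kthread has something to do: ring slots available for
 * queued packets, a stall state change to handle, a stop request (when
 * called from the kthread itself), or a disabled (rogue) interface to
 * tear down.
 */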
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
    return xenvif_rx_ring_slots_available(queue) ||
        (queue->vif->stall_timeout &&
         (xenvif_rx_queue_stalled(queue) ||
          xenvif_rx_queue_ready(queue))) ||
        (test_kthread && kthread_should_stop()) ||
        queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
    struct sk_buff *skb;
    long timeout;

    skb = skb_peek(&queue->rx_queue);
    if (!skb)
        return MAX_SCHEDULE_TIMEOUT;

    timeout = XENVIF_RX_CB(skb)->expires - jiffies;
    return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when an skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
    DEFINE_WAIT(wait);

    if (xenvif_have_rx_work(queue, true))
        return;

    for (;;) {
        long ret;

        prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
        if (xenvif_have_rx_work(queue, true))
            break;
        if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
                                &queue->eoi_pending) &
            (NETBK_RX_EOI | NETBK_COMMON_EOI))
            xen_irq_lateeoi(queue->rx_irq, 0);

        ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
        if (!ret)
            break;
    }
    finish_wait(&queue->wq, &wait);
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
    struct xenvif *vif = queue->vif;

    queue->stalled = true;

    /* At least one queue has stalled? Disable the carrier. */
    spin_lock(&vif->lock);
    if (vif->stalled_queues++ == 0) {
        netdev_info(vif->dev, "Guest Rx stalled");
        netif_carrier_off(vif->dev);
    }
    spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
    struct xenvif *vif = queue->vif;

    queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
    queue->stalled = false;

    /* All queues are ready? Enable the carrier. */
    spin_lock(&vif->lock);
    if (--vif->stalled_queues == 0) {
        netdev_info(vif->dev, "Guest Rx ready");
        netif_carrier_on(vif->dev);
    }
    spin_unlock(&vif->lock);
}

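/*
 * Per-queue RX kernel thread.  Sleeps until there is work, transmits
 * queued packets to the frontend, manages carrier state based on guest
 * responsiveness and drops packets the guest has left to expire.
 */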
int xenvif_kthread_guest_rx(void *data)
{
    struct xenvif_queue *queue = data;
    struct xenvif *vif = queue->vif;

    if (!vif->stall_timeout)
        xenvif_queue_carrier_on(queue);

    for (;;) {
        xenvif_wait_for_rx_work(queue);

        if (kthread_should_stop())
            break;

        /* The frontend has been found to be rogue, so disable
         * it in kthread context.  Currently this is only set
         * when netback finds that the frontend has sent a
         * malformed packet, but the interface cannot be
         * disabled in softirq context, so the work is deferred
         * to here, on the thread associated with queue 0.
         */
        if (unlikely(vif->disabled && queue->id == 0)) {
            xenvif_carrier_off(vif);
            break;
        }

        if (!skb_queue_empty(&queue->rx_queue))
            xenvif_rx_action(queue);

        /* If the guest hasn't provided any Rx slots for a
         * while it's probably not responsive; drop the
         * carrier so packets are dropped earlier.
         */
        if (vif->stall_timeout) {
            if (xenvif_rx_queue_stalled(queue))
                xenvif_queue_carrier_off(queue);
            else if (xenvif_rx_queue_ready(queue))
                xenvif_queue_carrier_on(queue);
        }

        /* Queued packets may have foreign pages from other
         * domains.  These cannot be queued indefinitely as
         * this would starve guests of grant refs and transmit
         * slots.
         */
        xenvif_rx_queue_drop_expired(queue);

        cond_resched();
    }

    /* Bin any remaining skbs */
    xenvif_rx_queue_purge(queue);

    return 0;
}