#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <net/gro_cells.h>
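
/*
 * GRO cells let virtual devices that have no real rx NAPI (tunnels,
 * macsec, ...) feed their packets to the GRO engine.  Each possible
 * CPU owns a private skb queue plus a dummy NAPI instance, so the
 * enqueue path is lockless and aggregation runs in the NAPI softirq
 * on the local CPU.
 */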
struct gro_cell {
	struct sk_buff_head	napi_skbs;
	struct napi_struct	napi;
};

int gro_cells_receive(struct gro_cells *gcells, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct gro_cell *cell;
	int res;

	rcu_read_lock();
	if (unlikely(!(dev->flags & IFF_UP)))
		goto drop;

	/* Fall back to the regular path when GRO cannot help: cells not
	 * allocated, cloned skb, or GRO elided on this device.
	 */
	if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
		res = netif_rx(skb);
		goto unlock;
	}

	cell = this_cpu_ptr(gcells->cells);

	if (skb_queue_len(&cell->napi_skbs) > READ_ONCE(netdev_max_backlog)) {
drop:
		dev_core_stats_rx_dropped_inc(dev);
		kfree_skb(skb);
		res = NET_RX_DROP;
		goto unlock;
	}

	/* Lockless queue ops are safe here: producer and consumer both
	 * run with BH disabled on this CPU.  Schedule the NAPI only on
	 * the empty -> non-empty transition; the poller drains the rest.
	 */
	__skb_queue_tail(&cell->napi_skbs, skb);
	if (skb_queue_len(&cell->napi_skbs) == 1)
		napi_schedule(&cell->napi);

	res = NET_RX_SUCCESS;

unlock:
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(gro_cells_receive);

/* called under BH context */
static int gro_cell_poll(struct napi_struct *napi, int budget)
{
	struct gro_cell *cell = container_of(napi, struct gro_cell, napi);
	struct sk_buff *skb;
	int work_done = 0;

	while (work_done < budget) {
		skb = __skb_dequeue(&cell->napi_skbs);
		if (!skb)
			break;
		napi_gro_receive(napi, skb);
		work_done++;
	}

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}

int gro_cells_init(struct gro_cells *gcells, struct net_device *dev)
{
	int i;

	gcells->cells = alloc_percpu(struct gro_cell);
	if (!gcells->cells)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		__skb_queue_head_init(&cell->napi_skbs);

		/* These dummy NAPI structs are not busy poll candidates. */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);

		netif_napi_add(dev, &cell->napi, gro_cell_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&cell->napi);
	}
	return 0;
}
EXPORT_SYMBOL(gro_cells_init);

struct percpu_free_defer {
	struct rcu_head rcu;
	void __percpu	*ptr;
};

static void percpu_free_defer_callback(struct rcu_head *head)
{
	struct percpu_free_defer *defer;

	defer = container_of(head, struct percpu_free_defer, rcu);
	free_percpu(defer->ptr);
	kfree(defer);
}

void gro_cells_destroy(struct gro_cells *gcells)
{
	struct percpu_free_defer *defer;
	int i;

	if (!gcells->cells)
		return;
	for_each_possible_cpu(i) {
		struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);

		napi_disable(&cell->napi);
		__netif_napi_del(&cell->napi);
		__skb_queue_purge(&cell->napi_skbs);
	}
	/* We need to observe an rcu grace period before freeing ->cells,
	 * because netpoll could access dev->napi_list under rcu protection.
	 * Try hard using call_rcu() instead of synchronize_rcu(),
	 * because we might be called from cleanup_net(), and waiting
	 * would cause cleanup_net() stalls.
	 */
	defer = kmalloc(sizeof(*defer), GFP_KERNEL | __GFP_NOWARN);
	if (likely(defer)) {
		defer->ptr = gcells->cells;
		call_rcu(&defer->rcu, percpu_free_defer_callback);
	} else {
		/* We do not hold RTNL at this point, synchronize_net()
		 * would not be able to expedite this sync.
		 */
		synchronize_rcu_expedited();
		free_percpu(gcells->cells);
	}
	gcells->cells = NULL;
}
EXPORT_SYMBOL(gro_cells_destroy);
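
/*
 * Usage sketch (illustration only, not part of this file): how a
 * driver typically wires up the API above.  The my_tunnel_* names and
 * the private struct layout are hypothetical; real users such as
 * ip_tunnel and macsec differ in their details.
 */
#if 0
struct my_tunnel_priv {
	struct gro_cells gro_cells;	/* hypothetical driver state */
};

static int my_tunnel_dev_init(struct net_device *dev)
{
	struct my_tunnel_priv *priv = netdev_priv(dev);

	/* Allocate and enable the per-CPU cells once the netdev exists. */
	return gro_cells_init(&priv->gro_cells, dev);
}

static void my_tunnel_dev_uninit(struct net_device *dev)
{
	struct my_tunnel_priv *priv = netdev_priv(dev);

	/* Tear down after the device can no longer feed us packets. */
	gro_cells_destroy(&priv->gro_cells);
}

static int my_tunnel_rx(struct sk_buff *skb)
{
	struct my_tunnel_priv *priv = netdev_priv(skb->dev);

	/* Hand the decapsulated skb to GRO instead of plain netif_rx(). */
	return gro_cells_receive(&priv->gro_cells, skb);
}
#endif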