0001 // SPDX-License-Identifier: GPL-2.0
0002 #include <linux/skbuff.h>
0003 #include <linux/netdevice.h>
0004 #include <linux/if_vlan.h>
0005 #include <linux/netpoll.h>
0006 #include <linux/export.h>
0007 #include <net/gro.h>
0008 #include "vlan.h"
0009 
/* vlan_do_receive - deliver a hw-accel VLAN-tagged skb to its vlan device
 * @skbp: pointer to the skb pointer; may be updated (or set to NULL) here
 *
 * Looks up the vlan device matching the skb's vlan_proto/vlan_id on the
 * receiving device.  Returns false when no vlan device matches (caller
 * continues with the untouched skb) or when the skb was consumed here
 * (*skbp is then NULL).  Returns true once the skb has been retargeted to
 * the vlan device, the accel tag cleared and rx stats accounted.
 */
bool vlan_do_receive(struct sk_buff **skbp)
{
	struct sk_buff *skb = *skbp;
	__be16 vlan_proto = skb->vlan_proto;
	u16 vlan_id = skb_vlan_tag_get_id(skb);
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_proto, vlan_id);
	if (!vlan_dev)
		return false;

	/* we may modify the skb below; make sure we own it */
	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	if (unlikely(!(vlan_dev->flags & IFF_UP))) {
		/* vlan device is down: drop and signal consumption */
		kfree_skb(skb);
		*skbp = NULL;
		return false;
	}

	skb->dev = vlan_dev;
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	/* without REORDER_HDR the tag must be re-inserted into the frame
	 * data itself (unless a macvlan/bridge port will handle it) */
	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
	    !netif_is_macvlan_port(vlan_dev) &&
	    !netif_is_bridge_port(vlan_dev)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expect skb->data pointing to mac header.
		 * So change skb->data before calling it and change back to
		 * original position later
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_inner_tag(skb, skb->vlan_proto,
						    skb->vlan_tci, skb->mac_len);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	__vlan_hwaccel_clear_tag(skb);

	/* account the packet on the vlan device's per-cpu stats */
	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	u64_stats_inc(&rx_stats->rx_packets);
	u64_stats_add(&rx_stats->rx_bytes, skb->len);
	if (skb->pkt_type == PACKET_MULTICAST)
		u64_stats_inc(&rx_stats->rx_multicast);
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
0074 
0075 /* Must be invoked with rcu_read_lock. */
0076 struct net_device *__vlan_find_dev_deep_rcu(struct net_device *dev,
0077                     __be16 vlan_proto, u16 vlan_id)
0078 {
0079     struct vlan_info *vlan_info = rcu_dereference(dev->vlan_info);
0080 
0081     if (vlan_info) {
0082         return vlan_group_get_device(&vlan_info->grp,
0083                          vlan_proto, vlan_id);
0084     } else {
0085         /*
0086          * Lower devices of master uppers (bonding, team) do not have
0087          * grp assigned to themselves. Grp is assigned to upper device
0088          * instead.
0089          */
0090         struct net_device *upper_dev;
0091 
0092         upper_dev = netdev_master_upper_dev_get_rcu(dev);
0093         if (upper_dev)
0094             return __vlan_find_dev_deep_rcu(upper_dev,
0095                             vlan_proto, vlan_id);
0096     }
0097 
0098     return NULL;
0099 }
0100 EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
0101 
0102 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
0103 {
0104     struct net_device *ret = vlan_dev_priv(dev)->real_dev;
0105 
0106     while (is_vlan_dev(ret))
0107         ret = vlan_dev_priv(ret)->real_dev;
0108 
0109     return ret;
0110 }
0111 EXPORT_SYMBOL(vlan_dev_real_dev);
0112 
0113 u16 vlan_dev_vlan_id(const struct net_device *dev)
0114 {
0115     return vlan_dev_priv(dev)->vlan_id;
0116 }
0117 EXPORT_SYMBOL(vlan_dev_vlan_id);
0118 
0119 __be16 vlan_dev_vlan_proto(const struct net_device *dev)
0120 {
0121     return vlan_dev_priv(dev)->vlan_proto;
0122 }
0123 EXPORT_SYMBOL(vlan_dev_vlan_proto);
0124 
0125 /*
0126  * vlan info and vid list
0127  */
0128 
0129 static void vlan_group_free(struct vlan_group *grp)
0130 {
0131     int i, j;
0132 
0133     for (i = 0; i < VLAN_PROTO_NUM; i++)
0134         for (j = 0; j < VLAN_GROUP_ARRAY_SPLIT_PARTS; j++)
0135             kfree(grp->vlan_devices_arrays[i][j]);
0136 }
0137 
/* Free a vlan_info together with the vlan_group arrays it owns. */
static void vlan_info_free(struct vlan_info *vlan_info)
{
	vlan_group_free(&vlan_info->grp);
	kfree(vlan_info);
}
0143 
/* RCU callback: free the vlan_info once all RCU readers are done. */
static void vlan_info_rcu_free(struct rcu_head *rcu)
{
	vlan_info_free(container_of(rcu, struct vlan_info, rcu));
}
0148 
0149 static struct vlan_info *vlan_info_alloc(struct net_device *dev)
0150 {
0151     struct vlan_info *vlan_info;
0152 
0153     vlan_info = kzalloc(sizeof(struct vlan_info), GFP_KERNEL);
0154     if (!vlan_info)
0155         return NULL;
0156 
0157     vlan_info->real_dev = dev;
0158     INIT_LIST_HEAD(&vlan_info->vid_list);
0159     return vlan_info;
0160 }
0161 
/* One (proto, vid) pair tracked on a real device's vid_list.
 * Adds and removals happen under RTNL (see vlan_vid_add/vlan_vid_del). */
struct vlan_vid_info {
	struct list_head list;	/* node in vlan_info->vid_list */
	__be16 proto;		/* VLAN ethertype, network byte order */
	u16 vid;		/* VLAN ID */
	int refcount;		/* users of this (proto, vid) pair */
};
0168 
0169 static bool vlan_hw_filter_capable(const struct net_device *dev, __be16 proto)
0170 {
0171     if (proto == htons(ETH_P_8021Q) &&
0172         dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
0173         return true;
0174     if (proto == htons(ETH_P_8021AD) &&
0175         dev->features & NETIF_F_HW_VLAN_STAG_FILTER)
0176         return true;
0177     return false;
0178 }
0179 
0180 static struct vlan_vid_info *vlan_vid_info_get(struct vlan_info *vlan_info,
0181                            __be16 proto, u16 vid)
0182 {
0183     struct vlan_vid_info *vid_info;
0184 
0185     list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
0186         if (vid_info->proto == proto && vid_info->vid == vid)
0187             return vid_info;
0188     }
0189     return NULL;
0190 }
0191 
0192 static struct vlan_vid_info *vlan_vid_info_alloc(__be16 proto, u16 vid)
0193 {
0194     struct vlan_vid_info *vid_info;
0195 
0196     vid_info = kzalloc(sizeof(struct vlan_vid_info), GFP_KERNEL);
0197     if (!vid_info)
0198         return NULL;
0199     vid_info->proto = proto;
0200     vid_info->vid = vid;
0201 
0202     return vid_info;
0203 }
0204 
0205 static int vlan_add_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
0206 {
0207     if (!vlan_hw_filter_capable(dev, proto))
0208         return 0;
0209 
0210     if (netif_device_present(dev))
0211         return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid);
0212     else
0213         return -ENODEV;
0214 }
0215 
0216 static int vlan_kill_rx_filter_info(struct net_device *dev, __be16 proto, u16 vid)
0217 {
0218     if (!vlan_hw_filter_capable(dev, proto))
0219         return 0;
0220 
0221     if (netif_device_present(dev))
0222         return dev->netdev_ops->ndo_vlan_rx_kill_vid(dev, proto, vid);
0223     else
0224         return -ENODEV;
0225 }
0226 
0227 int vlan_for_each(struct net_device *dev,
0228           int (*action)(struct net_device *dev, int vid, void *arg),
0229           void *arg)
0230 {
0231     struct vlan_vid_info *vid_info;
0232     struct vlan_info *vlan_info;
0233     struct net_device *vdev;
0234     int ret;
0235 
0236     ASSERT_RTNL();
0237 
0238     vlan_info = rtnl_dereference(dev->vlan_info);
0239     if (!vlan_info)
0240         return 0;
0241 
0242     list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
0243         vdev = vlan_group_get_device(&vlan_info->grp, vid_info->proto,
0244                          vid_info->vid);
0245         ret = action(vdev, vid_info->vid, arg);
0246         if (ret)
0247             return ret;
0248     }
0249 
0250     return 0;
0251 }
0252 EXPORT_SYMBOL(vlan_for_each);
0253 
/* Program every tracked vid of @proto into the real device's HW filter.
 * On failure the vids programmed so far are rolled back and the error
 * is returned.
 */
int vlan_filter_push_vids(struct vlan_info *vlan_info, __be16 proto)
{
	struct net_device *real_dev = vlan_info->real_dev;
	struct vlan_vid_info *vlan_vid_info;
	int err;

	list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto) {
			err = vlan_add_rx_filter_info(real_dev, proto,
						      vlan_vid_info->vid);
			if (err)
				goto unwind;
		}
	}

	return 0;

unwind:
	/* walk back over the entries already added and remove them again */
	list_for_each_entry_continue_reverse(vlan_vid_info,
					     &vlan_info->vid_list, list) {
		if (vlan_vid_info->proto == proto)
			vlan_kill_rx_filter_info(real_dev, proto,
						 vlan_vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_filter_push_vids);
0282 
0283 void vlan_filter_drop_vids(struct vlan_info *vlan_info, __be16 proto)
0284 {
0285     struct vlan_vid_info *vlan_vid_info;
0286 
0287     list_for_each_entry(vlan_vid_info, &vlan_info->vid_list, list)
0288         if (vlan_vid_info->proto == proto)
0289             vlan_kill_rx_filter_info(vlan_info->real_dev,
0290                          vlan_vid_info->proto,
0291                          vlan_vid_info->vid);
0292 }
0293 EXPORT_SYMBOL(vlan_filter_drop_vids);
0294 
0295 static int __vlan_vid_add(struct vlan_info *vlan_info, __be16 proto, u16 vid,
0296               struct vlan_vid_info **pvid_info)
0297 {
0298     struct net_device *dev = vlan_info->real_dev;
0299     struct vlan_vid_info *vid_info;
0300     int err;
0301 
0302     vid_info = vlan_vid_info_alloc(proto, vid);
0303     if (!vid_info)
0304         return -ENOMEM;
0305 
0306     err = vlan_add_rx_filter_info(dev, proto, vid);
0307     if (err) {
0308         kfree(vid_info);
0309         return err;
0310     }
0311 
0312     list_add(&vid_info->list, &vlan_info->vid_list);
0313     vlan_info->nr_vids++;
0314     *pvid_info = vid_info;
0315     return 0;
0316 }
0317 
/* vlan_vid_add - take a reference on (proto, vid) for @dev
 *
 * Allocates the per-device vlan_info on first use and programs the HW
 * filter for previously unknown (proto, vid) pairs.  Caller must hold
 * RTNL.  Returns 0 on success or a negative errno.
 */
int vlan_vid_add(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;
	bool vlan_info_created = false;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info) {
		/* first vid on this device: create the tracking structure */
		vlan_info = vlan_info_alloc(dev);
		if (!vlan_info)
			return -ENOMEM;
		vlan_info_created = true;
	}
	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info) {
		err = __vlan_vid_add(vlan_info, proto, vid, &vid_info);
		if (err)
			goto out_free_vlan_info;
	}
	vid_info->refcount++;

	/* publish to RCU readers only after the entry is fully set up */
	if (vlan_info_created)
		rcu_assign_pointer(dev->vlan_info, vlan_info);

	return 0;

out_free_vlan_info:
	/* never published, so a plain kfree is safe (no grace period) */
	if (vlan_info_created)
		kfree(vlan_info);
	return err;
}
EXPORT_SYMBOL(vlan_vid_add);
0353 
/* Unlink and free @vid_info, dropping it from the real device's HW
 * filter.  The warning is suppressed while the device is unregistering,
 * when filter teardown failures are expected.
 */
static void __vlan_vid_del(struct vlan_info *vlan_info,
			   struct vlan_vid_info *vid_info)
{
	struct net_device *dev = vlan_info->real_dev;
	__be16 proto = vid_info->proto;
	u16 vid = vid_info->vid;
	int err;

	err = vlan_kill_rx_filter_info(dev, proto, vid);
	if (err && dev->reg_state != NETREG_UNREGISTERING)
		netdev_warn(dev, "failed to kill vid %04x/%d\n", proto, vid);

	list_del(&vid_info->list);
	kfree(vid_info);
	vlan_info->nr_vids--;
}
0370 
/* vlan_vid_del - drop a reference on (proto, vid) for @dev
 *
 * When the last reference goes away the entry is deleted, and when no
 * vids remain the vlan_info is unpublished and freed after an RCU grace
 * period.  Caller must hold RTNL.
 */
void vlan_vid_del(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_info *vlan_info;
	struct vlan_vid_info *vid_info;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		return;

	vid_info = vlan_vid_info_get(vlan_info, proto, vid);
	if (!vid_info)
		return;
	vid_info->refcount--;
	if (vid_info->refcount == 0) {
		__vlan_vid_del(vlan_info, vid_info);
		if (vlan_info->nr_vids == 0) {
			/* unpublish first, then free after RCU readers drain */
			RCU_INIT_POINTER(dev->vlan_info, NULL);
			call_rcu(&vlan_info->rcu, vlan_info_rcu_free);
		}
	}
}
EXPORT_SYMBOL(vlan_vid_del);
0395 
/* Replay every vid registered on @by_dev onto @dev (e.g. when enslaving
 * a device).  On failure, the vids added so far are removed again and
 * the error is returned.  Caller must hold RTNL.
 */
int vlan_vids_add_by_dev(struct net_device *dev,
			 const struct net_device *by_dev)
{
	struct vlan_vid_info *vid_info;
	struct vlan_info *vlan_info;
	int err;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(by_dev->vlan_info);
	if (!vlan_info)
		return 0;

	list_for_each_entry(vid_info, &vlan_info->vid_list, list) {
		err = vlan_vid_add(dev, vid_info->proto, vid_info->vid);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* walk back over the vids already added and drop them from @dev */
	list_for_each_entry_continue_reverse(vid_info,
					     &vlan_info->vid_list,
					     list) {
		vlan_vid_del(dev, vid_info->proto, vid_info->vid);
	}

	return err;
}
EXPORT_SYMBOL(vlan_vids_add_by_dev);
0426 
0427 void vlan_vids_del_by_dev(struct net_device *dev,
0428               const struct net_device *by_dev)
0429 {
0430     struct vlan_vid_info *vid_info;
0431     struct vlan_info *vlan_info;
0432 
0433     ASSERT_RTNL();
0434 
0435     vlan_info = rtnl_dereference(by_dev->vlan_info);
0436     if (!vlan_info)
0437         return;
0438 
0439     list_for_each_entry(vid_info, &vlan_info->vid_list, list)
0440         vlan_vid_del(dev, vid_info->proto, vid_info->vid);
0441 }
0442 EXPORT_SYMBOL(vlan_vids_del_by_dev);
0443 
0444 bool vlan_uses_dev(const struct net_device *dev)
0445 {
0446     struct vlan_info *vlan_info;
0447 
0448     ASSERT_RTNL();
0449 
0450     vlan_info = rtnl_dereference(dev->vlan_info);
0451     if (!vlan_info)
0452         return false;
0453     return vlan_info->grp.nr_vlan_devs ? true : false;
0454 }
0455 EXPORT_SYMBOL(vlan_uses_dev);
0456 
/* GRO receive handler for VLAN-tagged frames: validates the VLAN
 * header, marks held packets with a differing VLAN header as not
 * same-flow, then strips the header and hands the packet to the
 * encapsulated protocol's GRO receive handler.
 */
static struct sk_buff *vlan_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	unsigned int hlen, off_vlan;
	struct sk_buff *pp = NULL;
	struct vlan_hdr *vhdr;
	struct sk_buff *p;
	__be16 type;
	int flush = 1;	/* flush unless a usable inner handler is found */

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		/* header not in the linear fast area; pull it in */
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out;

	flush = 0;

	/* held packets with a different VLAN header can't share the flow */
	list_for_each_entry(p, head, list) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	/* consume the VLAN header and fix up the pulled checksum */
	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));

	pp = indirect_call_gro_receive_inet(ptype->callbacks.gro_receive,
					    ipv6_gro_receive, inet_gro_receive,
					    head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);

	return pp;
}
0508 
0509 static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
0510 {
0511     struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
0512     __be16 type = vhdr->h_vlan_encapsulated_proto;
0513     struct packet_offload *ptype;
0514     int err = -ENOENT;
0515 
0516     ptype = gro_find_complete_by_type(type);
0517     if (ptype)
0518         err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
0519                      ipv6_gro_complete, inet_gro_complete,
0520                      skb, nhoff + sizeof(*vhdr));
0521 
0522     return err;
0523 }
0524 
/* GRO offload handlers for both VLAN ethertypes (802.1Q and 802.1ad),
 * registered with a non-default priority of 10. */
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
0543 
0544 static int __init vlan_offload_init(void)
0545 {
0546     unsigned int i;
0547 
0548     for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
0549         dev_add_offload(&vlan_packet_offloads[i]);
0550 
0551     return 0;
0552 }
0553 
0554 fs_initcall(vlan_offload_init);