0001 // SPDX-License-Identifier: GPL-2.0-only
0002 #include <linux/kernel.h>
0003 #include <linux/netdevice.h>
0004 #include <linux/rtnetlink.h>
0005 #include <linux/slab.h>
0006 #include <net/switchdev.h>
0007 
0008 #include "br_private.h"
0009 #include "br_private_tunnel.h"
0010 
0011 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
0012 
0013 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
0014                   const void *ptr)
0015 {
0016     const struct net_bridge_vlan *vle = ptr;
0017     u16 vid = *(u16 *)arg->key;
0018 
0019     return vle->vid != vid;
0020 }
0021 
0022 static const struct rhashtable_params br_vlan_rht_params = {
0023     .head_offset = offsetof(struct net_bridge_vlan, vnode),
0024     .key_offset = offsetof(struct net_bridge_vlan, vid),
0025     .key_len = sizeof(u16),
0026     .nelem_hint = 3,
0027     .max_size = VLAN_N_VID,
0028     .obj_cmpfn = br_vlan_cmp,
0029     .automatic_shrinking = true,
0030 };
0031 
0032 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
0033 {
0034     return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
0035 }
0036 
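/* Make @v the pvid of its vlan group and cache its state; the pvid and
 * its state are read locklessly on the fast path via br_get_pvid() and
 * br_vlan_get_pvid_state().
 */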
0037 static void __vlan_add_pvid(struct net_bridge_vlan_group *vg,
0038                 const struct net_bridge_vlan *v)
0039 {
0040     if (vg->pvid == v->vid)
0041         return;
0042 
0043     smp_wmb();
0044     br_vlan_set_pvid_state(vg, v->state);
0045     vg->pvid = v->vid;
0046 }
0047 
0048 static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
0049 {
0050     if (vg->pvid != vid)
0051         return;
0052 
0053     smp_wmb();
0054     vg->pvid = 0;
0055 }
0056 
0057 /* Update the BRIDGE_VLAN_INFO_PVID and BRIDGE_VLAN_INFO_UNTAGGED flags of @v.
0058  * If @commit is false, return just whether the BRIDGE_VLAN_INFO_PVID and
0059  * BRIDGE_VLAN_INFO_UNTAGGED bits of @flags would produce any change to @v.
0060  */
0061 static bool __vlan_flags_update(struct net_bridge_vlan *v, u16 flags,
0062                 bool commit)
0063 {
0064     struct net_bridge_vlan_group *vg;
0065     bool change;
0066 
0067     if (br_vlan_is_master(v))
0068         vg = br_vlan_group(v->br);
0069     else
0070         vg = nbp_vlan_group(v->port);
0071 
0072     /* check if anything would be changed on commit */
0073     change = !!(flags & BRIDGE_VLAN_INFO_PVID) == !!(vg->pvid != v->vid) ||
0074          ((flags ^ v->flags) & BRIDGE_VLAN_INFO_UNTAGGED);
0075 
0076     if (!commit)
0077         goto out;
0078 
0079     if (flags & BRIDGE_VLAN_INFO_PVID)
0080         __vlan_add_pvid(vg, v);
0081     else
0082         __vlan_delete_pvid(vg, v->vid);
0083 
0084     if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
0085         v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
0086     else
0087         v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
0088 
0089 out:
0090     return change;
0091 }
0092 
0093 static bool __vlan_flags_would_change(struct net_bridge_vlan *v, u16 flags)
0094 {
0095     return __vlan_flags_update(v, flags, false);
0096 }
0097 
0098 static void __vlan_flags_commit(struct net_bridge_vlan *v, u16 flags)
0099 {
0100     __vlan_flags_update(v, flags, true);
0101 }
0102 
0103 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
0104               struct net_bridge_vlan *v, u16 flags,
0105               struct netlink_ext_ack *extack)
0106 {
0107     int err;
0108 
0109     /* Try switchdev op first. In case it is not supported, fall back to
0110      * 8021q add.
0111      */
0112     err = br_switchdev_port_vlan_add(dev, v->vid, flags, false, extack);
0113     if (err == -EOPNOTSUPP)
0114         return vlan_vid_add(dev, br->vlan_proto, v->vid);
0115     v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
0116     return err;
0117 }
0118 
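/* Link @v into its vlan group's vlan_list, keeping the list sorted by
 * ascending vid. The list is walked backwards, so adding the highest
 * vid so far (the common case) terminates immediately.
 */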
0119 static void __vlan_add_list(struct net_bridge_vlan *v)
0120 {
0121     struct net_bridge_vlan_group *vg;
0122     struct list_head *headp, *hpos;
0123     struct net_bridge_vlan *vent;
0124 
0125     if (br_vlan_is_master(v))
0126         vg = br_vlan_group(v->br);
0127     else
0128         vg = nbp_vlan_group(v->port);
0129 
0130     headp = &vg->vlan_list;
0131     list_for_each_prev(hpos, headp) {
0132         vent = list_entry(hpos, struct net_bridge_vlan, vlist);
0133         if (v->vid >= vent->vid)
0134             break;
0135     }
0136     list_add_rcu(&v->vlist, hpos);
0137 }
0138 
0139 static void __vlan_del_list(struct net_bridge_vlan *v)
0140 {
0141     list_del_rcu(&v->vlist);
0142 }
0143 
0144 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
0145               const struct net_bridge_vlan *v)
0146 {
0147     int err;
0148 
0149     /* Try switchdev op first. In case it is not supported, fall back to
0150      * 8021q del.
0151      */
0152     err = br_switchdev_port_vlan_del(dev, v->vid);
0153     if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
0154         vlan_vid_del(dev, br->vlan_proto, v->vid);
0155     return err == -EOPNOTSUPP ? 0 : err;
0156 }
0157 
0158 /* Returns the master vlan; if it doesn't exist, it gets created. In all cases
0159  * a reference is taken to the master vlan before returning.
0160  */
0161 static struct net_bridge_vlan *
0162 br_vlan_get_master(struct net_bridge *br, u16 vid,
0163            struct netlink_ext_ack *extack)
0164 {
0165     struct net_bridge_vlan_group *vg;
0166     struct net_bridge_vlan *masterv;
0167 
0168     vg = br_vlan_group(br);
0169     masterv = br_vlan_find(vg, vid);
0170     if (!masterv) {
0171         bool changed;
0172 
0173         /* missing global ctx, create it now */
0174         if (br_vlan_add(br, vid, 0, &changed, extack))
0175             return NULL;
0176         masterv = br_vlan_find(vg, vid);
0177         if (WARN_ON(!masterv))
0178             return NULL;
0179         refcount_set(&masterv->refcnt, 1);
0180         return masterv;
0181     }
0182     refcount_inc(&masterv->refcnt);
0183 
0184     return masterv;
0185 }
0186 
0187 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
0188 {
0189     struct net_bridge_vlan *v;
0190 
0191     v = container_of(rcu, struct net_bridge_vlan, rcu);
0192     WARN_ON(!br_vlan_is_master(v));
0193     free_percpu(v->stats);
0194     v->stats = NULL;
0195     kfree(v);
0196 }
0197 
0198 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
0199 {
0200     struct net_bridge_vlan_group *vg;
0201 
0202     if (!br_vlan_is_master(masterv))
0203         return;
0204 
0205     vg = br_vlan_group(masterv->br);
0206     if (refcount_dec_and_test(&masterv->refcnt)) {
0207         rhashtable_remove_fast(&vg->vlan_hash,
0208                        &masterv->vnode, br_vlan_rht_params);
0209         __vlan_del_list(masterv);
0210         br_multicast_toggle_one_vlan(masterv, false);
0211         br_multicast_ctx_deinit(&masterv->br_mcast_ctx);
0212         call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
0213     }
0214 }
0215 
0216 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
0217 {
0218     struct net_bridge_vlan *v;
0219 
0220     v = container_of(rcu, struct net_bridge_vlan, rcu);
0221     WARN_ON(br_vlan_is_master(v));
0222     /* if we had per-port stats configured then free them here */
0223     if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
0224         free_percpu(v->stats);
0225     v->stats = NULL;
0226     kfree(v);
0227 }
0228 
0229 static void br_vlan_init_state(struct net_bridge_vlan *v)
0230 {
0231     struct net_bridge *br;
0232 
0233     if (br_vlan_is_master(v))
0234         br = v->br;
0235     else
0236         br = v->port->br;
0237 
0238     if (br_opt_get(br, BROPT_MST_ENABLED)) {
0239         br_mst_vlan_init_state(v);
0240         return;
0241     }
0242 
0243     v->state = BR_STATE_FORWARDING;
0244     v->msti = 0;
0245 }
0246 
0247 /* This is the shared VLAN add function which works for both ports and bridge
0248  * devices. There are four possible calls to this function in terms of the
0249  * vlan entry type:
0250  * 1. vlan is being added on a port (no master flags, global entry exists)
0251  * 2. vlan is being added on a bridge (both master and brentry flags)
0252  * 3. vlan is being added on a port, but a global entry didn't exist which
0253  *    is being created right now (master flag set, brentry flag unset), the
0254  *    global entry is used for global per-vlan features, but not for filtering
0255  * 4. same as 3 but with both master and brentry flags set so the entry
0256  *    will be used for filtering in both the port and the bridge
0257  */
0258 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
0259               struct netlink_ext_ack *extack)
0260 {
0261     struct net_bridge_vlan *masterv = NULL;
0262     struct net_bridge_port *p = NULL;
0263     struct net_bridge_vlan_group *vg;
0264     struct net_device *dev;
0265     struct net_bridge *br;
0266     int err;
0267 
0268     if (br_vlan_is_master(v)) {
0269         br = v->br;
0270         dev = br->dev;
0271         vg = br_vlan_group(br);
0272     } else {
0273         p = v->port;
0274         br = p->br;
0275         dev = p->dev;
0276         vg = nbp_vlan_group(p);
0277     }
0278 
0279     if (p) {
0280         /* Add VLAN to the device filter if it is supported.
0281          * This ensures tagged traffic enters the bridge when
0282          * promiscuous mode is disabled by br_manage_promisc().
0283          */
0284         err = __vlan_vid_add(dev, br, v, flags, extack);
0285         if (err)
0286             goto out;
0287 
0288         /* need to work on the master vlan too */
0289         if (flags & BRIDGE_VLAN_INFO_MASTER) {
0290             bool changed;
0291 
0292             err = br_vlan_add(br, v->vid,
0293                       flags | BRIDGE_VLAN_INFO_BRENTRY,
0294                       &changed, extack);
0295             if (err)
0296                 goto out_filt;
0297 
0298             if (changed)
0299                 br_vlan_notify(br, NULL, v->vid, 0,
0300                            RTM_NEWVLAN);
0301         }
0302 
0303         masterv = br_vlan_get_master(br, v->vid, extack);
0304         if (!masterv) {
0305             err = -ENOMEM;
0306             goto out_filt;
0307         }
0308         v->brvlan = masterv;
0309         if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
0310             v->stats =
0311                  netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
0312             if (!v->stats) {
0313                 err = -ENOMEM;
0314                 goto out_filt;
0315             }
0316             v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
0317         } else {
0318             v->stats = masterv->stats;
0319         }
0320         br_multicast_port_ctx_init(p, v, &v->port_mcast_ctx);
0321     } else {
0322         if (br_vlan_should_use(v)) {
0323             err = br_switchdev_port_vlan_add(dev, v->vid, flags,
0324                              false, extack);
0325             if (err && err != -EOPNOTSUPP)
0326                 goto out;
0327         }
0328         br_multicast_ctx_init(br, v, &v->br_mcast_ctx);
0329         v->priv_flags |= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
0330     }
0331 
0332     /* Add the dev mac and count the vlan only if it's usable */
0333     if (br_vlan_should_use(v)) {
0334         err = br_fdb_add_local(br, p, dev->dev_addr, v->vid);
0335         if (err) {
0336             br_err(br, "failed to insert local address into bridge forwarding table\n");
0337             goto out_filt;
0338         }
0339         vg->num_vlans++;
0340     }
0341 
0342     /* set the state before publishing */
0343     br_vlan_init_state(v);
0344 
0345     err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
0346                         br_vlan_rht_params);
0347     if (err)
0348         goto out_fdb_insert;
0349 
0350     __vlan_add_list(v);
0351     __vlan_flags_commit(v, flags);
0352     br_multicast_toggle_one_vlan(v, true);
0353 
0354     if (p)
0355         nbp_vlan_set_vlan_dev_state(p, v->vid);
0356 out:
0357     return err;
0358 
0359 out_fdb_insert:
0360     if (br_vlan_should_use(v)) {
0361         br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
0362         vg->num_vlans--;
0363     }
0364 
0365 out_filt:
0366     if (p) {
0367         __vlan_vid_del(dev, br, v);
0368         if (masterv) {
0369             if (v->stats && masterv->stats != v->stats)
0370                 free_percpu(v->stats);
0371             v->stats = NULL;
0372 
0373             br_vlan_put_master(masterv);
0374             v->brvlan = NULL;
0375         }
0376     } else {
0377         br_switchdev_port_vlan_del(dev, v->vid);
0378     }
0379 
0380     goto out;
0381 }
0382 
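/* Undo __vlan_add() for @v: clear it as pvid if needed, remove it from
 * the switchdev/8021q filters, drop the brentry accounting and, for port
 * vlans, unlink the entry from the hash and list and free it via RCU
 * before releasing the reference on the master vlan.
 */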
0383 static int __vlan_del(struct net_bridge_vlan *v)
0384 {
0385     struct net_bridge_vlan *masterv = v;
0386     struct net_bridge_vlan_group *vg;
0387     struct net_bridge_port *p = NULL;
0388     int err = 0;
0389 
0390     if (br_vlan_is_master(v)) {
0391         vg = br_vlan_group(v->br);
0392     } else {
0393         p = v->port;
0394         vg = nbp_vlan_group(v->port);
0395         masterv = v->brvlan;
0396     }
0397 
0398     __vlan_delete_pvid(vg, v->vid);
0399     if (p) {
0400         err = __vlan_vid_del(p->dev, p->br, v);
0401         if (err)
0402             goto out;
0403     } else {
0404         err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
0405         if (err && err != -EOPNOTSUPP)
0406             goto out;
0407         err = 0;
0408     }
0409 
0410     if (br_vlan_should_use(v)) {
0411         v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
0412         vg->num_vlans--;
0413     }
0414 
0415     if (masterv != v) {
0416         vlan_tunnel_info_del(vg, v);
0417         rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
0418                        br_vlan_rht_params);
0419         __vlan_del_list(v);
0420         nbp_vlan_set_vlan_dev_state(p, v->vid);
0421         br_multicast_toggle_one_vlan(v, false);
0422         br_multicast_port_ctx_deinit(&v->port_mcast_ctx);
0423         call_rcu(&v->rcu, nbp_vlan_rcu_free);
0424     }
0425 
0426     br_vlan_put_master(masterv);
0427 out:
0428     return err;
0429 }
0430 
0431 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
0432 {
0433     WARN_ON(!list_empty(&vg->vlan_list));
0434     rhashtable_destroy(&vg->vlan_hash);
0435     vlan_tunnel_deinit(vg);
0436     kfree(vg);
0437 }
0438 
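/* Delete all vlans in @vg, coalescing contiguous vids into ranges so
 * that a single RTM_DELVLAN notification is sent per range.
 */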
0439 static void __vlan_flush(const struct net_bridge *br,
0440              const struct net_bridge_port *p,
0441              struct net_bridge_vlan_group *vg)
0442 {
0443     struct net_bridge_vlan *vlan, *tmp;
0444     u16 v_start = 0, v_end = 0;
0445     int err;
0446 
0447     __vlan_delete_pvid(vg, vg->pvid);
0448     list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
0449         /* take care of disjoint ranges */
0450         if (!v_start) {
0451             v_start = vlan->vid;
0452         } else if (vlan->vid - v_end != 1) {
0453             /* found range end, notify and start next one */
0454             br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
0455             v_start = vlan->vid;
0456         }
0457         v_end = vlan->vid;
0458 
0459         err = __vlan_del(vlan);
0460         if (err) {
0461             br_err(br,
0462                    "port %u(%s) failed to delete vlan %d: %pe\n",
0463                    (unsigned int) p->port_no, p->dev->name,
0464                    vlan->vid, ERR_PTR(err));
0465         }
0466     }
0467 
0468     /* notify about the last/whole vlan range */
0469     if (v_start)
0470         br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
0471 }
0472 
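/* Egress vlan handling: verify that the frame's vlan is configured and
 * usable on this bridge, account tx stats if enabled, strip the tag for
 * untagged-egress vlans (unless tx forwarding offload keeps it) and
 * apply any egress vlan tunnel mapping. Returns the skb to transmit, or
 * NULL if it was dropped.
 */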
0473 struct sk_buff *br_handle_vlan(struct net_bridge *br,
0474                    const struct net_bridge_port *p,
0475                    struct net_bridge_vlan_group *vg,
0476                    struct sk_buff *skb)
0477 {
0478     struct pcpu_sw_netstats *stats;
0479     struct net_bridge_vlan *v;
0480     u16 vid;
0481 
0482     /* If this packet was not filtered at input, let it pass */
0483     if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
0484         goto out;
0485 
0486     /* At this point, we know that the frame was filtered and contains
0487      * a valid vlan id.  If the vlan id has the untagged flag set,
0488      * send untagged; otherwise, send tagged.
0489      */
0490     br_vlan_get_tag(skb, &vid);
0491     v = br_vlan_find(vg, vid);
0492     /* The vlan entry must be configured at this point.  The only
0493      * exception is when the bridge is in promiscuous mode and the
0494      * packet is destined for the bridge device.  In this case,
0495      * pass the packet as is.
0496      */
0497     if (!v || !br_vlan_should_use(v)) {
0498         if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
0499             goto out;
0500         } else {
0501             kfree_skb(skb);
0502             return NULL;
0503         }
0504     }
0505     if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
0506         stats = this_cpu_ptr(v->stats);
0507         u64_stats_update_begin(&stats->syncp);
0508         u64_stats_add(&stats->tx_bytes, skb->len);
0509         u64_stats_inc(&stats->tx_packets);
0510         u64_stats_update_end(&stats->syncp);
0511     }
0512 
0513     /* If the skb will be sent using forwarding offload, the assumption is
0514      * that the switchdev will inject the packet into hardware together
0515      * with the bridge VLAN, so that it can be forwarded according to that
0516      * VLAN. The switchdev should deal with popping the VLAN header in
0517      * hardware on each egress port as appropriate. So only strip the VLAN
0518      * header if forwarding offload is not being used.
0519      */
0520     if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED &&
0521         !br_switchdev_frame_uses_tx_fwd_offload(skb))
0522         __vlan_hwaccel_clear_tag(skb);
0523 
0524     if (p && (p->flags & BR_VLAN_TUNNEL) &&
0525         br_handle_egress_vlan_tunnel(skb, v)) {
0526         kfree_skb(skb);
0527         return NULL;
0528     }
0529 out:
0530     return skb;
0531 }
0532 
0533 /* Called under RCU */
0534 static bool __allowed_ingress(const struct net_bridge *br,
0535                   struct net_bridge_vlan_group *vg,
0536                   struct sk_buff *skb, u16 *vid,
0537                   u8 *state,
0538                   struct net_bridge_vlan **vlan)
0539 {
0540     struct pcpu_sw_netstats *stats;
0541     struct net_bridge_vlan *v;
0542     bool tagged;
0543 
0544     BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
0545     /* If vlan tx offload is disabled on the bridge device and the frame
0546      * was sent from a vlan device on top of the bridge device, it does
0547      * not have an HW-accelerated vlan tag.
0548      */
0549     if (unlikely(!skb_vlan_tag_present(skb) &&
0550              skb->protocol == br->vlan_proto)) {
0551         skb = skb_vlan_untag(skb);
0552         if (unlikely(!skb))
0553             return false;
0554     }
0555 
0556     if (!br_vlan_get_tag(skb, vid)) {
0557         /* Tagged frame */
0558         if (skb->vlan_proto != br->vlan_proto) {
0559             /* Protocol-mismatch, empty out vlan_tci for new tag */
0560             skb_push(skb, ETH_HLEN);
0561             skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
0562                             skb_vlan_tag_get(skb));
0563             if (unlikely(!skb))
0564                 return false;
0565 
0566             skb_pull(skb, ETH_HLEN);
0567             skb_reset_mac_len(skb);
0568             *vid = 0;
0569             tagged = false;
0570         } else {
0571             tagged = true;
0572         }
0573     } else {
0574         /* Untagged frame */
0575         tagged = false;
0576     }
0577 
0578     if (!*vid) {
0579         u16 pvid = br_get_pvid(vg);
0580 
0581         /* Frame had a tag with VID 0 or did not have a tag.
0582          * See if pvid is set on this port.  That tells us which
0583          * vlan untagged or priority-tagged traffic belongs to.
0584          */
0585         if (!pvid)
0586             goto drop;
0587 
0588         /* PVID is set on this port.  Any untagged or priority-tagged
0589          * ingress frame is considered to belong to this vlan.
0590          */
0591         *vid = pvid;
0592         if (likely(!tagged))
0593             /* Untagged Frame. */
0594             __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
0595         else
0596             /* Priority-tagged Frame.
0597              * At this point, we know that skb->vlan_tci VID
0598              * field was 0.
0599              * We update only VID field and preserve PCP field.
0600              */
0601             skb->vlan_tci |= pvid;
0602 
0603         /* if snooping and stats are disabled we can avoid the lookup */
0604         if (!br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) &&
0605             !br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
0606             if (*state == BR_STATE_FORWARDING) {
0607                 *state = br_vlan_get_pvid_state(vg);
0608                 if (!br_vlan_state_allowed(*state, true))
0609                     goto drop;
0610             }
0611             return true;
0612         }
0613     }
0614     v = br_vlan_find(vg, *vid);
0615     if (!v || !br_vlan_should_use(v))
0616         goto drop;
0617 
0618     if (*state == BR_STATE_FORWARDING) {
0619         *state = br_vlan_get_state(v);
0620         if (!br_vlan_state_allowed(*state, true))
0621             goto drop;
0622     }
0623 
0624     if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
0625         stats = this_cpu_ptr(v->stats);
0626         u64_stats_update_begin(&stats->syncp);
0627         u64_stats_add(&stats->rx_bytes, skb->len);
0628         u64_stats_inc(&stats->rx_packets);
0629         u64_stats_update_end(&stats->syncp);
0630     }
0631 
0632     *vlan = v;
0633 
0634     return true;
0635 
0636 drop:
0637     kfree_skb(skb);
0638     return false;
0639 }
0640 
0641 bool br_allowed_ingress(const struct net_bridge *br,
0642             struct net_bridge_vlan_group *vg, struct sk_buff *skb,
0643             u16 *vid, u8 *state,
0644             struct net_bridge_vlan **vlan)
0645 {
0646     /* If VLAN filtering is disabled on the bridge, all packets are
0647      * permitted.
0648      */
0649     *vlan = NULL;
0650     if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
0651         BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
0652         return true;
0653     }
0654 
0655     return __allowed_ingress(br, vg, skb, vid, state, vlan);
0656 }
0657 
0658 /* Called under RCU. */
0659 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
0660                const struct sk_buff *skb)
0661 {
0662     const struct net_bridge_vlan *v;
0663     u16 vid;
0664 
0665     /* If this packet was not filtered at input, let it pass */
0666     if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
0667         return true;
0668 
0669     br_vlan_get_tag(skb, &vid);
0670     v = br_vlan_find(vg, vid);
0671     if (v && br_vlan_should_use(v) &&
0672         br_vlan_state_allowed(br_vlan_get_state(v), false))
0673         return true;
0674 
0675     return false;
0676 }
0677 
0678 /* Called under RCU */
0679 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
0680 {
0681     struct net_bridge_vlan_group *vg;
0682     struct net_bridge *br = p->br;
0683     struct net_bridge_vlan *v;
0684 
0685     /* If filtering was disabled at input, let it pass. */
0686     if (!br_opt_get(br, BROPT_VLAN_ENABLED))
0687         return true;
0688 
0689     vg = nbp_vlan_group_rcu(p);
0690     if (!vg || !vg->num_vlans)
0691         return false;
0692 
0693     if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
0694         *vid = 0;
0695 
0696     if (!*vid) {
0697         *vid = br_get_pvid(vg);
0698         if (!*vid ||
0699             !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
0700             return false;
0701 
0702         return true;
0703     }
0704 
0705     v = br_vlan_find(vg, *vid);
0706     if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
0707         return true;
0708 
0709     return false;
0710 }
0711 
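/* Called when a vlan that already exists on the bridge is added again:
 * promote a master-only entry to a real brentry if requested and/or
 * update its pvid/untagged flags, notifying switchdev only when
 * something would actually change.
 */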
0712 static int br_vlan_add_existing(struct net_bridge *br,
0713                 struct net_bridge_vlan_group *vg,
0714                 struct net_bridge_vlan *vlan,
0715                 u16 flags, bool *changed,
0716                 struct netlink_ext_ack *extack)
0717 {
0718     bool would_change = __vlan_flags_would_change(vlan, flags);
0719     bool becomes_brentry = false;
0720     int err;
0721 
0722     if (!br_vlan_is_brentry(vlan)) {
0723         /* Trying to change flags of non-existent bridge vlan */
0724         if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
0725             return -EINVAL;
0726 
0727         becomes_brentry = true;
0728     }
0729 
0730     /* Master VLANs that aren't brentries weren't notified before;
0731      * notify them now.
0732      */
0733     if (becomes_brentry || would_change) {
0734         err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags,
0735                          would_change, extack);
0736         if (err && err != -EOPNOTSUPP)
0737             return err;
0738     }
0739 
0740     if (becomes_brentry) {
0741         /* It was only kept for port vlans, now make it real */
0742         err = br_fdb_add_local(br, NULL, br->dev->dev_addr, vlan->vid);
0743         if (err) {
0744             br_err(br, "failed to insert local address into bridge forwarding table\n");
0745             goto err_fdb_insert;
0746         }
0747 
0748         refcount_inc(&vlan->refcnt);
0749         vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
0750         vg->num_vlans++;
0751         *changed = true;
0752         br_multicast_toggle_one_vlan(vlan, true);
0753     }
0754 
0755     __vlan_flags_commit(vlan, flags);
0756     if (would_change)
0757         *changed = true;
0758 
0759     return 0;
0760 
0761 err_fdb_insert:
0762     br_switchdev_port_vlan_del(br->dev, vlan->vid);
0763     return err;
0764 }
0765 
0766 /* Must be protected by RTNL.
0767  * Must be called with vid in range from 1 to 4094 inclusive.
0768  * @changed is set to true only if the vlan was created or updated.
0769  */
0770 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
0771         struct netlink_ext_ack *extack)
0772 {
0773     struct net_bridge_vlan_group *vg;
0774     struct net_bridge_vlan *vlan;
0775     int ret;
0776 
0777     ASSERT_RTNL();
0778 
0779     *changed = false;
0780     vg = br_vlan_group(br);
0781     vlan = br_vlan_find(vg, vid);
0782     if (vlan)
0783         return br_vlan_add_existing(br, vg, vlan, flags, changed,
0784                         extack);
0785 
0786     vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
0787     if (!vlan)
0788         return -ENOMEM;
0789 
0790     vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
0791     if (!vlan->stats) {
0792         kfree(vlan);
0793         return -ENOMEM;
0794     }
0795     vlan->vid = vid;
0796     vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
0797     vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
0798     vlan->br = br;
0799     if (flags & BRIDGE_VLAN_INFO_BRENTRY)
0800         refcount_set(&vlan->refcnt, 1);
0801     ret = __vlan_add(vlan, flags, extack);
0802     if (ret) {
0803         free_percpu(vlan->stats);
0804         kfree(vlan);
0805     } else {
0806         *changed = true;
0807     }
0808 
0809     return ret;
0810 }
0811 
0812 /* Must be protected by RTNL.
0813  * Must be called with vid in range from 1 to 4094 inclusive.
0814  */
0815 int br_vlan_delete(struct net_bridge *br, u16 vid)
0816 {
0817     struct net_bridge_vlan_group *vg;
0818     struct net_bridge_vlan *v;
0819 
0820     ASSERT_RTNL();
0821 
0822     vg = br_vlan_group(br);
0823     v = br_vlan_find(vg, vid);
0824     if (!v || !br_vlan_is_brentry(v))
0825         return -ENOENT;
0826 
0827     br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
0828     br_fdb_delete_by_port(br, NULL, vid, 0);
0829 
0830     vlan_tunnel_info_del(vg, v);
0831 
0832     return __vlan_del(v);
0833 }
0834 
0835 void br_vlan_flush(struct net_bridge *br)
0836 {
0837     struct net_bridge_vlan_group *vg;
0838 
0839     ASSERT_RTNL();
0840 
0841     vg = br_vlan_group(br);
0842     __vlan_flush(br, NULL, vg);
0843     RCU_INIT_POINTER(br->vlgrp, NULL);
0844     synchronize_rcu();
0845     __vlan_group_free(vg);
0846 }
0847 
0848 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
0849 {
0850     if (!vg)
0851         return NULL;
0852 
0853     return br_vlan_lookup(&vg->vlan_hash, vid);
0854 }
0855 
0856 /* Must be protected by RTNL. */
0857 static void recalculate_group_addr(struct net_bridge *br)
0858 {
0859     if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
0860         return;
0861 
0862     spin_lock_bh(&br->lock);
0863     if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
0864         br->vlan_proto == htons(ETH_P_8021Q)) {
0865         /* Bridge Group Address */
0866         br->group_addr[5] = 0x00;
0867     } else { /* vlan_enabled && ETH_P_8021AD */
0868         /* Provider Bridge Group Address */
0869         br->group_addr[5] = 0x08;
0870     }
0871     spin_unlock_bh(&br->lock);
0872 }
0873 
0874 /* Must be protected by RTNL. */
0875 void br_recalculate_fwd_mask(struct net_bridge *br)
0876 {
0877     if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
0878         br->vlan_proto == htons(ETH_P_8021Q))
0879         br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
0880     else /* vlan_enabled && ETH_P_8021AD */
0881         br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
0882                           ~(1u << br->group_addr[5]);
0883 }
0884 
0885 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
0886               struct netlink_ext_ack *extack)
0887 {
0888     struct switchdev_attr attr = {
0889         .orig_dev = br->dev,
0890         .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
0891         .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
0892         .u.vlan_filtering = val,
0893     };
0894     int err;
0895 
0896     if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
0897         return 0;
0898 
0899     br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
0900 
0901     err = switchdev_port_attr_set(br->dev, &attr, extack);
0902     if (err && err != -EOPNOTSUPP) {
0903         br_opt_toggle(br, BROPT_VLAN_ENABLED, !val);
0904         return err;
0905     }
0906 
0907     br_manage_promisc(br);
0908     recalculate_group_addr(br);
0909     br_recalculate_fwd_mask(br);
0910     if (!val && br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
0911         br_info(br, "vlan filtering disabled, automatically disabling multicast vlan snooping\n");
0912         br_multicast_toggle_vlan_snooping(br, false, NULL);
0913     }
0914 
0915     return 0;
0916 }
0917 
0918 bool br_vlan_enabled(const struct net_device *dev)
0919 {
0920     struct net_bridge *br = netdev_priv(dev);
0921 
0922     return br_opt_get(br, BROPT_VLAN_ENABLED);
0923 }
0924 EXPORT_SYMBOL_GPL(br_vlan_enabled);
0925 
0926 int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
0927 {
0928     struct net_bridge *br = netdev_priv(dev);
0929 
0930     *p_proto = ntohs(br->vlan_proto);
0931 
0932     return 0;
0933 }
0934 EXPORT_SYMBOL_GPL(br_vlan_get_proto);
0935 
0936 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
0937             struct netlink_ext_ack *extack)
0938 {
0939     struct switchdev_attr attr = {
0940         .orig_dev = br->dev,
0941         .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
0942         .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
0943         .u.vlan_protocol = ntohs(proto),
0944     };
0945     int err = 0;
0946     struct net_bridge_port *p;
0947     struct net_bridge_vlan *vlan;
0948     struct net_bridge_vlan_group *vg;
0949     __be16 oldproto = br->vlan_proto;
0950 
0951     if (br->vlan_proto == proto)
0952         return 0;
0953 
0954     err = switchdev_port_attr_set(br->dev, &attr, extack);
0955     if (err && err != -EOPNOTSUPP)
0956         return err;
0957 
0958     /* Add VLANs for the new proto to the device filter. */
0959     list_for_each_entry(p, &br->port_list, list) {
0960         vg = nbp_vlan_group(p);
0961         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
0962             err = vlan_vid_add(p->dev, proto, vlan->vid);
0963             if (err)
0964                 goto err_filt;
0965         }
0966     }
0967 
0968     br->vlan_proto = proto;
0969 
0970     recalculate_group_addr(br);
0971     br_recalculate_fwd_mask(br);
0972 
0973     /* Delete VLANs for the old proto from the device filter. */
0974     list_for_each_entry(p, &br->port_list, list) {
0975         vg = nbp_vlan_group(p);
0976         list_for_each_entry(vlan, &vg->vlan_list, vlist)
0977             vlan_vid_del(p->dev, oldproto, vlan->vid);
0978     }
0979 
0980     return 0;
0981 
0982 err_filt:
0983     attr.u.vlan_protocol = ntohs(oldproto);
0984     switchdev_port_attr_set(br->dev, &attr, NULL);
0985 
0986     list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
0987         vlan_vid_del(p->dev, proto, vlan->vid);
0988 
0989     list_for_each_entry_continue_reverse(p, &br->port_list, list) {
0990         vg = nbp_vlan_group(p);
0991         list_for_each_entry(vlan, &vg->vlan_list, vlist)
0992             vlan_vid_del(p->dev, proto, vlan->vid);
0993     }
0994 
0995     return err;
0996 }
0997 
0998 int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
0999               struct netlink_ext_ack *extack)
1000 {
1001     if (!eth_type_vlan(htons(val)))
1002         return -EPROTONOSUPPORT;
1003 
1004     return __br_vlan_set_proto(br, htons(val), extack);
1005 }
1006 
1007 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
1008 {
1009     switch (val) {
1010     case 0:
1011     case 1:
1012         br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
1013         break;
1014     default:
1015         return -EINVAL;
1016     }
1017 
1018     return 0;
1019 }
1020 
1021 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
1022 {
1023     struct net_bridge_port *p;
1024 
1025     /* only allow changing the option if there are no port vlans configured */
1026     list_for_each_entry(p, &br->port_list, list) {
1027         struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1028 
1029         if (vg->num_vlans)
1030             return -EBUSY;
1031     }
1032 
1033     switch (val) {
1034     case 0:
1035     case 1:
1036         br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
1037         break;
1038     default:
1039         return -EINVAL;
1040     }
1041 
1042     return 0;
1043 }
1044 
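/* Return true if @vid is the pvid of @vg and its entry still looks like
 * the one auto-installed for default_pvid (usable and untagged), i.e. it
 * hasn't been overridden by user configuration.
 */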
1045 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
1046 {
1047     struct net_bridge_vlan *v;
1048 
1049     if (vid != vg->pvid)
1050         return false;
1051 
1052     v = br_vlan_lookup(&vg->vlan_hash, vid);
1053     if (v && br_vlan_should_use(v) &&
1054         (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1055         return true;
1056 
1057     return false;
1058 }
1059 
1060 static void br_vlan_disable_default_pvid(struct net_bridge *br)
1061 {
1062     struct net_bridge_port *p;
1063     u16 pvid = br->default_pvid;
1064 
1065     /* Disable default_pvid on all ports where it is still
1066      * configured.
1067      */
1068     if (vlan_default_pvid(br_vlan_group(br), pvid)) {
1069         if (!br_vlan_delete(br, pvid))
1070             br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1071     }
1072 
1073     list_for_each_entry(p, &br->port_list, list) {
1074         if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
1075             !nbp_vlan_delete(p, pvid))
1076             br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1077     }
1078 
1079     br->default_pvid = 0;
1080 }
1081 
1082 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
1083                    struct netlink_ext_ack *extack)
1084 {
1085     const struct net_bridge_vlan *pvent;
1086     struct net_bridge_vlan_group *vg;
1087     struct net_bridge_port *p;
1088     unsigned long *changed;
1089     bool vlchange;
1090     u16 old_pvid;
1091     int err = 0;
1092 
1093     if (!pvid) {
1094         br_vlan_disable_default_pvid(br);
1095         return 0;
1096     }
1097 
1098     changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
1099     if (!changed)
1100         return -ENOMEM;
1101 
1102     old_pvid = br->default_pvid;
1103 
1104     /* Update default_pvid config only if we do not conflict with
1105      * user configuration.
1106      */
1107     vg = br_vlan_group(br);
1108     pvent = br_vlan_find(vg, pvid);
1109     if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
1110         (!pvent || !br_vlan_should_use(pvent))) {
1111         err = br_vlan_add(br, pvid,
1112                   BRIDGE_VLAN_INFO_PVID |
1113                   BRIDGE_VLAN_INFO_UNTAGGED |
1114                   BRIDGE_VLAN_INFO_BRENTRY,
1115                   &vlchange, extack);
1116         if (err)
1117             goto out;
1118 
1119         if (br_vlan_delete(br, old_pvid))
1120             br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
1121         br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
1122         __set_bit(0, changed);
1123     }
1124 
1125     list_for_each_entry(p, &br->port_list, list) {
1126         /* Update default_pvid config only if we do not conflict with
1127          * user configuration.
1128          */
1129         vg = nbp_vlan_group(p);
1130         if ((old_pvid &&
1131              !vlan_default_pvid(vg, old_pvid)) ||
1132             br_vlan_find(vg, pvid))
1133             continue;
1134 
1135         err = nbp_vlan_add(p, pvid,
1136                    BRIDGE_VLAN_INFO_PVID |
1137                    BRIDGE_VLAN_INFO_UNTAGGED,
1138                    &vlchange, extack);
1139         if (err)
1140             goto err_port;
1141         if (nbp_vlan_delete(p, old_pvid))
1142             br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
1143         br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
1144         __set_bit(p->port_no, changed);
1145     }
1146 
1147     br->default_pvid = pvid;
1148 
1149 out:
1150     bitmap_free(changed);
1151     return err;
1152 
1153 err_port:
1154     list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1155         if (!test_bit(p->port_no, changed))
1156             continue;
1157 
1158         if (old_pvid) {
1159             nbp_vlan_add(p, old_pvid,
1160                      BRIDGE_VLAN_INFO_PVID |
1161                      BRIDGE_VLAN_INFO_UNTAGGED,
1162                      &vlchange, NULL);
1163             br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
1164         }
1165         nbp_vlan_delete(p, pvid);
1166         br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1167     }
1168 
1169     if (test_bit(0, changed)) {
1170         if (old_pvid) {
1171             br_vlan_add(br, old_pvid,
1172                     BRIDGE_VLAN_INFO_PVID |
1173                     BRIDGE_VLAN_INFO_UNTAGGED |
1174                     BRIDGE_VLAN_INFO_BRENTRY,
1175                     &vlchange, NULL);
1176             br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
1177         }
1178         br_vlan_delete(br, pvid);
1179         br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1180     }
1181     goto out;
1182 }
1183 
1184 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
1185                  struct netlink_ext_ack *extack)
1186 {
1187     u16 pvid = val;
1188     int err = 0;
1189 
1190     if (val >= VLAN_VID_MASK)
1191         return -EINVAL;
1192 
1193     if (pvid == br->default_pvid)
1194         goto out;
1195 
1196     /* Only allow default pvid change when filtering is disabled */
1197     if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1198         pr_info_once("Please disable vlan filtering to change default_pvid\n");
1199         err = -EPERM;
1200         goto out;
1201     }
1202     err = __br_vlan_set_default_pvid(br, pvid, extack);
1203 out:
1204     return err;
1205 }
1206 
1207 int br_vlan_init(struct net_bridge *br)
1208 {
1209     struct net_bridge_vlan_group *vg;
1210     int ret = -ENOMEM;
1211 
1212     vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1213     if (!vg)
1214         goto out;
1215     ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1216     if (ret)
1217         goto err_rhtbl;
1218     ret = vlan_tunnel_init(vg);
1219     if (ret)
1220         goto err_tunnel_init;
1221     INIT_LIST_HEAD(&vg->vlan_list);
1222     br->vlan_proto = htons(ETH_P_8021Q);
1223     br->default_pvid = 1;
1224     rcu_assign_pointer(br->vlgrp, vg);
1225 
1226 out:
1227     return ret;
1228 
1229 err_tunnel_init:
1230     rhashtable_destroy(&vg->vlan_hash);
1231 err_rhtbl:
1232     kfree(vg);
1233 
1234     goto out;
1235 }
1236 
1237 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1238 {
1239     struct switchdev_attr attr = {
1240         .orig_dev = p->br->dev,
1241         .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1242         .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1243         .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1244     };
1245     struct net_bridge_vlan_group *vg;
1246     int ret = -ENOMEM;
1247 
1248     vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1249     if (!vg)
1250         goto out;
1251 
1252     ret = switchdev_port_attr_set(p->dev, &attr, extack);
1253     if (ret && ret != -EOPNOTSUPP)
1254         goto err_vlan_enabled;
1255 
1256     ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1257     if (ret)
1258         goto err_rhtbl;
1259     ret = vlan_tunnel_init(vg);
1260     if (ret)
1261         goto err_tunnel_init;
1262     INIT_LIST_HEAD(&vg->vlan_list);
1263     rcu_assign_pointer(p->vlgrp, vg);
1264     if (p->br->default_pvid) {
1265         bool changed;
1266 
1267         ret = nbp_vlan_add(p, p->br->default_pvid,
1268                    BRIDGE_VLAN_INFO_PVID |
1269                    BRIDGE_VLAN_INFO_UNTAGGED,
1270                    &changed, extack);
1271         if (ret)
1272             goto err_vlan_add;
1273         br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
1274     }
1275 out:
1276     return ret;
1277 
1278 err_vlan_add:
1279     RCU_INIT_POINTER(p->vlgrp, NULL);
1280     synchronize_rcu();
1281     vlan_tunnel_deinit(vg);
1282 err_tunnel_init:
1283     rhashtable_destroy(&vg->vlan_hash);
1284 err_rhtbl:
1285 err_vlan_enabled:
1286     kfree(vg);
1287 
1288     goto out;
1289 }
1290 
1291 /* Must be protected by RTNL.
1292  * Must be called with vid in range from 1 to 4094 inclusive.
1293  * @changed is set to true only if the vlan was created or updated.
1294  */
1295 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1296          bool *changed, struct netlink_ext_ack *extack)
1297 {
1298     struct net_bridge_vlan *vlan;
1299     int ret;
1300 
1301     ASSERT_RTNL();
1302 
1303     *changed = false;
1304     vlan = br_vlan_find(nbp_vlan_group(port), vid);
1305     if (vlan) {
1306         bool would_change = __vlan_flags_would_change(vlan, flags);
1307 
1308         if (would_change) {
1309             /* Pass the flags to the hardware bridge */
1310             ret = br_switchdev_port_vlan_add(port->dev, vid, flags,
1311                              true, extack);
1312             if (ret && ret != -EOPNOTSUPP)
1313                 return ret;
1314         }
1315 
1316         __vlan_flags_commit(vlan, flags);
1317         *changed = would_change;
1318 
1319         return 0;
1320     }
1321 
1322     vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1323     if (!vlan)
1324         return -ENOMEM;
1325 
1326     vlan->vid = vid;
1327     vlan->port = port;
1328     ret = __vlan_add(vlan, flags, extack);
1329     if (ret)
1330         kfree(vlan);
1331     else
1332         *changed = true;
1333 
1334     return ret;
1335 }
1336 
1337 /* Must be protected by RTNL.
1338  * Must be called with vid in range from 1 to 4094 inclusive.
1339  */
1340 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1341 {
1342     struct net_bridge_vlan *v;
1343 
1344     ASSERT_RTNL();
1345 
1346     v = br_vlan_find(nbp_vlan_group(port), vid);
1347     if (!v)
1348         return -ENOENT;
1349     br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1350     br_fdb_delete_by_port(port->br, port, vid, 0);
1351 
1352     return __vlan_del(v);
1353 }
1354 
1355 void nbp_vlan_flush(struct net_bridge_port *port)
1356 {
1357     struct net_bridge_vlan_group *vg;
1358 
1359     ASSERT_RTNL();
1360 
1361     vg = nbp_vlan_group(port);
1362     __vlan_flush(port->br, port, vg);
1363     RCU_INIT_POINTER(port->vlgrp, NULL);
1364     synchronize_rcu();
1365     __vlan_group_free(vg);
1366 }
1367 
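/* Sum the per-CPU counters of @v into @stats, taking a consistent
 * snapshot of each CPU's counters via the u64_stats seqcount.
 */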
1368 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1369                struct pcpu_sw_netstats *stats)
1370 {
1371     int i;
1372 
1373     memset(stats, 0, sizeof(*stats));
1374     for_each_possible_cpu(i) {
1375         u64 rxpackets, rxbytes, txpackets, txbytes;
1376         struct pcpu_sw_netstats *cpu_stats;
1377         unsigned int start;
1378 
1379         cpu_stats = per_cpu_ptr(v->stats, i);
1380         do {
1381             start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1382             rxpackets = u64_stats_read(&cpu_stats->rx_packets);
1383             rxbytes = u64_stats_read(&cpu_stats->rx_bytes);
1384             txbytes = u64_stats_read(&cpu_stats->tx_bytes);
1385             txpackets = u64_stats_read(&cpu_stats->tx_packets);
1386         } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1387 
1388         u64_stats_add(&stats->rx_packets, rxpackets);
1389         u64_stats_add(&stats->rx_bytes, rxbytes);
1390         u64_stats_add(&stats->tx_bytes, txbytes);
1391         u64_stats_add(&stats->tx_packets, txpackets);
1392     }
1393 }
1394 
1395 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1396 {
1397     struct net_bridge_vlan_group *vg;
1398     struct net_bridge_port *p;
1399 
1400     ASSERT_RTNL();
1401     p = br_port_get_check_rtnl(dev);
1402     if (p)
1403         vg = nbp_vlan_group(p);
1404     else if (netif_is_bridge_master(dev))
1405         vg = br_vlan_group(netdev_priv(dev));
1406     else
1407         return -EINVAL;
1408 
1409     *p_pvid = br_get_pvid(vg);
1410     return 0;
1411 }
1412 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1413 
1414 int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1415 {
1416     struct net_bridge_vlan_group *vg;
1417     struct net_bridge_port *p;
1418 
1419     p = br_port_get_check_rcu(dev);
1420     if (p)
1421         vg = nbp_vlan_group_rcu(p);
1422     else if (netif_is_bridge_master(dev))
1423         vg = br_vlan_group_rcu(netdev_priv(dev));
1424     else
1425         return -EINVAL;
1426 
1427     *p_pvid = br_get_pvid(vg);
1428     return 0;
1429 }
1430 EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1431 
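/* Pick the vlan id used to resolve a forward path through the bridge:
 * use the outermost vlan from @ctx when its protocol matches the
 * bridge's, otherwise fall back to the bridge pvid and mark the path as
 * needing the tag to be inserted (DEV_PATH_BR_VLAN_TAG).
 */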
1432 void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
1433                     struct net_device_path_ctx *ctx,
1434                     struct net_device_path *path)
1435 {
1436     struct net_bridge_vlan_group *vg;
1437     int idx = ctx->num_vlans - 1;
1438     u16 vid;
1439 
1440     path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1441 
1442     if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1443         return;
1444 
1445     vg = br_vlan_group(br);
1446 
1447     if (idx >= 0 &&
1448         ctx->vlan[idx].proto == br->vlan_proto) {
1449         vid = ctx->vlan[idx].id;
1450     } else {
1451         path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
1452         vid = br_get_pvid(vg);
1453     }
1454 
1455     path->bridge.vlan_id = vid;
1456     path->bridge.vlan_proto = br->vlan_proto;
1457 }
1458 
1459 int br_vlan_fill_forward_path_mode(struct net_bridge *br,
1460                    struct net_bridge_port *dst,
1461                    struct net_device_path *path)
1462 {
1463     struct net_bridge_vlan_group *vg;
1464     struct net_bridge_vlan *v;
1465 
1466     if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1467         return 0;
1468 
1469     vg = nbp_vlan_group_rcu(dst);
1470     v = br_vlan_find(vg, path->bridge.vlan_id);
1471     if (!v || !br_vlan_should_use(v))
1472         return -EINVAL;
1473 
1474     if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1475         return 0;
1476 
1477     if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
1478         path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1479     else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
1480         path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
1481     else
1482         path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
1483 
1484     return 0;
1485 }
1486 
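/* Illustrative sketch (not part of the original source) of how an
 * external caller such as a switchdev driver might use this helper to
 * check whether @vid is the pvid of a bridge port; "brport_dev" and
 * "vid_is_pvid" are placeholder names:
 *
 *	struct bridge_vlan_info vinfo;
 *	bool vid_is_pvid = false;
 *
 *	if (!br_vlan_get_info(brport_dev, vid, &vinfo) &&
 *	    (vinfo.flags & BRIDGE_VLAN_INFO_PVID))
 *		vid_is_pvid = true;
 */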
1487 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1488              struct bridge_vlan_info *p_vinfo)
1489 {
1490     struct net_bridge_vlan_group *vg;
1491     struct net_bridge_vlan *v;
1492     struct net_bridge_port *p;
1493 
1494     ASSERT_RTNL();
1495     p = br_port_get_check_rtnl(dev);
1496     if (p)
1497         vg = nbp_vlan_group(p);
1498     else if (netif_is_bridge_master(dev))
1499         vg = br_vlan_group(netdev_priv(dev));
1500     else
1501         return -EINVAL;
1502 
1503     v = br_vlan_find(vg, vid);
1504     if (!v)
1505         return -ENOENT;
1506 
1507     p_vinfo->vid = vid;
1508     p_vinfo->flags = v->flags;
1509     if (vid == br_get_pvid(vg))
1510         p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1511     return 0;
1512 }
1513 EXPORT_SYMBOL_GPL(br_vlan_get_info);
1514 
1515 int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid,
1516              struct bridge_vlan_info *p_vinfo)
1517 {
1518     struct net_bridge_vlan_group *vg;
1519     struct net_bridge_vlan *v;
1520     struct net_bridge_port *p;
1521 
1522     p = br_port_get_check_rcu(dev);
1523     if (p)
1524         vg = nbp_vlan_group_rcu(p);
1525     else if (netif_is_bridge_master(dev))
1526         vg = br_vlan_group_rcu(netdev_priv(dev));
1527     else
1528         return -EINVAL;
1529 
1530     v = br_vlan_find(vg, vid);
1531     if (!v)
1532         return -ENOENT;
1533 
1534     p_vinfo->vid = vid;
1535     p_vinfo->flags = v->flags;
1536     if (vid == br_get_pvid(vg))
1537         p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1538     return 0;
1539 }
1540 EXPORT_SYMBOL_GPL(br_vlan_get_info_rcu);
1541 
1542 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1543 {
1544     return is_vlan_dev(dev) &&
1545         !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1546 }
1547 
1548 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1549                    __always_unused struct netdev_nested_priv *priv)
1550 {
1551     return br_vlan_is_bind_vlan_dev(dev);
1552 }
1553 
1554 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1555 {
1556     int found;
1557 
1558     rcu_read_lock();
1559     found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1560                           NULL);
1561     rcu_read_unlock();
1562 
1563     return !!found;
1564 }
1565 
1566 struct br_vlan_bind_walk_data {
1567     u16 vid;
1568     struct net_device *result;
1569 };
1570 
1571 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1572                       struct netdev_nested_priv *priv)
1573 {
1574     struct br_vlan_bind_walk_data *data = priv->data;
1575     int found = 0;
1576 
1577     if (br_vlan_is_bind_vlan_dev(dev) &&
1578         vlan_dev_priv(dev)->vlan_id == data->vid) {
1579         data->result = dev;
1580         found = 1;
1581     }
1582 
1583     return found;
1584 }
1585 
1586 static struct net_device *
1587 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1588 {
1589     struct br_vlan_bind_walk_data data = {
1590         .vid = vid,
1591     };
1592     struct netdev_nested_priv priv = {
1593         .data = (void *)&data,
1594     };
1595 
1596     rcu_read_lock();
1597     netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1598                       &priv);
1599     rcu_read_unlock();
1600 
1601     return data.result;
1602 }
1603 
1604 static bool br_vlan_is_dev_up(const struct net_device *dev)
1605 {
1606     return !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1607 }
1608 
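/* Propagate carrier state to a bridge-binding vlan upper device: carrier
 * is on only if the bridge itself has carrier and at least one bridge
 * port that is a member of the vlan is up.
 */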
1609 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1610                        struct net_device *vlan_dev)
1611 {
1612     u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1613     struct net_bridge_vlan_group *vg;
1614     struct net_bridge_port *p;
1615     bool has_carrier = false;
1616 
1617     if (!netif_carrier_ok(br->dev)) {
1618         netif_carrier_off(vlan_dev);
1619         return;
1620     }
1621 
1622     list_for_each_entry(p, &br->port_list, list) {
1623         vg = nbp_vlan_group(p);
1624         if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1625             has_carrier = true;
1626             break;
1627         }
1628     }
1629 
1630     if (has_carrier)
1631         netif_carrier_on(vlan_dev);
1632     else
1633         netif_carrier_off(vlan_dev);
1634 }
1635 
1636 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1637 {
1638     struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1639     struct net_bridge_vlan *vlan;
1640     struct net_device *vlan_dev;
1641 
1642     list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1643         vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1644                                vlan->vid);
1645         if (vlan_dev) {
1646             if (br_vlan_is_dev_up(p->dev)) {
1647                 if (netif_carrier_ok(p->br->dev))
1648                     netif_carrier_on(vlan_dev);
1649             } else {
1650                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1651             }
1652         }
1653     }
1654 }
1655 
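/* A vlan upper device was linked to or unlinked from the bridge device;
 * keep BROPT_VLAN_BRIDGE_BINDING in sync with whether any bridge-binding
 * vlan uppers remain and initialize the carrier state of a newly linked
 * one.
 */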
1656 static void br_vlan_upper_change(struct net_device *dev,
1657                  struct net_device *upper_dev,
1658                  bool linking)
1659 {
1660     struct net_bridge *br = netdev_priv(dev);
1661 
1662     if (!br_vlan_is_bind_vlan_dev(upper_dev))
1663         return;
1664 
1665     if (linking) {
1666         br_vlan_set_vlan_dev_state(br, upper_dev);
1667         br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1668     } else {
1669         br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1670                   br_vlan_has_upper_bind_vlan_dev(dev));
1671     }
1672 }
1673 
1674 struct br_vlan_link_state_walk_data {
1675     struct net_bridge *br;
1676 };
1677 
1678 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1679                     struct netdev_nested_priv *priv)
1680 {
1681     struct br_vlan_link_state_walk_data *data = priv->data;
1682 
1683     if (br_vlan_is_bind_vlan_dev(vlan_dev))
1684         br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1685 
1686     return 0;
1687 }
1688 
1689 static void br_vlan_link_state_change(struct net_device *dev,
1690                       struct net_bridge *br)
1691 {
1692     struct br_vlan_link_state_walk_data data = {
1693         .br = br
1694     };
1695     struct netdev_nested_priv priv = {
1696         .data = (void *)&data,
1697     };
1698 
1699     rcu_read_lock();
1700     netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1701                       &priv);
1702     rcu_read_unlock();
1703 }
1704 
1705 /* Must be protected by RTNL. */
1706 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1707 {
1708     struct net_device *vlan_dev;
1709 
1710     if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1711         return;
1712 
1713     vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1714     if (vlan_dev)
1715         br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1716 }
1717 
1718 /* Must be protected by RTNL. */
1719 int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1720 {
1721     struct netdev_notifier_changeupper_info *info;
1722     struct net_bridge *br = netdev_priv(dev);
1723     int vlcmd = 0, ret = 0;
1724     bool changed = false;
1725 
1726     switch (event) {
1727     case NETDEV_REGISTER:
1728         ret = br_vlan_add(br, br->default_pvid,
1729                   BRIDGE_VLAN_INFO_PVID |
1730                   BRIDGE_VLAN_INFO_UNTAGGED |
1731                   BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1732         vlcmd = RTM_NEWVLAN;
1733         break;
1734     case NETDEV_UNREGISTER:
1735         changed = !br_vlan_delete(br, br->default_pvid);
1736         vlcmd = RTM_DELVLAN;
1737         break;
1738     case NETDEV_CHANGEUPPER:
1739         info = ptr;
1740         br_vlan_upper_change(dev, info->upper_dev, info->linking);
1741         break;
1742 
1743     case NETDEV_CHANGE:
1744     case NETDEV_UP:
1745         if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1746             break;
1747         br_vlan_link_state_change(dev, br);
1748         break;
1749     }
1750     if (changed)
1751         br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1752 
1753     return ret;
1754 }
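
/* Summary of the handler above: the bridge's default_pvid entry is created
 * as pvid + untagged + brentry when the bridge device registers and deleted
 * when it unregisters, with an RTM_NEWVLAN/RTM_DELVLAN notification sent if
 * anything actually changed.  Carrier/up events are only acted on once vlan
 * bridge binding is enabled.
 */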
1755 
1756 /* Must be protected by RTNL. */
1757 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1758 {
1759     if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1760         return;
1761 
1762     switch (event) {
1763     case NETDEV_CHANGE:
1764     case NETDEV_DOWN:
1765     case NETDEV_UP:
1766         br_vlan_set_all_vlan_dev_state(p);
1767         break;
1768     }
1769 }
1770 
1771 static bool br_vlan_stats_fill(struct sk_buff *skb,
1772                    const struct net_bridge_vlan *v)
1773 {
1774     struct pcpu_sw_netstats stats;
1775     struct nlattr *nest;
1776 
1777     nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1778     if (!nest)
1779         return false;
1780 
1781     br_vlan_get_stats(v, &stats);
1782     if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES,
1783                   u64_stats_read(&stats.rx_bytes),
1784                   BRIDGE_VLANDB_STATS_PAD) ||
1785         nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1786                   u64_stats_read(&stats.rx_packets),
1787                   BRIDGE_VLANDB_STATS_PAD) ||
1788         nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES,
1789                   u64_stats_read(&stats.tx_bytes),
1790                   BRIDGE_VLANDB_STATS_PAD) ||
1791         nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1792                   u64_stats_read(&stats.tx_packets),
1793                   BRIDGE_VLANDB_STATS_PAD))
1794         goto out_err;
1795 
1796     nla_nest_end(skb, nest);
1797 
1798     return true;
1799 
1800 out_err:
1801     nla_nest_cancel(skb, nest);
1802     return false;
1803 }
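
/* The stats nest carries 64-bit rx/tx byte and packet counters aggregated by
 * br_vlan_get_stats() from the vlan's per-CPU counters; if any put fails the
 * whole BRIDGE_VLANDB_ENTRY_STATS nest is cancelled.
 */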
1804 
1805 /* @v_opts is used to dump the options, which must be equal across the whole range */
1806 static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1807                   const struct net_bridge_vlan *v_opts,
1808                   u16 flags,
1809                   bool dump_stats)
1810 {
1811     struct bridge_vlan_info info;
1812     struct nlattr *nest;
1813 
1814     nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1815     if (!nest)
1816         return false;
1817 
1818     memset(&info, 0, sizeof(info));
1819     info.vid = vid;
1820     if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1821         info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1822     if (flags & BRIDGE_VLAN_INFO_PVID)
1823         info.flags |= BRIDGE_VLAN_INFO_PVID;
1824 
1825     if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1826         goto out_err;
1827 
1828     if (vid_range && vid < vid_range &&
1829         !(flags & BRIDGE_VLAN_INFO_PVID) &&
1830         nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1831         goto out_err;
1832 
1833     if (v_opts) {
1834         if (!br_vlan_opts_fill(skb, v_opts))
1835             goto out_err;
1836 
1837         if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1838             goto out_err;
1839     }
1840 
1841     nla_nest_end(skb, nest);
1842 
1843     return true;
1844 
1845 out_err:
1846     nla_nest_cancel(skb, nest);
1847     return false;
1848 }
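
/* Resulting attribute layout for one entry (as built above):
 *
 *   [BRIDGE_VLANDB_ENTRY] (nested)
 *     [BRIDGE_VLANDB_ENTRY_INFO]   struct bridge_vlan_info (vid + flags)
 *     [BRIDGE_VLANDB_ENTRY_RANGE]  u16, only for a compressed non-pvid range
 *     per-vlan options             filled by br_vlan_opts_fill()
 *     [BRIDGE_VLANDB_ENTRY_STATS]  nested, only when stats are dumped
 */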
1849 
1850 static size_t rtnl_vlan_nlmsg_size(void)
1851 {
1852     return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1853         + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1854         + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1855         + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
1856         + br_vlan_opts_nl_size(); /* bridge vlan options */
1857 }
1858 
1859 void br_vlan_notify(const struct net_bridge *br,
1860             const struct net_bridge_port *p,
1861             u16 vid, u16 vid_range,
1862             int cmd)
1863 {
1864     struct net_bridge_vlan_group *vg;
1865     struct net_bridge_vlan *v = NULL;
1866     struct br_vlan_msg *bvm;
1867     struct nlmsghdr *nlh;
1868     struct sk_buff *skb;
1869     int err = -ENOBUFS;
1870     struct net *net;
1871     u16 flags = 0;
1872     int ifindex;
1873 
1874     /* right now, notifications are sent only with the RTNL lock held */
1875     ASSERT_RTNL();
1876 
1877     if (p) {
1878         ifindex = p->dev->ifindex;
1879         vg = nbp_vlan_group(p);
1880         net = dev_net(p->dev);
1881     } else {
1882         ifindex = br->dev->ifindex;
1883         vg = br_vlan_group(br);
1884         net = dev_net(br->dev);
1885     }
1886 
1887     skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1888     if (!skb)
1889         goto out_err;
1890 
1891     err = -EMSGSIZE;
1892     nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1893     if (!nlh)
1894         goto out_err;
1895     bvm = nlmsg_data(nlh);
1896     memset(bvm, 0, sizeof(*bvm));
1897     bvm->family = AF_BRIDGE;
1898     bvm->ifindex = ifindex;
1899 
1900     switch (cmd) {
1901     case RTM_NEWVLAN:
1902         /* need to find the vlan due to flags/options */
1903         v = br_vlan_find(vg, vid);
1904         if (!v || !br_vlan_should_use(v))
1905             goto out_kfree;
1906 
1907         flags = v->flags;
1908         if (br_get_pvid(vg) == v->vid)
1909             flags |= BRIDGE_VLAN_INFO_PVID;
1910         break;
1911     case RTM_DELVLAN:
1912         break;
1913     default:
1914         goto out_kfree;
1915     }
1916 
1917     if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
1918         goto out_err;
1919 
1920     nlmsg_end(skb, nlh);
1921     rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1922     return;
1923 
1924 out_err:
1925     rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1926 out_kfree:
1927     kfree_skb(skb);
1928 }
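
/* Notifications reuse the dump format: a single br_vlan_msg header plus one
 * BRIDGE_VLANDB_ENTRY built by br_vlan_fill_vids(), multicast on the
 * RTNLGRP_BRVLAN group.  For RTM_NEWVLAN the entry's current flags and
 * options are included; for RTM_DELVLAN only the vid (range) is reported.
 */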
1929 
1930 /* check if v_curr can enter a range ending in range_end */
1931 bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1932                  const struct net_bridge_vlan *range_end)
1933 {
1934     return v_curr->vid - range_end->vid == 1 &&
1935            range_end->flags == v_curr->flags &&
1936            br_vlan_opts_eq_range(v_curr, range_end);
1937 }
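
/* Illustrative example: vlans 10, 11 and 12 with identical flags and equal
 * per-vlan options (br_vlan_opts_eq_range()) are dumped as a single entry
 * with vid 10 and BRIDGE_VLANDB_ENTRY_RANGE 12; a gap in the vids, a flag
 * difference or an option difference starts a new entry.
 */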
1938 
1939 static int br_vlan_dump_dev(const struct net_device *dev,
1940                 struct sk_buff *skb,
1941                 struct netlink_callback *cb,
1942                 u32 dump_flags)
1943 {
1944     struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1945     bool dump_global = !!(dump_flags & BRIDGE_VLANDB_DUMPF_GLOBAL);
1946     bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1947     struct net_bridge_vlan_group *vg;
1948     int idx = 0, s_idx = cb->args[1];
1949     struct nlmsghdr *nlh = NULL;
1950     struct net_bridge_port *p;
1951     struct br_vlan_msg *bvm;
1952     struct net_bridge *br;
1953     int err = 0;
1954     u16 pvid;
1955 
1956     if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1957         return -EINVAL;
1958 
1959     if (netif_is_bridge_master(dev)) {
1960         br = netdev_priv(dev);
1961         vg = br_vlan_group_rcu(br);
1962         p = NULL;
1963     } else {
1964         /* global options are dumped only for bridge devices */
1965         if (dump_global)
1966             return 0;
1967 
1968         p = br_port_get_rcu(dev);
1969         if (WARN_ON(!p))
1970             return -EINVAL;
1971         vg = nbp_vlan_group_rcu(p);
1972         br = p->br;
1973     }
1974 
1975     if (!vg)
1976         return 0;
1977 
1978     nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1979             RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1980     if (!nlh)
1981         return -EMSGSIZE;
1982     bvm = nlmsg_data(nlh);
1983     memset(bvm, 0, sizeof(*bvm));
1984     bvm->family = PF_BRIDGE;
1985     bvm->ifindex = dev->ifindex;
1986     pvid = br_get_pvid(vg);
1987 
1988     /* idx must stay at the range's beginning until the range is filled in */
1989     list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1990         if (!dump_global && !br_vlan_should_use(v))
1991             continue;
1992         if (idx < s_idx) {
1993             idx++;
1994             continue;
1995         }
1996 
1997         if (!range_start) {
1998             range_start = v;
1999             range_end = v;
2000             continue;
2001         }
2002 
2003         if (dump_global) {
2004             if (br_vlan_global_opts_can_enter_range(v, range_end))
2005                 goto update_end;
2006             if (!br_vlan_global_opts_fill(skb, range_start->vid,
2007                               range_end->vid,
2008                               range_start)) {
2009                 err = -EMSGSIZE;
2010                 break;
2011             }
2012             /* advance number of filled vlans */
2013             idx += range_end->vid - range_start->vid + 1;
2014 
2015             range_start = v;
2016         } else if (dump_stats || v->vid == pvid ||
2017                !br_vlan_can_enter_range(v, range_end)) {
2018             u16 vlan_flags = br_vlan_flags(range_start, pvid);
2019 
2020             if (!br_vlan_fill_vids(skb, range_start->vid,
2021                            range_end->vid, range_start,
2022                            vlan_flags, dump_stats)) {
2023                 err = -EMSGSIZE;
2024                 break;
2025             }
2026             /* advance number of filled vlans */
2027             idx += range_end->vid - range_start->vid + 1;
2028 
2029             range_start = v;
2030         }
2031 update_end:
2032         range_end = v;
2033     }
2034 
2035     /* err will be 0 and range_start will be set in 3 cases here:
2036      * - first vlan (range_start == range_end)
2037      * - last vlan (range_start == range_end, not in range)
2038      * - last vlan range (range_start != range_end, in range)
2039      */
2040     if (!err && range_start) {
2041         if (dump_global &&
2042             !br_vlan_global_opts_fill(skb, range_start->vid,
2043                           range_end->vid, range_start))
2044             err = -EMSGSIZE;
2045         else if (!dump_global &&
2046              !br_vlan_fill_vids(skb, range_start->vid,
2047                         range_end->vid, range_start,
2048                         br_vlan_flags(range_start, pvid),
2049                         dump_stats))
2050             err = -EMSGSIZE;
2051     }
2052 
2053     cb->args[1] = err ? idx : 0;
2054 
2055     nlmsg_end(skb, nlh);
2056 
2057     return err;
2058 }
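
/* Each dumped device contributes one RTM_NEWVLAN message (NLM_F_MULTI) with
 * a br_vlan_msg header followed by its range-compressed entries.  On
 * -EMSGSIZE, cb->args[1] remembers how many vlans were already emitted so
 * the dump resumes from the same position in the next callback.
 */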
2059 
2060 static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
2061     [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
2062 };
2063 
2064 static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
2065 {
2066     struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
2067     int idx = 0, err = 0, s_idx = cb->args[0];
2068     struct net *net = sock_net(skb->sk);
2069     struct br_vlan_msg *bvm;
2070     struct net_device *dev;
2071     u32 dump_flags = 0;
2072 
2073     err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
2074               br_vlan_db_dump_pol, cb->extack);
2075     if (err < 0)
2076         return err;
2077 
2078     bvm = nlmsg_data(cb->nlh);
2079     if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
2080         dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
2081 
2082     rcu_read_lock();
2083     if (bvm->ifindex) {
2084         dev = dev_get_by_index_rcu(net, bvm->ifindex);
2085         if (!dev) {
2086             err = -ENODEV;
2087             goto out_err;
2088         }
2089         err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2090         /* if the dump completed without an error, we return 0 here */
2091         if (err != -EMSGSIZE)
2092             goto out_err;
2093     } else {
2094         for_each_netdev_rcu(net, dev) {
2095             if (idx < s_idx)
2096                 goto skip;
2097 
2098             err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2099             if (err == -EMSGSIZE)
2100                 break;
2101 skip:
2102             idx++;
2103         }
2104     }
2105     cb->args[0] = idx;
2106     rcu_read_unlock();
2107 
2108     return skb->len;
2109 
2110 out_err:
2111     rcu_read_unlock();
2112 
2113     return err;
2114 }
2115 
2116 static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
2117     [BRIDGE_VLANDB_ENTRY_INFO]  =
2118         NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
2119     [BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
2120     [BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
2121     [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
2122     [BRIDGE_VLANDB_ENTRY_MCAST_ROUTER]  = { .type = NLA_U8 },
2123 };
2124 
2125 static int br_vlan_rtm_process_one(struct net_device *dev,
2126                    const struct nlattr *attr,
2127                    int cmd, struct netlink_ext_ack *extack)
2128 {
2129     struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
2130     struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
2131     bool changed = false, skip_processing = false;
2132     struct net_bridge_vlan_group *vg;
2133     struct net_bridge_port *p = NULL;
2134     int err = 0, cmdmap = 0;
2135     struct net_bridge *br;
2136 
2137     if (netif_is_bridge_master(dev)) {
2138         br = netdev_priv(dev);
2139         vg = br_vlan_group(br);
2140     } else {
2141         p = br_port_get_rtnl(dev);
2142         if (WARN_ON(!p))
2143             return -ENODEV;
2144         br = p->br;
2145         vg = nbp_vlan_group(p);
2146     }
2147 
2148     if (WARN_ON(!vg))
2149         return -ENODEV;
2150 
2151     err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
2152                    br_vlan_db_policy, extack);
2153     if (err)
2154         return err;
2155 
2156     if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
2157         NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
2158         return -EINVAL;
2159     }
2160     memset(&vrange_end, 0, sizeof(vrange_end));
2161 
2162     vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
2163     if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
2164                 BRIDGE_VLAN_INFO_RANGE_END)) {
2165         NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
2166         return -EINVAL;
2167     }
2168     if (!br_vlan_valid_id(vinfo->vid, extack))
2169         return -EINVAL;
2170 
2171     if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
2172         vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
2173         /* validate user-provided flags without RANGE_BEGIN */
2174         vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
2175         vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
2176 
2177         /* vinfo_last is the range start, vinfo the range end */
2178         vinfo_last = vinfo;
2179         vinfo = &vrange_end;
2180 
2181         if (!br_vlan_valid_id(vinfo->vid, extack) ||
2182             !br_vlan_valid_range(vinfo, vinfo_last, extack))
2183             return -EINVAL;
2184     }
2185 
2186     switch (cmd) {
2187     case RTM_NEWVLAN:
2188         cmdmap = RTM_SETLINK;
2189         skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
2190         break;
2191     case RTM_DELVLAN:
2192         cmdmap = RTM_DELLINK;
2193         break;
2194     }
2195 
2196     if (!skip_processing) {
2197         struct bridge_vlan_info *tmp_last = vinfo_last;
2198 
2199         /* br_process_vlan_info may overwrite vinfo_last */
2200         err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
2201                        &changed, extack);
2202 
2203         /* notify first if anything changed */
2204         if (changed)
2205             br_ifinfo_notify(cmdmap, br, p);
2206 
2207         if (err)
2208             return err;
2209     }
2210 
2211     /* deal with options */
2212     if (cmd == RTM_NEWVLAN) {
2213         struct net_bridge_vlan *range_start, *range_end;
2214 
2215         if (vinfo_last) {
2216             range_start = br_vlan_find(vg, vinfo_last->vid);
2217             range_end = br_vlan_find(vg, vinfo->vid);
2218         } else {
2219             range_start = br_vlan_find(vg, vinfo->vid);
2220             range_end = range_start;
2221         }
2222 
2223         err = br_vlan_process_options(br, p, range_start, range_end,
2224                           tb, extack);
2225     }
2226 
2227     return err;
2228 }
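
/* RTM_NEWVLAN is mapped to the old RTM_SETLINK add path and RTM_DELVLAN to
 * RTM_DELLINK via br_process_vlan_info().  When BRIDGE_VLAN_INFO_ONLY_OPTS
 * is set, the add/delete step is skipped and only the per-vlan options of
 * the existing vid (range) are processed.
 */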
2229 
2230 static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
2231                    struct netlink_ext_ack *extack)
2232 {
2233     struct net *net = sock_net(skb->sk);
2234     struct br_vlan_msg *bvm;
2235     struct net_device *dev;
2236     struct nlattr *attr;
2237     int err, vlans = 0;
2238     int rem;
2239 
2240     /* this should validate the header and check for remaining bytes */
2241     err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
2242               extack);
2243     if (err < 0)
2244         return err;
2245 
2246     bvm = nlmsg_data(nlh);
2247     dev = __dev_get_by_index(net, bvm->ifindex);
2248     if (!dev)
2249         return -ENODEV;
2250 
2251     if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
2252         NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
2253         return -EINVAL;
2254     }
2255 
2256     nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
2257         switch (nla_type(attr)) {
2258         case BRIDGE_VLANDB_ENTRY:
2259             err = br_vlan_rtm_process_one(dev, attr,
2260                               nlh->nlmsg_type,
2261                               extack);
2262             break;
2263         case BRIDGE_VLANDB_GLOBAL_OPTIONS:
2264             err = br_vlan_rtm_process_global_options(dev, attr,
2265                                  nlh->nlmsg_type,
2266                                  extack);
2267             break;
2268         default:
2269             continue;
2270         }
2271 
2272         vlans++;
2273         if (err)
2274             break;
2275     }
2276     if (!vlans) {
2277         NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
2278         err = -EINVAL;
2279     }
2280 
2281     return err;
2282 }
2283 
2284 void br_vlan_rtnl_init(void)
2285 {
2286     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
2287                  br_vlan_rtm_dump, 0);
2288     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
2289                  br_vlan_rtm_process, NULL, 0);
2290     rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
2291                  br_vlan_rtm_process, NULL, 0);
2292 }
2293 
2294 void br_vlan_rtnl_uninit(void)
2295 {
2296     rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
2297     rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
2298     rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
2299 }
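
/* Illustrative userspace sketch (not part of this file, built separately
 * with a normal C compiler): a minimal raw-rtnetlink client that asks
 * br_vlan_rtm_dump() above for the bridge VLAN DB of one device, or of all
 * devices when no interface name is given, and prints the vid/range of each
 * BRIDGE_VLANDB_ENTRY.  It assumes a uapi <linux/if_bridge.h> new enough to
 * provide struct br_vlan_msg and the BRIDGE_VLANDB_* enums; error handling
 * is intentionally minimal.
 */
#include <stdio.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_bridge.h>

int main(int argc, char **argv)
{
	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct br_vlan_msg bvm;
	} req = {
		.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct br_vlan_msg)),
		.nlh.nlmsg_type = RTM_GETVLAN,
		.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
		.bvm.family = PF_BRIDGE,
	};
	char buf[16384];
	ssize_t len;
	int fd;

	if (argc > 1)
		req.bvm.ifindex = if_nametoindex(argv[1]);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;

	while ((len = recv(fd, buf, sizeof(buf), 0)) > 0) {
		struct nlmsghdr *nlh;

		for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
		     nlh = NLMSG_NEXT(nlh, len)) {
			struct br_vlan_msg *bvm = NLMSG_DATA(nlh);
			struct rtattr *a;
			int rem;

			if (nlh->nlmsg_type == NLMSG_DONE)
				return 0;
			if (nlh->nlmsg_type == NLMSG_ERROR)
				return 1;
			if (nlh->nlmsg_type != RTM_NEWVLAN)
				continue;

			/* attributes follow the br_vlan_msg header */
			a = (struct rtattr *)((char *)bvm +
					      NLMSG_ALIGN(sizeof(*bvm)));
			rem = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*bvm));
			for (; RTA_OK(a, rem); a = RTA_NEXT(a, rem)) {
				struct rtattr *na;
				__u16 vid = 0, range = 0;
				int nrem;

				if ((a->rta_type & NLA_TYPE_MASK) !=
				    BRIDGE_VLANDB_ENTRY)
					continue;

				/* walk the nested per-entry attributes */
				na = RTA_DATA(a);
				nrem = RTA_PAYLOAD(a);
				for (; RTA_OK(na, nrem);
				     na = RTA_NEXT(na, nrem)) {
					__u16 t = na->rta_type & NLA_TYPE_MASK;

					if (t == BRIDGE_VLANDB_ENTRY_INFO)
						vid = ((struct bridge_vlan_info *)
						       RTA_DATA(na))->vid;
					else if (t == BRIDGE_VLANDB_ENTRY_RANGE)
						range = *(__u16 *)RTA_DATA(na);
				}
				printf("ifindex %u vid %u", bvm->ifindex, vid);
				if (range)
					printf("-%u", range);
				printf("\n");
			}
		}
	}
	return 0;
}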