0001
0002
0003
0004
0005
0006
0007
0008
0009
0010 #include <linux/err.h>
0011 #include <linux/slab.h>
0012 #include <linux/kernel.h>
0013 #include <linux/netdevice.h>
0014 #include <linux/netpoll.h>
0015 #include <linux/skbuff.h>
0016 #include <linux/if_vlan.h>
0017 #include <linux/netfilter_bridge.h>
0018 #include "br_private.h"
0019
0020
0021 static inline int should_deliver(const struct net_bridge_port *p,
0022 const struct sk_buff *skb)
0023 {
0024 struct net_bridge_vlan_group *vg;
0025
0026 vg = nbp_vlan_group_rcu(p);
0027 return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
0028 p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) &&
0029 nbp_switchdev_allowed_egress(p, skb) &&
0030 !br_skb_isolated(p, skb);
0031 }
0032
/* Final transmit step for the bridge: restore the Ethernet header that the
 * receive path pulled and queue the frame on the egress device.  Signature
 * matches the netfilter okfn prototype so it terminates NF_HOOK chains
 * (see br_forward_finish).  Consumes the skb and always returns 0.
 */
int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_push(skb, ETH_HLEN);
	/* drop frames the egress device cannot take (e.g. too large) */
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	/* remove any fake rtable attached earlier by br_netfilter */
	br_drop_fake_rtable(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    eth_type_vlan(skb->protocol)) {
		int depth;

		/* pending checksum offload on a VLAN-tagged frame: the
		 * network header must point past the VLAN tag(s) so the
		 * driver can complete the checksum.  A zero return means
		 * the header chain is malformed -> drop.
		 */
		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	/* tell switchdev-capable drivers whether HW already forwarded this */
	br_switchdev_frame_set_offload_fwd_mark(skb);

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
0062
0063 int br_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
0064 {
0065 skb_clear_tstamp(skb);
0066 return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING,
0067 net, sk, skb, NULL, skb->dev,
0068 br_dev_queue_push_xmit);
0069
0070 }
0071 EXPORT_SYMBOL_GPL(br_forward_finish);
0072
/* Transmit one skb out of port @to.
 *
 * @local_orig distinguishes frames that originated on the bridge device
 * itself from frames being forwarded: it selects the netfilter hook
 * (NF_BR_LOCAL_OUT vs NF_BR_FORWARD), the net namespace source, and the
 * in-device passed to the hook.  Consumes the skb on every path.
 */
static void __br_forward(const struct net_bridge_port *to,
			 struct sk_buff *skb, bool local_orig)
{
	struct net_bridge_vlan_group *vg;
	struct net_device *indev;
	struct net *net;
	int br_hook;

	/* mark the frame for switchdev TX forward offload on this port */
	nbp_switchdev_frame_mark_tx_fwd_offload(to, skb);

	vg = nbp_vlan_group_rcu(to);
	/* may retag/untag per egress VLAN config, or drop (returns NULL) */
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;

	indev = skb->dev;
	skb->dev = to->dev;
	if (!local_orig) {
		/* forwarding path: LRO-aggregated skbs must not be forwarded */
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		br_hook = NF_BR_FORWARD;
		skb_forward_csum(skb);
		net = dev_net(indev);
	} else {
		/* locally originated frame */
		if (unlikely(netpoll_tx_running(to->br->dev))) {
			/* netpoll transmit bypasses the netfilter hooks:
			 * push the header back and send (or drop) directly
			 */
			skb_push(skb, ETH_HLEN);
			if (!is_skb_forwardable(skb->dev, skb))
				kfree_skb(skb);
			else
				br_netpoll_send_skb(to, skb);
			return;
		}
		br_hook = NF_BR_LOCAL_OUT;
		net = dev_net(skb->dev);
		indev = NULL;
	}

	NF_HOOK(NFPROTO_BRIDGE, br_hook,
		net, NULL, skb, indev, skb->dev,
		br_forward_finish);
}
0119
0120 static int deliver_clone(const struct net_bridge_port *prev,
0121 struct sk_buff *skb, bool local_orig)
0122 {
0123 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
0124
0125 skb = skb_clone(skb, GFP_ATOMIC);
0126 if (!skb) {
0127 dev->stats.tx_dropped++;
0128 return -ENOMEM;
0129 }
0130
0131 __br_forward(prev, skb, local_orig);
0132 return 0;
0133 }
0134
0135
0136
0137
0138
0139
0140
0141
0142
0143
0144 void br_forward(const struct net_bridge_port *to,
0145 struct sk_buff *skb, bool local_rcv, bool local_orig)
0146 {
0147 if (unlikely(!to))
0148 goto out;
0149
0150
0151 if (rcu_access_pointer(to->backup_port) && !netif_carrier_ok(to->dev)) {
0152 struct net_bridge_port *backup_port;
0153
0154 backup_port = rcu_dereference(to->backup_port);
0155 if (unlikely(!backup_port))
0156 goto out;
0157 to = backup_port;
0158 }
0159
0160 if (should_deliver(to, skb)) {
0161 if (local_rcv)
0162 deliver_clone(to, skb, local_orig);
0163 else
0164 __br_forward(to, skb, local_orig);
0165 return;
0166 }
0167
0168 out:
0169 if (!local_rcv)
0170 kfree_skb(skb);
0171 }
0172 EXPORT_SYMBOL_GPL(br_forward);
0173
0174 static struct net_bridge_port *maybe_deliver(
0175 struct net_bridge_port *prev, struct net_bridge_port *p,
0176 struct sk_buff *skb, bool local_orig)
0177 {
0178 u8 igmp_type = br_multicast_igmp_type(skb);
0179 int err;
0180
0181 if (!should_deliver(p, skb))
0182 return prev;
0183
0184 nbp_switchdev_frame_mark_tx_fwd_to_hwdom(p, skb);
0185
0186 if (!prev)
0187 goto out;
0188
0189 err = deliver_clone(prev, skb, local_orig);
0190 if (err)
0191 return ERR_PTR(err);
0192 out:
0193 br_multicast_count(p->br, p, skb, igmp_type, BR_MCAST_DIR_TX);
0194
0195 return p;
0196 }
0197
0198
/* Flood a frame out of every eligible bridge port.
 *
 * @pkt_type selects which per-port flood-control flag (BR_FLOOD,
 * BR_MCAST_FLOOD, BR_BCAST_FLOOD) gates delivery.  When @local_rcv is set
 * the caller still needs the skb, so only clones are sent; otherwise the
 * skb itself is consumed (transmitted out the last port, or freed).
 * Uses list_for_each_entry_rcu(), so it must run in an RCU read-side
 * critical section.
 */
void br_flood(struct net_bridge *br, struct sk_buff *skb,
	      enum br_pkt_type pkt_type, bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port *p;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* honor per-port flood control; frames originating on the
		 * bridge device itself (skb->dev == br->dev) bypass the
		 * multicast/broadcast gates
		 */
		switch (pkt_type) {
		case BR_PKT_UNICAST:
			if (!(p->flags & BR_FLOOD))
				continue;
			break;
		case BR_PKT_MULTICAST:
			if (!(p->flags & BR_MCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		case BR_PKT_BROADCAST:
			if (!(p->flags & BR_BCAST_FLOOD) && skb->dev != br->dev)
				continue;
			break;
		}

		/* do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		/* nor, once a proxied reply was already generated, to ports
		 * with WiFi proxy-ARP or neighbor suppression enabled
		 */
		if ((p->flags & (BR_PROXYARP_WIFI | BR_NEIGH_SUPPRESS)) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		/* send a clone to the previously found port and remember
		 * this one, so the final port can take the original skb
		 */
		prev = maybe_deliver(prev, p, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
0249
0250 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
0251 static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
0252 const unsigned char *addr, bool local_orig)
0253 {
0254 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
0255 const unsigned char *src = eth_hdr(skb)->h_source;
0256
0257 if (!should_deliver(p, skb))
0258 return;
0259
0260
0261 if (skb->dev == p->dev && ether_addr_equal(src, addr))
0262 return;
0263
0264 skb = skb_copy(skb, GFP_ATOMIC);
0265 if (!skb) {
0266 dev->stats.tx_dropped++;
0267 return;
0268 }
0269
0270 if (!is_broadcast_ether_addr(addr))
0271 memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
0272
0273 __br_forward(p, skb, local_orig);
0274 }
0275
0276
/* Flood a multicast frame to the ports of MDB entry @mdst and to all
 * multicast router ports.
 *
 * The group's port list and the router-port list are walked in parallel
 * and merged by comparing raw port pointer values (NOTE(review): this
 * relies on both lists being kept ordered by port pointer at insertion
 * time — confirm against the MDB/router list maintenance code), so each
 * port receives the frame at most once.  When @local_rcv is set only
 * clones are transmitted; otherwise the skb is consumed.
 */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			struct sk_buff *skb,
			struct net_bridge_mcast *brmctx,
			bool local_rcv, bool local_orig)
{
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	bool allow_mode_include = true;
	struct hlist_node *rp;

	rp = br_multicast_get_first_rport_node(brmctx, skb);

	if (mdst) {
		p = rcu_dereference(mdst->ports);
		/* for *,G entries (when this protocol's mode is handled),
		 * ports in MCAST_INCLUDE filter mode must be skipped below
		 */
		if (br_multicast_should_handle_mode(brmctx, mdst->addr.proto) &&
		    br_multicast_is_star_g(&mdst->addr))
			allow_mode_include = false;
	} else {
		p = NULL;
	}

	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		/* candidate from the group list and from the router list */
		lport = p ? p->key.port : NULL;
		rport = br_multicast_rport_from_node_skb(rp, skb);

		if ((unsigned long)lport > (unsigned long)rport) {
			port = lport;

			/* per-port multicast-to-unicast: copy with rewritten
			 * destination MAC instead of regular flooding
			 */
			if (port->flags & BR_MULTICAST_TO_UNICAST) {
				maybe_deliver_addr(lport, skb, p->eth_addr,
						   local_orig);
				goto delivered;
			}
			/* skip INCLUDE-mode ports on *,G (see above) and
			 * explicitly blocked port groups
			 */
			if ((!allow_mode_include &&
			     p->filter_mode == MCAST_INCLUDE) ||
			    (p->flags & MDB_PG_FLAGS_BLOCKED))
				goto delivered;
		} else {
			port = rport;
		}

		prev = maybe_deliver(prev, port, skb, local_orig);
		if (IS_ERR(prev))
			goto out;
delivered:
		/* advance whichever list supplied the port just handled;
		 * a port present on both lists (lport == rport) advances
		 * both, hence >= rather than >
		 */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	if (local_rcv)
		deliver_clone(prev, skb, local_orig);
	else
		__br_forward(prev, skb, local_orig);
	return;

out:
	if (!local_rcv)
		kfree_skb(skb);
}
0343 #endif