// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 */

#include "ipvlan.h"

static u32 ipvlan_jhash_secret __read_mostly;

void ipvlan_init_secret(void)
{
	net_get_random_once(&ipvlan_jhash_secret, sizeof(ipvlan_jhash_secret));
}

void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
		     unsigned int len, bool success, bool mcast)
{
	if (likely(success)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
		u64_stats_update_begin(&pcptr->syncp);
		u64_stats_inc(&pcptr->rx_pkts);
		u64_stats_add(&pcptr->rx_bytes, len);
		if (mcast)
			u64_stats_inc(&pcptr->rx_mcast);
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->rx_errs);
	}
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);

#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	const struct in6_addr *ip6_addr = iaddr;

	return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}
#else
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
	return 0;
}
#endif

static u8 ipvlan_get_v4_hash(const void *iaddr)
{
	const struct in_addr *ip4_addr = iaddr;

	return jhash_1word(ip4_addr->s_addr, ipvlan_jhash_secret) &
	       IPVLAN_HASH_MASK;
}

static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
{
	if (!is_v6 && addr->atype == IPVL_IPV4) {
		struct in_addr *i4addr = (struct in_addr *)iaddr;

		return addr->ip4addr.s_addr == i4addr->s_addr;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (is_v6 && addr->atype == IPVL_IPV6) {
		struct in6_addr *i6addr = (struct in6_addr *)iaddr;

		return ipv6_addr_equal(&addr->ip6addr, i6addr);
#endif
	}

	return false;
}

static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
					       const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;
	u8 hash;

	hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
	       ipvlan_get_v4_hash(iaddr);
	hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
		if (addr_equal(is_v6, addr, iaddr))
			return addr;
	return NULL;
}

void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
{
	struct ipvl_port *port = ipvlan->port;
	u8 hash;

	hash = (addr->atype == IPVL_IPV6) ?
	       ipvlan_get_v6_hash(&addr->ip6addr) :
	       ipvlan_get_v4_hash(&addr->ip4addr);
	if (hlist_unhashed(&addr->hlnode))
		hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
}

void ipvlan_ht_addr_del(struct ipvl_addr *addr)
{
	hlist_del_init_rcu(&addr->hlnode);
}

struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
				   const void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr, *ret = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
		if (addr_equal(is_v6, addr, iaddr)) {
			ret = addr;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
	struct ipvl_dev *ipvlan;
	bool ret = false;

	rcu_read_lock();
	list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
		if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
	void *lyr3h = NULL;

	switch (skb->protocol) {
	case htons(ETH_P_ARP): {
		struct arphdr *arph;

		if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
			return NULL;

		arph = arp_hdr(skb);
		*type = IPVL_ARP;
		lyr3h = arph;
		break;
	}
	case htons(ETH_P_IP): {
		u32 pktlen;
		struct iphdr *ip4h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip4h))))
			return NULL;

		ip4h = ip_hdr(skb);
		pktlen = ntohs(ip4h->tot_len);
		if (ip4h->ihl < 5 || ip4h->version != 4)
			return NULL;
		if (skb->len < pktlen || pktlen < (ip4h->ihl * 4))
			return NULL;

		*type = IPVL_IPV4;
		lyr3h = ip4h;
		break;
	}
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct ipv6hdr *ip6h;

		if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h))))
			return NULL;

		ip6h = ipv6_hdr(skb);
		if (ip6h->version != 6)
			return NULL;

		*type = IPVL_IPV6;
		lyr3h = ip6h;

		if (ipv6_addr_any(&ip6h->saddr) &&
		    ip6h->nexthdr == NEXTHDR_ICMP) {
			struct icmp6hdr *icmph;

			if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
				return NULL;

			ip6h = ipv6_hdr(skb);
			icmph = (struct icmp6hdr *)(ip6h + 1);

			if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
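				/* Need to access the ipv6 address in body */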
				if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
						+ sizeof(struct in6_addr))))
					return NULL;

				ip6h = ipv6_hdr(skb);
				icmph = (struct icmp6hdr *)(ip6h + 1);
			}

			*type = IPVL_ICMPV6;
			lyr3h = icmph;
		}
		break;
	}
#endif
	default:
		return NULL;
	}

	return lyr3h;
}

unsigned int ipvlan_mac_hash(const unsigned char *addr)
{
	u32 hash = jhash_1word(__get_unaligned_cpu32(addr+2),
			       ipvlan_jhash_secret);

	return hash & IPVLAN_MAC_FILTER_MASK;
}

void ipvlan_process_multicast(struct work_struct *work)
{
	struct ipvl_port *port = container_of(work, struct ipvl_port, wq);
	struct ethhdr *ethh;
	struct ipvl_dev *ipvlan;
	struct sk_buff *skb, *nskb;
	struct sk_buff_head list;
	unsigned int len;
	unsigned int mac_hash;
	int ret;
	u8 pkt_type;
	bool tx_pkt;

	__skb_queue_head_init(&list);

	spin_lock_bh(&port->backlog.lock);
	skb_queue_splice_tail_init(&port->backlog, &list);
	spin_unlock_bh(&port->backlog.lock);

	while ((skb = __skb_dequeue(&list)) != NULL) {
		struct net_device *dev = skb->dev;
		bool consumed = false;

		ethh = eth_hdr(skb);
		tx_pkt = IPVL_SKB_CB(skb)->tx_pkt;
		mac_hash = ipvlan_mac_hash(ethh->h_dest);

		if (ether_addr_equal(ethh->h_dest, port->dev->broadcast))
			pkt_type = PACKET_BROADCAST;
		else
			pkt_type = PACKET_MULTICAST;

		rcu_read_lock();
		list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
			if (tx_pkt && (ipvlan->dev == skb->dev))
				continue;
			if (!test_bit(mac_hash, ipvlan->mac_filters))
				continue;
			if (!(ipvlan->dev->flags & IFF_UP))
				continue;
			ret = NET_RX_DROP;
			len = skb->len + ETH_HLEN;
			nskb = skb_clone(skb, GFP_ATOMIC);
			local_bh_disable();
			if (nskb) {
				consumed = true;
				nskb->pkt_type = pkt_type;
				nskb->dev = ipvlan->dev;
				if (tx_pkt)
					ret = dev_forward_skb(ipvlan->dev, nskb);
				else
					ret = netif_rx(nskb);
			}
			ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true);
			local_bh_enable();
		}
		rcu_read_unlock();

		if (tx_pkt) {
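			/* Locally sourced packet: transmit the original
			 * through the master device as well.
			 */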
			skb->dev = port->dev;
			skb->pkt_type = pkt_type;
			dev_queue_xmit(skb);
		} else {
			if (consumed)
				consume_skb(skb);
			else
				kfree_skb(skb);
		}
		dev_put(dev);
		cond_resched();
	}
}

static void ipvlan_skb_crossing_ns(struct sk_buff *skb, struct net_device *dev)
{
	bool xnet = true;

	if (dev)
		xnet = !net_eq(dev_net(skb->dev), dev_net(dev));

	skb_scrub_packet(skb, xnet);
	if (dev)
		skb->dev = dev;
}

static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
			    bool local)
{
	struct ipvl_dev *ipvlan = addr->master;
	struct net_device *dev = ipvlan->dev;
	unsigned int len;
	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
	bool success = false;
	struct sk_buff *skb = *pskb;

	len = skb->len + ETH_HLEN;
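	/* Only packets exchanged between two local slaves need to have
	 * device-up check as well as skb-share check.
	 */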
	if (local) {
		if (unlikely(!(dev->flags & IFF_UP))) {
			kfree_skb(skb);
			goto out;
		}

		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			goto out;

		*pskb = skb;
	}

	if (local) {
		skb->pkt_type = PACKET_HOST;
		if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS)
			success = true;
	} else {
		skb->dev = dev;
		ret = RX_HANDLER_ANOTHER;
		success = true;
	}

out:
	ipvlan_count_rx(ipvlan, len, success, false);
	return ret;
}

struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
				     int addr_type, bool use_dest)
{
	struct ipvl_addr *addr = NULL;

	switch (addr_type) {
#if IS_ENABLED(CONFIG_IPV6)
	case IPVL_IPV6: {
		struct ipv6hdr *ip6h;
		struct in6_addr *i6addr;

		ip6h = (struct ipv6hdr *)lyr3h;
		i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		break;
	}
	case IPVL_ICMPV6: {
		struct nd_msg *ndmh;
		struct in6_addr *i6addr;

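		/* Make sure that the NeighborSolicitation ICMPv6 packets
		 * are handled to avoid DAD issue.
		 */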
		ndmh = (struct nd_msg *)lyr3h;
		if (ndmh->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
			i6addr = &ndmh->target;
			addr = ipvlan_ht_addr_lookup(port, i6addr, true);
		}
		break;
	}
#endif
	case IPVL_IPV4: {
		struct iphdr *ip4h;
		__be32 *i4addr;

		ip4h = (struct iphdr *)lyr3h;
		i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
		addr = ipvlan_ht_addr_lookup(port, i4addr, false);
		break;
	}
	case IPVL_ARP: {
		struct arphdr *arph;
		unsigned char *arp_ptr;
		__be32 dip;

		arph = (struct arphdr *)lyr3h;
		arp_ptr = (unsigned char *)(arph + 1);
		if (use_dest)
			arp_ptr += (2 * port->dev->addr_len) + 4;
		else
			arp_ptr += port->dev->addr_len;

		memcpy(&dip, arp_ptr, 4);
		addr = ipvlan_ht_addr_lookup(port, &dip, false);
		break;
	}
	}

	return addr;
}

static int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
	const struct iphdr *ip4h = ip_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int err, ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		.flowi4_oif = dev->ifindex,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC,
		.flowi4_mark = skb->mark,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
		ip_rt_put(rt);
		goto err;
	}
	skb_dst_set(skb, &rt->dst);
	err = ip_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}

#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);
	struct dst_entry *dst;
	int err, ret = NET_XMIT_DROP;
	struct flowi6 fl6 = {
		.flowi6_oif = dev->ifindex,
		.daddr = ip6h->daddr,
		.saddr = ip6h->saddr,
		.flowi6_flags = FLOWI_FLAG_ANYSRC,
		.flowlabel = ip6_flowinfo(ip6h),
		.flowi6_mark = skb->mark,
		.flowi6_proto = ip6h->nexthdr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		ret = dst->error;
		dst_release(dst);
		goto err;
	}
	skb_dst_set(skb, dst);
	err = ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;
	goto out;
err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
out:
	return ret;
}
#else
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
	return NET_XMIT_DROP;
}
#endif

static int ipvlan_process_outbound(struct sk_buff *skb)
{
	int ret = NET_XMIT_DROP;

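	/* If a MAC header is present, the packet arrived fully framed
	 * on the slave; validate it and strip the L2 header before
	 * handing the packet to the IP layer for routing.
	 */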
	if (skb_mac_header_was_set(skb)) {
		struct ethhdr *ethh = eth_hdr(skb);

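		/* Multicast and broadcast traffic is never routed
		 * in this mode, so drop it here. */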
		if (is_multicast_ether_addr(ethh->h_dest)) {
			pr_debug_ratelimited(
				"Dropped {multi|broad}cast of type=[%x]\n",
				ntohs(skb->protocol));
			kfree_skb(skb);
			goto out;
		}

		skb_pull(skb, sizeof(*ethh));
		skb->mac_header = (typeof(skb->mac_header))~0U;
		skb_reset_network_header(skb);
	}

	if (skb->protocol == htons(ETH_P_IPV6))
		ret = ipvlan_process_v6_outbound(skb);
	else if (skb->protocol == htons(ETH_P_IP))
		ret = ipvlan_process_v4_outbound(skb);
	else {
		pr_warn_ratelimited("Dropped outbound packet type=%x\n",
				    ntohs(skb->protocol));
		kfree_skb(skb);
	}
out:
	return ret;
}

static void ipvlan_multicast_enqueue(struct ipvl_port *port,
				     struct sk_buff *skb, bool tx_pkt)
{
	if (skb->protocol == htons(ETH_P_PAUSE)) {
		kfree_skb(skb);
		return;
	}

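	/* Record whether this skb was queued from a transmitting slave
	 * so that the multicast work handler can tell TX packets from
	 * received ones; skb->cb is free for our use at this point.
	 */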
	IPVL_SKB_CB(skb)->tx_pkt = tx_pkt;

	spin_lock(&port->backlog.lock);
	if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) {
		if (skb->dev)
			dev_hold(skb->dev);
		__skb_queue_tail(&port->backlog, skb);
		spin_unlock(&port->backlog.lock);
		schedule_work(&port->wq);
	} else {
		spin_unlock(&port->backlog.lock);
		dev_core_stats_rx_dropped_inc(skb->dev);
		kfree_skb(skb);
	}
}

static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	void *lyr3h;
	struct ipvl_addr *addr;
	int addr_type;

	lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	if (!ipvlan_is_vepa(ipvlan->port)) {
		addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
		if (addr) {
			if (ipvlan_is_private(ipvlan->port)) {
				consume_skb(skb);
				return NET_XMIT_DROP;
			}
			return ipvlan_rcv_frame(addr, &skb, true);
		}
	}
out:
	ipvlan_skb_crossing_ns(skb, ipvlan->phy_dev);
	return ipvlan_process_outbound(skb);
}

static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ethhdr *eth = skb_eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (!ipvlan_is_vepa(ipvlan->port) &&
	    ether_addr_equal(eth->h_dest, eth->h_source)) {
		lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
		if (lyr3h) {
			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
			if (addr) {
				if (ipvlan_is_private(ipvlan->port)) {
					consume_skb(skb);
					return NET_XMIT_DROP;
				}
				return ipvlan_rcv_frame(addr, &skb, true);
			}
		}
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NET_XMIT_DROP;

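		/* Packet definitely does not belong to any of the
		 * virtual devices, but the dest is local. So forward
		 * the skb for the main-dev. At the RX side we just return
		 * RX_HANDLER_PASS so that netif_rx() can make it in the
		 * regular path.
		 */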
		return dev_forward_skb(ipvlan->phy_dev, skb);

	} else if (is_multicast_ether_addr(eth->h_dest)) {
		skb_reset_mac_header(skb);
		ipvlan_skb_crossing_ns(skb, NULL);
		ipvlan_multicast_enqueue(ipvlan->port, skb, true);
		return NET_XMIT_SUCCESS;
	}

	skb->dev = ipvlan->phy_dev;
	return dev_queue_xmit(skb);
}

int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);

	if (!port)
		goto out;

	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
		goto out;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_xmit_mode_l2(skb, dev);
	case IPVLAN_MODE_L3:
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
#endif
		return ipvlan_xmit_mode_l3(skb, dev);
	}

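	/* Should not reach here */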
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
out:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port)
{
	struct ethhdr *eth = eth_hdr(skb);
	struct ipvl_addr *addr;
	void *lyr3h;
	int addr_type;

	if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
		lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
		if (!lyr3h)
			return true;

		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, false);
		if (addr)
			return false;
	}

	return true;
}

static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	void *lyr3h;
	int addr_type;
	struct ipvl_addr *addr;
	struct sk_buff *skb = *pskb;
	rx_handler_result_t ret = RX_HANDLER_PASS;

	lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
	if (!lyr3h)
		goto out;

	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
	if (addr)
		ret = ipvlan_rcv_frame(addr, pskb, false);

out:
	return ret;
}

static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb,
						 struct ipvl_port *port)
{
	struct sk_buff *skb = *pskb;
	struct ethhdr *eth = eth_hdr(skb);
	rx_handler_result_t ret = RX_HANDLER_PASS;

	if (is_multicast_ether_addr(eth->h_dest)) {
		if (ipvlan_external_frame(skb, port)) {
			struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

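			/* External frames are queued for device local
			 * distribution, but a copy is given to master
			 * straight away to avoid sending duplicates later
			 * when work-queue processes this frame. This is
			 * achieved by returning RX_HANDLER_PASS.
			 */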
			if (nskb) {
				ipvlan_skb_crossing_ns(nskb, NULL);
				ipvlan_multicast_enqueue(port, nskb, false);
			}
		}
	} else {
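		/* Perform like l3 mode for non-multicast packet */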
		ret = ipvlan_handle_mode_l3(pskb, port);
	}

	return ret;
}

rx_handler_result_t ipvlan_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct ipvl_port *port = ipvlan_port_get_rcu(skb->dev);

	if (!port)
		return RX_HANDLER_PASS;

	switch (port->mode) {
	case IPVLAN_MODE_L2:
		return ipvlan_handle_mode_l2(pskb, port);
	case IPVLAN_MODE_L3:
		return ipvlan_handle_mode_l3(pskb, port);
#ifdef CONFIG_IPVLAN_L3S
	case IPVLAN_MODE_L3S:
		return RX_HANDLER_PASS;
#endif
	}

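	/* Should not reach here */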
	WARN_ONCE(true, "%s called for mode = [%x]\n", __func__, port->mode);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}