// SPDX-License-Identifier: GPL-2.0
/* Frame forwarding for the HSR and PRP redundancy protocols (IEC 62439-3). */
#include "hsr_forward.h"
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include "hsr_main.h"
#include "hsr_framereg.h"

struct hsr_node;
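
/* A supervision frame is addressed to the HSR supervision multicast address
 * and carries a list of TLVs: an announce/life-check TLV first, optionally a
 * RedBox MAC TLV, and finally an end-of-TLV marker. This helper only
 * validates that layout; the contents are handled by hsr_handle_sup_frame().
 */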
static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
{
	struct ethhdr *eth_hdr;
	struct hsr_sup_tag *hsr_sup_tag;
	struct hsrv1_ethhdr_sp *hsr_V1_hdr;
	struct hsr_sup_tlv *hsr_sup_tlv;
	u16 total_length = 0;

	WARN_ON_ONCE(!skb_mac_header_was_set(skb));
	eth_hdr = (struct ethhdr *)skb_mac_header(skb);

	/* Correct destination address? */
	if (!ether_addr_equal(eth_hdr->h_dest,
			      hsr->sup_multicast_addr))
		return false;

	/* Correct ether type? */
	if (!(eth_hdr->h_proto == htons(ETH_P_PRP) ||
	      eth_hdr->h_proto == htons(ETH_P_HSR)))
		return false;

	/* Get the supervision header from the correct location. */
	if (eth_hdr->h_proto == htons(ETH_P_HSR)) { /* HSRv1 */
		total_length = sizeof(struct hsrv1_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_V1_hdr = (struct hsrv1_ethhdr_sp *)skb_mac_header(skb);
		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
			return false;

		hsr_sup_tag = &hsr_V1_hdr->hsr_sup;
	} else {
		total_length = sizeof(struct hsrv0_ethhdr_sp);
		if (!pskb_may_pull(skb, total_length))
			return false;

		hsr_sup_tag =
			&((struct hsrv0_ethhdr_sp *)skb_mac_header(skb))->hsr_sup;
	}

	if (hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_ANNOUNCE &&
	    hsr_sup_tag->tlv.HSR_TLV_type != HSR_TLV_LIFE_CHECK &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DD &&
	    hsr_sup_tag->tlv.HSR_TLV_type != PRP_TLV_LIFE_CHECK_DA)
		return false;
	if (hsr_sup_tag->tlv.HSR_TLV_length != 12 &&
	    hsr_sup_tag->tlv.HSR_TLV_length != sizeof(struct hsr_sup_payload))
		return false;

	/* Get next TLV */
	total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tag->tlv.HSR_TLV_length;
	if (!pskb_may_pull(skb, total_length))
		return false;
	skb_pull(skb, total_length);
	hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
	skb_push(skb, total_length);

	/* If this is a RedBox supervision frame, another TLV carrying the
	 * RedBox MAC address follows.
	 */
	if (hsr_sup_tlv->HSR_TLV_type == PRP_TLV_REDBOX_MAC) {
		/* TLV length must be the length of a MAC address */
		if (hsr_sup_tlv->HSR_TLV_length != sizeof(struct hsr_sup_payload))
			return false;

		/* Make sure the next TLV is available */
		total_length += sizeof(struct hsr_sup_tlv) + hsr_sup_tlv->HSR_TLV_length;
		if (!pskb_may_pull(skb, total_length))
			return false;

		/* Get next TLV */
		skb_pull(skb, total_length);
		hsr_sup_tlv = (struct hsr_sup_tlv *)skb->data;
		skb_push(skb, total_length);
	}

	/* The end-of-TLV marker, if present, must have zero length */
	if (hsr_sup_tlv->HSR_TLV_type == HSR_TLV_EOT &&
	    hsr_sup_tlv->HSR_TLV_length != 0)
		return false;

	return true;
}

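/* Create an untagged copy of an HSR-tagged frame: the skb is copied with
 * HSR_HLEN bytes less headroom and the Ethernet addresses (plus VLAN header,
 * if present) are copied down over the tag, so the encapsulated protocol
 * becomes the frame's ethertype again.
 */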
static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in,
					       struct hsr_frame_info *frame)
{
	struct sk_buff *skb;
	int copylen;
	unsigned char *dst, *src;

	skb_pull(skb_in, HSR_HLEN);
	skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
	skb_push(skb_in, HSR_HLEN);
	if (!skb)
		return NULL;

	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start -= HSR_HLEN;

	copylen = 2 * ETH_ALEN;
	if (frame->is_vlan)
		copylen += VLAN_HLEN;
	src = skb_mac_header(skb_in);
	dst = skb_mac_header(skb);
	memcpy(dst, src, copylen);

	skb->protocol = eth_hdr(skb)->h_proto;
	return skb;
}

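/* Return a clone of the untagged version of the frame, creating it from the
 * HSR-tagged skb on first use.
 */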
struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_hsr) {
			frame->skb_std =
				create_stripped_skb_hsr(frame->skb_hsr, frame);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

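/* PRP counterpart: the untagged frame is created by trimming the RCT from
 * the end of the frame instead of stripping a header tag.
 */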
struct sk_buff *prp_get_untagged_frame(struct hsr_frame_info *frame,
				       struct hsr_port *port)
{
	if (!frame->skb_std) {
		if (frame->skb_prp) {
			/* trim the skb by HSR_HLEN to exclude the RCT */
			skb_trim(frame->skb_prp,
				 frame->skb_prp->len - HSR_HLEN);
			frame->skb_std =
				__pskb_copy(frame->skb_prp,
					    skb_headroom(frame->skb_prp),
					    GFP_ATOMIC);
		} else {
			/* Unexpected */
			WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
				  __FILE__, __LINE__, port->dev->name);
			return NULL;
		}
	}

	return skb_clone(frame->skb_std, GFP_ATOMIC);
}

static void prp_set_lan_id(struct prp_rct *trailer,
			   struct hsr_port *port)
{
	int lane_id;

	if (port->type == HSR_PT_SLAVE_A)
		lane_id = 0;
	else
		lane_id = 1;

	/* Add net_id in the upper bits of the lane id */
	lane_id |= port->hsr->net_id;
	set_prp_lan_id(trailer, lane_id);
}

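/* Pad the frame, then append and fill the PRP Redundancy Control Trailer.
 * Tailroom for the RCT must have been reserved by the caller.
 */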
static struct sk_buff *prp_fill_rct(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port)
{
	struct prp_rct *trailer;
	int min_size = ETH_ZLEN;
	int lsdu_size;

	if (!skb)
		return skb;

	if (frame->is_vlan)
		min_size = VLAN_ETH_ZLEN;

	if (skb_put_padto(skb, min_size))
		return NULL;

	trailer = (struct prp_rct *)skb_put(skb, HSR_HLEN);
	/* LSDU size: frame length minus the Ethernet (and VLAN) header */
	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;
	prp_set_lan_id(trailer, port);
	set_prp_LSDU_size(trailer, lsdu_size);
	trailer->sequence_nr = htons(frame->sequence_nr);
	trailer->PRP_suffix = htons(ETH_P_PRP);
	skb->protocol = eth_hdr(skb)->h_proto;

	return skb;
}

static void hsr_set_path_id(struct hsr_ethhdr *hsr_ethhdr,
			    struct hsr_port *port)
{
	int path_id;

	if (port->type == HSR_PT_SLAVE_A)
		path_id = 0;
	else
		path_id = 1;

	set_hsr_tag_path(&hsr_ethhdr->hsr_tag, path_id);
}

static struct sk_buff *hsr_fill_tag(struct sk_buff *skb,
				    struct hsr_frame_info *frame,
				    struct hsr_port *port, u8 proto_version)
{
	struct hsr_ethhdr *hsr_ethhdr;
	int lsdu_size;

	/* Pad to minimum packet size, which is 60 + 6 (HSR tag) */
	if (skb_put_padto(skb, ETH_ZLEN + HSR_HLEN))
		return NULL;

	lsdu_size = skb->len - 14;
	if (frame->is_vlan)
		lsdu_size -= 4;

	hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);

	hsr_set_path_id(hsr_ethhdr, port);
	set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
	hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
	hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
	hsr_ethhdr->ethhdr.h_proto = htons(proto_version ?
			ETH_P_HSR : ETH_P_PRP);
	skb->protocol = hsr_ethhdr->ethhdr.h_proto;

	return skb;
}

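/* If the received frame already carries an HSR tag, just clone it with the
 * path id updated for the outgoing port. Otherwise create a copy with room
 * for the tag and fill it in. HSRv1 frames use ethertype ETH_P_HSR, HSRv0
 * frames use ETH_P_PRP.
 */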
struct sk_buff *hsr_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	unsigned char *dst, *src;
	struct sk_buff *skb;
	int movelen;

	if (frame->skb_hsr) {
		struct hsr_ethhdr *hsr_ethhdr =
			(struct hsr_ethhdr *)skb_mac_header(frame->skb_hsr);

		/* set the lane id properly */
		hsr_set_path_id(hsr_ethhdr, port);
		return skb_clone(frame->skb_hsr, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	/* Create the new skb with enough headroom to fit the HSR tag */
	skb = __pskb_copy(frame->skb_std,
			  skb_headroom(frame->skb_std) + HSR_HLEN, GFP_ATOMIC);
	if (!skb)
		return NULL;
	skb_reset_mac_header(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += HSR_HLEN;

	movelen = ETH_HLEN;
	if (frame->is_vlan)
		movelen += VLAN_HLEN;

	src = skb_mac_header(skb);
	dst = skb_push(skb, HSR_HLEN);
	memmove(dst, src, movelen);
	skb_reset_mac_header(skb);

	/* hsr_fill_tag() pads the frame and fills in the tag; it frees the
	 * skb and returns NULL if padding fails.
	 */
	return hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
}

struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
					struct hsr_port *port)
{
	struct sk_buff *skb;

	if (frame->skb_prp) {
		struct prp_rct *trailer = skb_get_PRP_rct(frame->skb_prp);

		if (trailer) {
			prp_set_lan_id(trailer, port);
		} else {
			WARN_ONCE(!trailer, "errored PRP skb");
			return NULL;
		}
		return skb_clone(frame->skb_prp, GFP_ATOMIC);
	} else if (port->dev->features & NETIF_F_HW_HSR_TAG_INS) {
		return skb_clone(frame->skb_std, GFP_ATOMIC);
	}

	skb = skb_copy_expand(frame->skb_std, 0,
			      skb_tailroom(frame->skb_std) + HSR_HLEN,
			      GFP_ATOMIC);
	/* prp_fill_rct() frees the skb and returns NULL if padding fails, so
	 * don't return the local pointer here.
	 */
	return prp_fill_rct(skb, frame, port);
}

static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
			       struct hsr_node *node_src)
{
	bool was_multicast_frame;
	int res, recv_len;

	was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
	hsr_addr_subst_source(node_src, skb);
	skb_pull(skb, ETH_HLEN);
	/* The skb must not be touched after netif_rx(), so record its length
	 * before handing it over.
	 */
	recv_len = skb->len;
	res = netif_rx(skb);
	if (res == NET_RX_DROP) {
		dev->stats.rx_dropped++;
	} else {
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += recv_len;
		if (was_multicast_frame)
			dev->stats.multicast++;
	}
}

static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
		    struct hsr_frame_info *frame)
{
	if (frame->port_rcv->type == HSR_PT_MASTER) {
		hsr_addr_subst_dest(frame->node_src, skb, port);

		/* Address substitution (IEC 62439-3): use the outgoing
		 * slave's MAC address as the source address of the frame.
		 */
		ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
	}
	return dev_queue_xmit(skb);
}

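/* PRP duplicate discard at the LAN boundary: a frame received on one slave
 * port must not be forwarded out the other slave port, since the two PRP
 * LANs are separate networks.
 */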
bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	return ((frame->port_rcv->type == HSR_PT_SLAVE_A &&
		 port->type == HSR_PT_SLAVE_B) ||
		(frame->port_rcv->type == HSR_PT_SLAVE_B &&
		 port->type == HSR_PT_SLAVE_A));
}

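/* When ring forwarding is offloaded to hardware, the slave ports must not
 * forward between each other in software; treat them like PRP slaves.
 */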
bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port)
{
	if (port->dev->features & NETIF_F_HW_HSR_FWD)
		return prp_drop_frame(frame, port);

	return false;
}

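/* Forward the frame through all ports except:
 * - back through the receiving port
 * - through a port where the same frame has already been sent
 *   (duplicate discard)
 * - to the local master unless the frame is addressed to this host
 * Slave ports get a tagged copy, the master port gets the untagged frame.
 */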
static void hsr_forward_do(struct hsr_frame_info *frame)
{
	struct hsr_port *port;
	struct sk_buff *skb;
	bool sent = false;

	hsr_for_each_port(frame->port_rcv->hsr, port) {
		struct hsr_priv *hsr = port->hsr;

		/* Don't send frame back the way it came */
		if (port == frame->port_rcv)
			continue;

		/* Don't deliver locally unless we should */
		if (port->type == HSR_PT_MASTER && !frame->is_local_dest)
			continue;

		/* Deliver frames directly addressed to us to master only */
		if (port->type != HSR_PT_MASTER && frame->is_local_exclusive)
			continue;

		/* If hardware duplicate generation is enabled, only send out
		 * one port.
		 */
		if ((port->dev->features & NETIF_F_HW_HSR_DUP) && sent)
			continue;

		/* Don't send the frame over a port where it has already been
		 * sent. Frames from a SAN skip this duplicate check.
		 */
		if (!frame->is_from_san &&
		    hsr_register_frame_out(port, frame->node_src,
					   frame->sequence_nr))
			continue;

		if (frame->is_supervision && port->type == HSR_PT_MASTER) {
			hsr_handle_sup_frame(frame);
			continue;
		}

		/* Check if the frame is to be dropped, e.g. no forwarding
		 * between PRP slave ports.
		 */
		if (hsr->proto_ops->drop_frame &&
		    hsr->proto_ops->drop_frame(frame, port))
			continue;

		if (port->type != HSR_PT_MASTER)
			skb = hsr->proto_ops->create_tagged_frame(frame, port);
		else
			skb = hsr->proto_ops->get_untagged_frame(frame, port);

		if (!skb) {
			frame->port_rcv->dev->stats.rx_dropped++;
			continue;
		}

		skb->dev = port->dev;
		if (port->type == HSR_PT_MASTER) {
			hsr_deliver_master(skb, port->dev, frame->node_src);
		} else {
			if (!hsr_xmit(skb, port, frame))
				sent = true;
		}
	}
}

static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
		frame->is_local_exclusive = true;
		skb->pkt_type = PACKET_HOST;
	} else {
		frame->is_local_exclusive = false;
	}

	if (skb->pkt_type == PACKET_HOST ||
	    skb->pkt_type == PACKET_MULTICAST ||
	    skb->pkt_type == PACKET_BROADCAST) {
		frame->is_local_dest = true;
	} else {
		frame->is_local_dest = false;
	}
}

static void handle_std_frame(struct sk_buff *skb,
			     struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;
	unsigned long irqflags;

	frame->skb_hsr = NULL;
	frame->skb_prp = NULL;
	frame->skb_std = skb;

	if (port->type != HSR_PT_MASTER) {
		/* Untagged frame received on a slave port: treat the sender
		 * as a singly attached node (SAN).
		 */
		frame->is_from_san = true;
	} else {
		/* Sequence nr for the master node */
		spin_lock_irqsave(&hsr->seqnr_lock, irqflags);
		frame->sequence_nr = hsr->sequence_nr;
		hsr->sequence_nr++;
		spin_unlock_irqrestore(&hsr->seqnr_lock, irqflags);
	}
}

int hsr_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	struct hsr_port *port = frame->port_rcv;
	struct hsr_priv *hsr = port->hsr;

	/* HSRv0 uses ETH_P_PRP as its ethertype, HSRv1 uses ETH_P_HSR */
	if ((!hsr->prot_version && proto == htons(ETH_P_PRP)) ||
	    proto == htons(ETH_P_HSR)) {
		/* Check if skb contains hsr_ethhdr */
		if (skb->mac_len < sizeof(struct hsr_ethhdr))
			return -EINVAL;

		/* HSR tagged frame: data or supervision */
		frame->skb_std = NULL;
		frame->skb_prp = NULL;
		frame->skb_hsr = skb;
		frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
		return 0;
	}

	/* Standard (untagged) frame */
	handle_std_frame(skb, frame);

	return 0;
}

int prp_fill_frame_info(__be16 proto, struct sk_buff *skb,
			struct hsr_frame_info *frame)
{
	/* A frame with a valid RCT is treated as a PRP-tagged frame */
	struct prp_rct *rct = skb_get_PRP_rct(skb);

	if (rct &&
	    prp_check_lsdu_size(skb, rct, frame->is_supervision)) {
		frame->skb_hsr = NULL;
		frame->skb_std = NULL;
		frame->skb_prp = skb;
		frame->sequence_nr = prp_get_skb_sequence_nr(rct);
		return 0;
	}
	handle_std_frame(skb, frame);

	return 0;
}

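/* Parse the received frame and fill in the hsr_frame_info that
 * hsr_forward_do() operates on: source node, supervision/VLAN flags,
 * protocol-specific skb pointer, sequence number and destination flags.
 */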
static int fill_frame_info(struct hsr_frame_info *frame,
			   struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_priv *hsr = port->hsr;
	struct hsr_vlan_ethhdr *vlan_hdr;
	struct ethhdr *ethhdr;
	__be16 proto;
	int ret;
	u32 hash;

	/* Check if skb contains ethhdr */
	if (skb->mac_len < sizeof(struct ethhdr))
		return -EINVAL;

	memset(frame, 0, sizeof(*frame));

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	hash = hsr_mac_hash(port->hsr, ethhdr->h_source);
	frame->is_supervision = is_supervision_frame(port->hsr, skb);
	frame->node_src = hsr_get_node(port, &hsr->node_db[hash], skb,
				       frame->is_supervision,
				       port->type);
	if (!frame->node_src)
		return -1; /* Unknown node and !is_supervision, or no mem */

	frame->is_vlan = false;
	proto = ethhdr->h_proto;

	if (proto == htons(ETH_P_8021Q))
		frame->is_vlan = true;

	if (frame->is_vlan) {
		vlan_hdr = (struct hsr_vlan_ethhdr *)ethhdr;
		proto = vlan_hdr->vlanhdr.h_vlan_encapsulated_proto;
		netdev_warn_once(skb->dev, "VLAN not yet supported");
	}

	frame->is_from_san = false;
	frame->port_rcv = port;
	ret = hsr->proto_ops->fill_frame_info(proto, skb, frame);
	if (ret)
		return ret;

	check_local_dest(port->hsr, skb, frame);

	return 0;
}

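/* Entry point for frames from the master port (egress) as well as from the
 * slave ports (ingress): classify the frame, register it for the source node
 * and forward copies to all relevant ports. The original skb is consumed.
 */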
void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
{
	struct hsr_frame_info frame;

	if (fill_frame_info(&frame, skb, port) < 0)
		goto out_drop;

	hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
	hsr_forward_do(&frame);

	/* Gets called for ingress frames as well as egress from the master
	 * port, so check and increment stats for the master port only here.
	 */
	if (port->type == HSR_PT_MASTER) {
		port->dev->stats.tx_packets++;
		port->dev->stats.tx_bytes += skb->len;
	}

	kfree_skb(frame.skb_hsr);
	kfree_skb(frame.skb_prp);
	kfree_skb(frame.skb_std);
	return;

out_drop:
	port->dev->stats.tx_dropped++;
	kfree_skb(skb);
}