#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include "hsr_device.h"
#include "hsr_slave.h"
#include "hsr_framereg.h"
#include "hsr_main.h"
#include "hsr_forward.h"

static bool is_admin_up(struct net_device *dev)
{
	return dev && (dev->flags & IFF_UP);
}

static bool is_slave_up(struct net_device *dev)
{
	return dev && is_admin_up(dev) && netif_oper_up(dev);
}

static void __hsr_set_operstate(struct net_device *dev, int transition)
{
	write_lock(&dev_base_lock);
	if (dev->operstate != transition) {
		dev->operstate = transition;
		write_unlock(&dev_base_lock);
		netdev_state_change(dev);
	} else {
		write_unlock(&dev_base_lock);
	}
}

static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
{
	if (!is_admin_up(master->dev)) {
		__hsr_set_operstate(master->dev, IF_OPER_DOWN);
		return;
	}

	if (has_carrier)
		__hsr_set_operstate(master->dev, IF_OPER_UP);
	else
		__hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
}

static bool hsr_check_carrier(struct hsr_port *master)
{
	struct hsr_port *port;

	ASSERT_RTNL();

	hsr_for_each_port(master->hsr, port) {
		if (port->type != HSR_PT_MASTER && is_slave_up(port->dev)) {
			netif_carrier_on(master->dev);
			return true;
		}
	}

	netif_carrier_off(master->dev);

	return false;
}

static void hsr_check_announce(struct net_device *hsr_dev,
			       unsigned char old_operstate)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(hsr_dev);

	if (hsr_dev->operstate == IF_OPER_UP && old_operstate != IF_OPER_UP) {
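		/* Went up */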
		hsr->announce_count = 0;
		mod_timer(&hsr->announce_timer,
			  jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
	}

	if (hsr_dev->operstate != IF_OPER_UP && old_operstate == IF_OPER_UP)
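		/* Went down */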
		del_timer(&hsr->announce_timer);
}

void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
{
	struct hsr_port *master;
	unsigned char old_operstate;
	bool has_carrier;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
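	/* Snapshot the old operstate so hsr_check_announce() can detect
	 * up/down transitions after carrier and operstate are recomputed.
	 */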
	old_operstate = master->dev->operstate;
	has_carrier = hsr_check_carrier(master);
	hsr_set_operstate(master, has_carrier);
	hsr_check_announce(master->dev, old_operstate);
}

int hsr_get_max_mtu(struct hsr_priv *hsr)
{
	unsigned int mtu_max;
	struct hsr_port *port;

	mtu_max = ETH_DATA_LEN;
	hsr_for_each_port(hsr, port)
		if (port->type != HSR_PT_MASTER)
			mtu_max = min(port->dev->mtu, mtu_max);

	if (mtu_max < HSR_HLEN)
		return 0;
	return mtu_max - HSR_HLEN;
}

static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
{
	struct hsr_priv *hsr;

	hsr = netdev_priv(dev);

	if (new_mtu > hsr_get_max_mtu(hsr)) {
		netdev_info(dev, "An HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
			    HSR_HLEN);
		return -EINVAL;
	}

	dev->mtu = new_mtu;

	return 0;
}

static int hsr_dev_open(struct net_device *dev)
{
	struct hsr_priv *hsr;
	struct hsr_port *port;
	char designation;

	hsr = netdev_priv(dev);
	designation = '\0';

	hsr_for_each_port(hsr, port) {
		if (port->type == HSR_PT_MASTER)
			continue;
		switch (port->type) {
		case HSR_PT_SLAVE_A:
			designation = 'A';
			break;
		case HSR_PT_SLAVE_B:
			designation = 'B';
			break;
		default:
			designation = '?';
		}
		if (!is_slave_up(port->dev))
			netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
				    designation, port->dev->name);
	}

	if (designation == '\0')
		netdev_warn(dev, "No slave devices configured\n");

	return 0;
}

static int hsr_dev_close(struct net_device *dev)
{
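	/* Nothing to do here */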
	return 0;
}

static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
						netdev_features_t features)
{
	netdev_features_t mask;
	struct hsr_port *port;

	mask = features;

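	/* Mask out all features that, if supported by one device, should be
	 * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
	 *
	 * Anything that's off in mask will not be enabled - so only things
	 * that were in features originally, and also is in NETIF_F_ONE_FOR_ALL,
	 * may become enabled.
	 */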
	features &= ~NETIF_F_ONE_FOR_ALL;
	hsr_for_each_port(hsr, port)
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);

	return features;
}

static netdev_features_t hsr_fix_features(struct net_device *dev,
					  netdev_features_t features)
{
	struct hsr_priv *hsr = netdev_priv(dev);

	return hsr_features_recompute(hsr, features);
}

static netdev_tx_t hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hsr_priv *hsr = netdev_priv(dev);
	struct hsr_port *master;

	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (master) {
		skb->dev = master->dev;
		skb_reset_mac_header(skb);
		skb_reset_mac_len(skb);
		hsr_forward_skb(skb, master);
	} else {
		dev_core_stats_tx_dropped_inc(dev);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static const struct header_ops hsr_header_ops = {
	.create = eth_header,
	.parse = eth_header_parse,
};

static struct sk_buff *hsr_init_skb(struct hsr_port *master)
{
	struct hsr_priv *hsr = master->hsr;
	struct sk_buff *skb;
	int hlen, tlen;

	hlen = LL_RESERVED_SPACE(master->dev);
	tlen = master->dev->needed_tailroom;

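	/* skb size is the same for PRP and HSR supervision frames: room for
	 * the supervision tag and payload plus the slave's head/tailroom.
	 */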
	skb = dev_alloc_skb(sizeof(struct hsr_sup_tag) +
			    sizeof(struct hsr_sup_payload) + hlen + tlen);

	if (!skb)
		return skb;

	skb_reserve(skb, hlen);
	skb->dev = master->dev;
	skb->priority = TC_PRIO_CONTROL;

	if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
			    hsr->sup_multicast_addr,
			    skb->dev->dev_addr, skb->len) <= 0)
		goto out;

	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	return skb;
out:
	kfree_skb(skb);

	return NULL;
}

static void send_hsr_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	__u8 type = HSR_TLV_LIFE_CHECK;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	unsigned long irqflags;
	struct sk_buff *skb;

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
		type = HSR_TLV_ANNOUNCE;
		*interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
		hsr->announce_count++;
	}

	skb = hsr_init_skb(master);
	if (!skb) {
		WARN_ONCE(1, "HSR: Could not send supervision frame\n");
		return;
	}

	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, hsr->prot_version);

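	/* From HSRv1 on we have separate supervision sequence numbers. */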
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	if (hsr->prot_version > 0) {
		hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
		hsr->sup_sequence_nr++;
	} else {
		hsr_stag->sequence_nr = htons(hsr->sequence_nr);
		hsr->sequence_nr++;
	}
	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_stag->tlv.HSR_TLV_type = type;
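	/* TLV length: payload size for HSRv1, a fixed 12 octets for HSRv0 */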
	hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ?
				sizeof(struct hsr_sup_payload) : 12;

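	/* Payload: MacAddressA */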
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN))
		return;

	hsr_forward_skb(skb, master);

	return;
}

static void send_prp_supervision_frame(struct hsr_port *master,
				       unsigned long *interval)
{
	struct hsr_priv *hsr = master->hsr;
	struct hsr_sup_payload *hsr_sp;
	struct hsr_sup_tag *hsr_stag;
	unsigned long irqflags;
	struct sk_buff *skb;

	skb = hsr_init_skb(master);
	if (!skb) {
		WARN_ONCE(1, "PRP: Could not send supervision frame\n");
		return;
	}

	*interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
	hsr_stag = skb_put(skb, sizeof(struct hsr_sup_tag));
	set_hsr_stag_path(hsr_stag, (hsr->prot_version ? 0x0 : 0xf));
	set_hsr_stag_HSR_ver(hsr_stag, (hsr->prot_version ? 1 : 0));

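	/* Supervision frames have their own sequence number space
	 * (sup_sequence_nr), separate from the data frame sequence_nr.
	 */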
	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
	hsr_stag->sequence_nr = htons(hsr->sup_sequence_nr);
	hsr->sup_sequence_nr++;
	hsr_stag->tlv.HSR_TLV_type = PRP_TLV_LIFE_CHECK_DD;
	hsr_stag->tlv.HSR_TLV_length = sizeof(struct hsr_sup_payload);

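	/* Payload: MacAddressA */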
	hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload));
	ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr);

	if (skb_put_padto(skb, ETH_ZLEN)) {
		spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
		return;
	}

	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);

	hsr_forward_skb(skb, master);
}
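
/* Announce (supervision frame) timer function */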
static void hsr_announce(struct timer_list *t)
{
	struct hsr_priv *hsr;
	struct hsr_port *master;
	unsigned long interval;

	hsr = from_timer(hsr, t, announce_timer);

	rcu_read_lock();
	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	hsr->proto_ops->send_sv_frame(master, &interval);

	if (is_admin_up(master->dev))
		mod_timer(&hsr->announce_timer, jiffies + interval);

	rcu_read_unlock();
}

void hsr_del_ports(struct hsr_priv *hsr)
{
	struct hsr_port *port;

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
	if (port)
		hsr_del_port(port);

	port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
	if (port)
		hsr_del_port(port);
}

static const struct net_device_ops hsr_device_ops = {
	.ndo_change_mtu = hsr_dev_change_mtu,
	.ndo_open = hsr_dev_open,
	.ndo_stop = hsr_dev_close,
	.ndo_start_xmit = hsr_dev_xmit,
	.ndo_fix_features = hsr_fix_features,
};

static struct device_type hsr_type = {
	.name = "hsr",
};

static struct hsr_proto_ops hsr_ops = {
	.send_sv_frame = send_hsr_supervision_frame,
	.create_tagged_frame = hsr_create_tagged_frame,
	.get_untagged_frame = hsr_get_untagged_frame,
	.drop_frame = hsr_drop_frame,
	.fill_frame_info = hsr_fill_frame_info,
	.invalid_dan_ingress_frame = hsr_invalid_dan_ingress_frame,
};

static struct hsr_proto_ops prp_ops = {
	.send_sv_frame = send_prp_supervision_frame,
	.create_tagged_frame = prp_create_tagged_frame,
	.get_untagged_frame = prp_get_untagged_frame,
	.drop_frame = prp_drop_frame,
	.fill_frame_info = prp_fill_frame_info,
	.handle_san_frame = prp_handle_san_frame,
	.update_san_info = prp_update_san_info,
};

void hsr_dev_setup(struct net_device *dev)
{
	eth_hw_addr_random(dev);

	ether_setup(dev);
	dev->min_mtu = 0;
	dev->header_ops = &hsr_header_ops;
	dev->netdev_ops = &hsr_device_ops;
	SET_NETDEV_DEVTYPE(dev, &hsr_type);
	dev->priv_flags |= IFF_NO_QUEUE;

	dev->needs_free_netdev = true;

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
			   NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
			   NETIF_F_HW_VLAN_CTAG_TX;

	dev->features = dev->hw_features;

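	/* Prevent recursive tx locking */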
	dev->features |= NETIF_F_LLTX;
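	/* Creating VLAN devices on top of an HSR device is not supported */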
	dev->features |= NETIF_F_VLAN_CHALLENGED;
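	/* The HSR device cannot be moved to another network namespace */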
	dev->features |= NETIF_F_NETNS_LOCAL;
}
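
/* Return true if dev is an HSR master; return false otherwise. */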
bool is_hsr_master(struct net_device *dev)
{
	return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
}
EXPORT_SYMBOL(is_hsr_master);
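
/* Default multicast address for HSR Supervision frames */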
static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
	0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};

int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
		     unsigned char multicast_spec, u8 protocol_version,
		     struct netlink_ext_ack *extack)
{
	bool unregister = false;
	struct hsr_priv *hsr;
	int res, i;

	hsr = netdev_priv(hsr_dev);
	INIT_LIST_HEAD(&hsr->ports);
	INIT_HLIST_HEAD(&hsr->self_node_db);
	hsr->hash_buckets = HSR_HSIZE;
	get_random_bytes(&hsr->hash_seed, sizeof(hsr->hash_seed));
	for (i = 0; i < hsr->hash_buckets; i++)
		INIT_HLIST_HEAD(&hsr->node_db[i]);

	spin_lock_init(&hsr->list_lock);

	eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);

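	/* Initialize protocol specific functions */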
	if (protocol_version == PRP_V1) {
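		/* For PRP, lan_id has most significant 3 bits holding
		 * the net_id of PRP_LAN_ID
		 */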
		hsr->net_id = PRP_LAN_ID << 1;
		hsr->proto_ops = &prp_ops;
	} else {
		hsr->proto_ops = &hsr_ops;
	}

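	/* Make sure we recognize frames from ourselves in hsr_rcv() */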
	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
				   slave[1]->dev_addr);
	if (res < 0)
		return res;

	spin_lock_init(&hsr->seqnr_lock);

	hsr->sequence_nr = HSR_SEQNR_START;
	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;

	timer_setup(&hsr->announce_timer, hsr_announce, 0);
	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);

	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;

	hsr->prot_version = protocol_version;

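	/* Make sure the 1st call to netif_carrier_on() gets through */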
	netif_carrier_off(hsr_dev);

	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER, extack);
	if (res)
		goto err_add_master;

	res = register_netdevice(hsr_dev);
	if (res)
		goto err_unregister;

	unregister = true;

	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A, extack);
	if (res)
		goto err_unregister;

	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B, extack);
	if (res)
		goto err_unregister;

	hsr_debugfs_init(hsr, hsr_dev);
	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));

	return 0;

err_unregister:
	hsr_del_ports(hsr);
err_add_master:
	hsr_del_self_node(hsr);

	if (unregister)
		unregister_netdevice(hsr_dev);
	return res;
}