// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/mrp_bridge.h>
#include "br_private_mrp.h"

static const u8 mrp_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x1 };
static const u8 mrp_in_test_dmac[ETH_ALEN] = { 0x1, 0x15, 0x4e, 0x0, 0x0, 0x3 };

static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb);

static struct br_frame_type mrp_frame_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_MRP),
	.frame_handler = br_mrp_process,
};

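/* Return true if the port is one of the two ring ports of the instance */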
static bool br_mrp_is_ring_port(struct net_bridge_port *p_port,
				struct net_bridge_port *s_port,
				struct net_bridge_port *port)
{
	if (port == p_port ||
	    port == s_port)
		return true;

	return false;
}

/* Return true if the port is the interconnect port of the instance */
static bool br_mrp_is_in_port(struct net_bridge_port *i_port,
			      struct net_bridge_port *port)
{
	if (port == i_port)
		return true;

	return false;
}

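/* Find the bridge port with the given ifindex, NULL if none */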
static struct net_bridge_port *br_mrp_get_port(struct net_bridge *br,
					       u32 ifindex)
{
	struct net_bridge_port *res = NULL;
	struct net_bridge_port *port;

	list_for_each_entry(port, &br->port_list, list) {
		if (port->dev->ifindex == ifindex) {
			res = port;
			break;
		}
	}

	return res;
}

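/* Find the MRP instance with the given ring_id, NULL if none */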
static struct br_mrp *br_mrp_find_id(struct net_bridge *br, u32 ring_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->ring_id == ring_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

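/* Find the MRP instance with the given interconnect id, NULL if none */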
static struct br_mrp *br_mrp_find_in_id(struct net_bridge *br, u32 in_id)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (mrp->in_id == in_id) {
			res = mrp;
			break;
		}
	}

	return res;
}

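/* Return false if the ifindex is already used as a ring or interconnect port
 * by any MRP instance on this bridge
 */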
static bool br_mrp_unique_ifindex(struct net_bridge *br, u32 ifindex)
{
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		struct net_bridge_port *p;

		p = rtnl_dereference(mrp->p_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->s_port);
		if (p && p->dev->ifindex == ifindex)
			return false;

		p = rtnl_dereference(mrp->i_port);
		if (p && p->dev->ifindex == ifindex)
			return false;
	}

	return true;
}

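/* Find the MRP instance that uses the given port as a ring or interconnect
 * port, NULL if none
 */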
static struct br_mrp *br_mrp_find_port(struct net_bridge *br,
				       struct net_bridge_port *p)
{
	struct br_mrp *res = NULL;
	struct br_mrp *mrp;

	hlist_for_each_entry_rcu(mrp, &br->mrp_list, list,
				 lockdep_rtnl_is_held()) {
		if (rcu_access_pointer(mrp->p_port) == p ||
		    rcu_access_pointer(mrp->s_port) == p ||
		    rcu_access_pointer(mrp->i_port) == p) {
			res = mrp;
			break;
		}
	}

	return res;
}

static int br_mrp_next_seq(struct br_mrp *mrp)
{
	mrp->seq_id++;
	return mrp->seq_id;
}

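/* Allocate an skb for an MRP frame and fill in the Ethernet header and the
 * MRP version; the caller appends the TLVs
 */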
static struct sk_buff *br_mrp_skb_alloc(struct net_bridge_port *p,
					const u8 *src, const u8 *dst)
{
	struct ethhdr *eth_hdr;
	struct sk_buff *skb;
	__be16 *version;

	skb = dev_alloc_skb(MRP_MAX_FRAME_LENGTH);
	if (!skb)
		return NULL;

	skb->dev = p->dev;
	skb->protocol = htons(ETH_P_MRP);
	skb->priority = MRP_FRAME_PRIO;
	skb_reserve(skb, sizeof(*eth_hdr));

	eth_hdr = skb_push(skb, sizeof(*eth_hdr));
	ether_addr_copy(eth_hdr->h_dest, dst);
	ether_addr_copy(eth_hdr->h_source, src);
	eth_hdr->h_proto = htons(ETH_P_MRP);

	version = skb_put(skb, sizeof(*version));
	*version = cpu_to_be16(MRP_VERSION);

	return skb;
}

static void br_mrp_skb_tlv(struct sk_buff *skb,
			   enum br_mrp_tlv_header_type type,
			   u8 length)
{
	struct br_mrp_tlv_hdr *hdr;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->type = type;
	hdr->length = length;
}

static void br_mrp_skb_common(struct sk_buff *skb, struct br_mrp *mrp)
{
	struct br_mrp_common_hdr *hdr;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_COMMON, sizeof(*hdr));

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->seq_id = cpu_to_be16(br_mrp_next_seq(mrp));
	memset(hdr->domain, 0xff, MRP_DOMAIN_UUID_LENGTH);
}

static struct sk_buff *br_mrp_alloc_test_skb(struct br_mrp *mrp,
					     struct net_bridge_port *p,
					     enum br_mrp_port_role_type port_role)
{
	struct br_mrp_ring_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_RING_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->prio = cpu_to_be16(mrp->prio);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->ring_state);
	hdr->transitions = cpu_to_be16(mrp->ring_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);

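	/* In case the node behaves as MRA then the Test frame needs to have
	 * an Option TLV which includes a sub-option TLV that has the type
	 * AUTO_MGR
	 */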
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
		struct br_mrp_sub_option1_hdr *sub_opt = NULL;
		struct br_mrp_tlv_hdr *sub_tlv = NULL;
		struct br_mrp_oui_hdr *oui = NULL;
		u8 length;

		length = sizeof(*sub_opt) + sizeof(*sub_tlv) + sizeof(*oui) +
			 MRP_OPT_PADDING;
		br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_OPTION, length);

		oui = skb_put(skb, sizeof(*oui));
		memset(oui, 0x0, sizeof(*oui));
		sub_opt = skb_put(skb, sizeof(*sub_opt));
		memset(sub_opt, 0x0, sizeof(*sub_opt));

		sub_tlv = skb_put(skb, sizeof(*sub_tlv));
		sub_tlv->type = BR_MRP_SUB_TLV_HEADER_TEST_AUTO_MGR;

		/* Pad the Option TLV up to the length advertised above */
		skb_put(skb, MRP_OPT_PADDING);
	}

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

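/* Allocate and fill an MRP_InTest frame for the given port and port role */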
static struct sk_buff *br_mrp_alloc_in_test_skb(struct br_mrp *mrp,
						struct net_bridge_port *p,
						enum br_mrp_port_role_type port_role)
{
	struct br_mrp_in_test_hdr *hdr = NULL;
	struct sk_buff *skb = NULL;

	if (!p)
		return NULL;

	skb = br_mrp_skb_alloc(p, p->dev->dev_addr, mrp_in_test_dmac);
	if (!skb)
		return NULL;

	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_IN_TEST, sizeof(*hdr));
	hdr = skb_put(skb, sizeof(*hdr));

	hdr->id = cpu_to_be16(mrp->in_id);
	ether_addr_copy(hdr->sa, p->br->dev->dev_addr);
	hdr->port_role = cpu_to_be16(port_role);
	hdr->state = cpu_to_be16(mrp->in_state);
	hdr->transitions = cpu_to_be16(mrp->in_transitions);
	hdr->timestamp = cpu_to_be32(jiffies_to_msecs(jiffies));

	br_mrp_skb_common(skb, mrp);
	br_mrp_skb_tlv(skb, BR_MRP_TLV_HEADER_END, 0x0);

	return skb;
}

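/* This function is continuously called in the following cases:
 * - when the node role is MRM, in which case test_monitor is always false
 *   because the node needs to send MRP_Test frames and to notify the
 *   userspace when the ring is open
 * - when the node role is MRA, with two subcases:
 *     - when MRA behaves as MRM, which is similar to the MRM role
 *     - when MRA behaves as MRC, in which case test_monitor is true,
 *       because it needs to detect when it stops seeing MRP_Test frames
 *       from the MRM node but it doesn't need to send MRP_Test frames
 */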
static void br_mrp_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp, test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->test_end, jiffies))
		return;

	if (mrp->test_count_miss < mrp->test_max_miss) {
		mrp->test_count_miss++;
	} else {
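		/* Notify that the ring is open only if the ring state is
		 * closed, otherwise it would continue to notify at every
		 * interval.
		 * Also notify that the ring is open when the node works as
		 * MRA in monitor mode (test_monitor set), because userspace
		 * needs to know when the MRM stopped sending MRP_Test frames
		 * so that this node can try to take the MRM role.
		 */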
		if (mrp->ring_state == BR_MRP_RING_STATE_CLOSED ||
		    mrp->test_monitor)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_PRIMARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		if (!mrp->test_monitor) {
			skb = br_mrp_alloc_test_skb(mrp, p,
						    BR_MRP_PORT_ROLE_SECONDARY);
			if (!skb)
				goto out;

			skb_reset_network_header(skb);
			dev_queue_xmit(skb);
		}

		if (notify_open && !mrp->ring_role_offloaded)
			br_mrp_ring_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(mrp->test_interval));
}

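/* This function is continuously called when the node has the interconnect
 * role MIM. It generates MRP_InTest frames, sends them on all three ports
 * and checks whether it stopped receiving MRP_InTest frames.
 */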
static void br_mrp_in_test_work_expired(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct br_mrp *mrp = container_of(del_work, struct br_mrp,
					  in_test_work);
	struct net_bridge_port *p;
	bool notify_open = false;
	struct sk_buff *skb;

	if (time_before_eq(mrp->in_test_end, jiffies))
		return;

	if (mrp->in_test_count_miss < mrp->in_test_max_miss) {
		mrp->in_test_count_miss++;
	} else {
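		/* Notify that the interconnect ring is open only if the
		 * interconnect ring state is closed, otherwise it would
		 * continue to notify at every interval.
		 */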
		if (mrp->in_state == BR_MRP_IN_STATE_CLOSED)
			notify_open = true;
	}

	rcu_read_lock();

	p = rcu_dereference(mrp->p_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_PRIMARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->s_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_SECONDARY);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

	p = rcu_dereference(mrp->i_port);
	if (p) {
		skb = br_mrp_alloc_in_test_skb(mrp, p,
					       BR_MRP_PORT_ROLE_INTER);
		if (!skb)
			goto out;

		skb_reset_network_header(skb);
		dev_queue_xmit(skb);

		if (notify_open && !mrp->in_role_offloaded)
			br_mrp_in_port_open(p->dev, true);
	}

out:
	rcu_read_unlock();

	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(mrp->in_test_interval));
}

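/* Deletes the MRP instance.
 * note: called under rtnl_lock
 */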
static void br_mrp_del_impl(struct net_bridge *br, struct br_mrp *mrp)
{
	struct net_bridge_port *p;
	u8 state;

	/* Stop sending MRP_Test frames */
	cancel_delayed_work_sync(&mrp->test_work);
	br_mrp_switchdev_send_ring_test(br, mrp, 0, 0, 0, 0);

	/* Stop sending MRP_InTest frames if it has an interconnect role */
	cancel_delayed_work_sync(&mrp->in_test_work);
	br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

	/* Disable the roles */
	br_mrp_switchdev_set_ring_role(br, mrp, BR_MRP_RING_ROLE_DISABLED);
	p = rtnl_dereference(mrp->i_port);
	if (p)
		br_mrp_switchdev_set_in_role(br, mrp, mrp->in_id, mrp->ring_id,
					     BR_MRP_IN_ROLE_DISABLED);

	br_mrp_switchdev_del(br, mrp);

	/* Reset the ports */
	p = rtnl_dereference(mrp->p_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->p_port, NULL);
	}

	p = rtnl_dereference(mrp->s_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->s_port, NULL);
	}

	p = rtnl_dereference(mrp->i_port);
	if (p) {
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);
	}

	hlist_del_rcu(&mrp->list);
	kfree_rcu(mrp, rcu);

	if (hlist_empty(&br->mrp_list))
		br_del_frame(br, &mrp_frame_type);
}

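/* Adds a new MRP instance.
 * note: called under rtnl_lock
 */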
int br_mrp_add(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct net_bridge_port *p;
	struct br_mrp *mrp;
	int err;

	/* If the ring already exists, it is not possible to create another
	 * one with the same ring_id.
	 */
	mrp = br_mrp_find_id(br, instance->ring_id);
	if (mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, instance->p_ifindex) ||
	    !br_mrp_get_port(br, instance->s_ifindex))
		return -EINVAL;

	/* The ports must not be part of another MRP instance */
	if (!br_mrp_unique_ifindex(br, instance->p_ifindex) ||
	    !br_mrp_unique_ifindex(br, instance->s_ifindex))
		return -EINVAL;

	mrp = kzalloc(sizeof(*mrp), GFP_KERNEL);
	if (!mrp)
		return -ENOMEM;

	mrp->ring_id = instance->ring_id;
	mrp->prio = instance->prio;

	p = br_mrp_get_port(br, instance->p_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->p_port, p);

	p = br_mrp_get_port(br, instance->s_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->s_port, p);

	if (hlist_empty(&br->mrp_list))
		br_add_frame(br, &mrp_frame_type);

	INIT_DELAYED_WORK(&mrp->test_work, br_mrp_test_work_expired);
	INIT_DELAYED_WORK(&mrp->in_test_work, br_mrp_in_test_work_expired);
	hlist_add_tail_rcu(&mrp->list, &br->mrp_list);

	err = br_mrp_switchdev_add(br, mrp);
	if (err)
		goto delete_mrp;

	return 0;

delete_mrp:
	br_mrp_del_impl(br, mrp);

	return err;
}

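/* Deletes the MRP instance that the port is part of
 * note: called under rtnl_lock
 */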
void br_mrp_port_del(struct net_bridge *br, struct net_bridge_port *p)
{
	struct br_mrp *mrp = br_mrp_find_port(br, p);

	/* If the port is not part of an MRP instance just bail out */
	if (!mrp)
		return;

	br_mrp_del_impl(br, mrp);
}

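/* Deletes an existing MRP instance based on the ring_id
 * note: called under rtnl_lock
 */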
int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
{
	struct br_mrp *mrp = br_mrp_find_id(br, instance->ring_id);

	if (!mrp)
		return -EINVAL;

	br_mrp_del_impl(br, mrp);

	return 0;
}

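/* Set port state, which can be forwarding, blocked or disabled
 * note: already called with rtnl_lock
 */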
int br_mrp_set_port_state(struct net_bridge_port *p,
			  enum br_mrp_port_state_type state)
{
	u32 port_state;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	spin_lock_bh(&p->br->lock);

	if (state == BR_MRP_PORT_STATE_FORWARDING)
		port_state = BR_STATE_FORWARDING;
	else
		port_state = BR_STATE_BLOCKING;

	p->state = port_state;
	spin_unlock_bh(&p->br->lock);

	br_mrp_port_switchdev_set_state(p, port_state);

	return 0;
}

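/* Set port role, which can be primary or secondary
 * note: already called with rtnl_lock
 */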
int br_mrp_set_port_role(struct net_bridge_port *p,
			 enum br_mrp_port_role_type role)
{
	struct br_mrp *mrp;

	if (!p || !(p->flags & BR_MRP_AWARE))
		return -EINVAL;

	mrp = br_mrp_find_port(p->br, p);

	if (!mrp)
		return -EINVAL;

	switch (role) {
	case BR_MRP_PORT_ROLE_PRIMARY:
		rcu_assign_pointer(mrp->p_port, p);
		break;
	case BR_MRP_PORT_ROLE_SECONDARY:
		rcu_assign_pointer(mrp->s_port, p);
		break;
	default:
		return -EINVAL;
	}

	br_mrp_port_switchdev_set_role(p, role);

	return 0;
}

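/* Set ring state, which can be only Open or Closed
 * note: already called with rtnl_lock
 */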
int br_mrp_set_ring_state(struct net_bridge *br,
			  struct br_mrp_ring_state *state)
{
	struct br_mrp *mrp = br_mrp_find_id(br, state->ring_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->ring_state != state->ring_state)
		mrp->ring_transitions++;

	mrp->ring_state = state->ring_state;

	br_mrp_switchdev_set_ring_state(br, mrp, state->ring_state);

	return 0;
}

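/* Set ring role, which can be only MRM (Media Redundancy Manager) or
 * MRC (Media Redundancy Client).
 * note: already called with rtnl_lock
 */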
int br_mrp_set_ring_role(struct net_bridge *br,
			 struct br_mrp_ring_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	mrp->ring_role = role->ring_role;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_ring_role(br, mrp, role->ring_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW no longer needs to do those
	 * operations. For example if the role is MRM then the HW will notify
	 * the SW when the ring is open, but if the role is not pushed to the
	 * HW the SW will need to detect when the ring is open.
	 */
	mrp->ring_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}

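/* Start to generate or monitor MRP test frames. The frames are generated by
 * the HW and if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */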
int br_mrp_start_test(struct net_bridge *br,
		      struct br_mrp_start_test *test)
{
	struct br_mrp *mrp = br_mrp_find_id(br, test->ring_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_ring_test(br, mrp, test->interval,
						  test->max_miss, test->period,
						  test->monitor);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->test_interval = test->interval;
	mrp->test_end = jiffies + usecs_to_jiffies(test->period);
	mrp->test_max_miss = test->max_miss;
	mrp->test_monitor = test->monitor;
	mrp->test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->test_work,
			   usecs_to_jiffies(test->interval));

	return 0;
}

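/* Set interconnect state, which can be only Open or Closed
 * note: already called with rtnl_lock
 */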
int br_mrp_set_in_state(struct net_bridge *br, struct br_mrp_in_state *state)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, state->in_id);

	if (!mrp)
		return -EINVAL;

	if (mrp->in_state != state->in_state)
		mrp->in_transitions++;

	mrp->in_state = state->in_state;

	br_mrp_switchdev_set_in_state(br, mrp, state->in_state);

	return 0;
}

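/* Set interconnect role, which can be only MIM (Media Interconnection
 * Manager) or MIC (Media Interconnection Client).
 * note: already called with rtnl_lock
 */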
int br_mrp_set_in_role(struct net_bridge *br, struct br_mrp_in_role *role)
{
	struct br_mrp *mrp = br_mrp_find_id(br, role->ring_id);
	enum br_mrp_hw_support support;
	struct net_bridge_port *p;

	if (!mrp)
		return -EINVAL;

	if (!br_mrp_get_port(br, role->i_ifindex))
		return -EINVAL;

	if (role->in_role == BR_MRP_IN_ROLE_DISABLED) {
		u8 state;

		/* It is not allowed to disable a port that doesn't exist */
		p = rtnl_dereference(mrp->i_port);
		if (!p)
			return -EINVAL;

		/* Stop generating MRP_InTest frames */
		cancel_delayed_work_sync(&mrp->in_test_work);
		br_mrp_switchdev_send_in_test(br, mrp, 0, 0, 0);

		/* Remove the port */
		spin_lock_bh(&br->lock);
		state = netif_running(br->dev) ?
				BR_STATE_FORWARDING : BR_STATE_DISABLED;
		p->state = state;
		p->flags &= ~BR_MRP_AWARE;
		spin_unlock_bh(&br->lock);
		br_mrp_port_switchdev_set_state(p, state);
		rcu_assign_pointer(mrp->i_port, NULL);

		mrp->in_role = role->in_role;
		mrp->in_id = 0;

		return 0;
	}

	/* It is not possible to have the same port part of multiple rings */
	if (!br_mrp_unique_ifindex(br, role->i_ifindex))
		return -EINVAL;

	/* It is not allowed to set a different interconnect port if the mrp
	 * instance already has one. First it needs to be deleted and after
	 * that the new port can be set.
	 */
	if (rcu_access_pointer(mrp->i_port))
		return -EINVAL;

	p = br_mrp_get_port(br, role->i_ifindex);
	spin_lock_bh(&br->lock);
	p->state = BR_STATE_FORWARDING;
	p->flags |= BR_MRP_AWARE;
	spin_unlock_bh(&br->lock);
	rcu_assign_pointer(mrp->i_port, p);

	mrp->in_role = role->in_role;
	mrp->in_id = role->in_id;

	/* If there is an error just bail out */
	support = br_mrp_switchdev_set_in_role(br, mrp, role->in_id,
					       role->ring_id, role->in_role);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	/* Now detect if the HW actually applied the role or not. If the HW
	 * applied the role it means that the SW no longer needs to do those
	 * operations. For example if the role is MIM then the HW will notify
	 * the SW when the interconnect ring is open, but if the role is not
	 * pushed to the HW the SW will need to detect when the interconnect
	 * ring is open.
	 */
	mrp->in_role_offloaded = support == BR_MRP_SW ? 0 : 1;

	return 0;
}

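/* Start to generate MRP_InTest frames. The frames are generated by the HW and
 * if that fails, they are generated by the SW.
 * note: already called with rtnl_lock
 */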
int br_mrp_start_in_test(struct net_bridge *br,
			 struct br_mrp_start_in_test *in_test)
{
	struct br_mrp *mrp = br_mrp_find_in_id(br, in_test->in_id);
	enum br_mrp_hw_support support;

	if (!mrp)
		return -EINVAL;

	if (mrp->in_role != BR_MRP_IN_ROLE_MIM)
		return -EINVAL;

	/* Try to push it to the HW and if it fails then continue with SW
	 * implementation and if that also fails then return error.
	 */
	support = br_mrp_switchdev_send_in_test(br, mrp, in_test->interval,
						in_test->max_miss,
						in_test->period);
	if (support == BR_MRP_NONE)
		return -EOPNOTSUPP;

	if (support == BR_MRP_HW)
		return 0;

	mrp->in_test_interval = in_test->interval;
	mrp->in_test_end = jiffies + usecs_to_jiffies(in_test->period);
	mrp->in_test_max_miss = in_test->max_miss;
	mrp->in_test_count_miss = 0;
	queue_delayed_work(system_wq, &mrp->in_test_work,
			   usecs_to_jiffies(in_test->interval));

	return 0;
}

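/* Determine if the frame type is a ring frame */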
static bool br_mrp_ring_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_RING_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_RING_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_OPTION)
		return true;

	return false;
}

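/* Determine if the frame type is an interconnect frame */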
static bool br_mrp_in_frame(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	if (hdr->type == BR_MRP_TLV_HEADER_IN_TEST ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_TOPO ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_DOWN ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
	    hdr->type == BR_MRP_TLV_HEADER_IN_LINK_STATUS)
		return true;

	return false;
}

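/* Process only MRP_Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */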
static void br_mrp_mrm_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * So skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	mrp->test_count_miss = 0;

	/* Notify the userspace that the ring is closed only when the ring
	 * was not already closed.
	 */
	if (mrp->ring_state != BR_MRP_RING_STATE_CLOSED)
		br_mrp_ring_port_open(port->dev, false);
}

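/* Determine if the test frame has a better priority than the node */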
static bool br_mrp_test_better_than_own(struct br_mrp *mrp,
					struct net_bridge *br,
					const struct br_mrp_ring_test_hdr *hdr)
{
	u16 prio = be16_to_cpu(hdr->prio);

	if (prio < mrp->prio ||
	    (prio == mrp->prio &&
	     ether_addr_to_u64(hdr->sa) < ether_addr_to_u64(br->dev->dev_addr)))
		return true;

	return false;
}

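/* Process only MRP_Test frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */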
static void br_mrp_mra_process(struct br_mrp *mrp, struct net_bridge *br,
			       struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_ring_test_hdr *test_hdr;
	struct br_mrp_ring_test_hdr _test_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * So skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return;

	if (hdr->type != BR_MRP_TLV_HEADER_RING_TEST)
		return;

	test_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				      sizeof(_test_hdr), &_test_hdr);
	if (!test_hdr)
		return;

	/* Only frames that have a better priority than the node will
	 * clear the miss counter, because otherwise the node will need
	 * to behave as MRM.
	 */
	if (br_mrp_test_better_than_own(mrp, br, test_hdr))
		mrp->test_count_miss = 0;
}

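/* Process only MRP_InTest frames. All the other MRP frames are processed by
 * the userspace application.
 * note: already called with rcu_read_lock
 */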
static bool br_mrp_mim_process(struct br_mrp *mrp, struct net_bridge_port *port,
			       struct sk_buff *skb)
{
	const struct br_mrp_in_test_hdr *in_hdr;
	struct br_mrp_in_test_hdr _in_hdr;
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * So skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return false;

	/* The check for the InTest frame type was already done */
	in_hdr = skb_header_pointer(skb, sizeof(uint16_t) + sizeof(_hdr),
				    sizeof(_in_hdr), &_in_hdr);
	if (!in_hdr)
		return false;

	/* It needs to process only its own InTest frames */
	if (mrp->in_id != ntohs(in_hdr->id))
		return false;

	mrp->in_test_count_miss = 0;

	/* Notify the userspace that the interconnect ring is closed only
	 * when it was not already closed.
	 */
	if (mrp->in_state != BR_MRP_IN_STATE_CLOSED)
		br_mrp_in_port_open(port->dev, false);

	return true;
}

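/* Get the MRP frame type
 * note: already called with rcu_read_lock
 */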
static u8 br_mrp_get_frame_type(struct sk_buff *skb)
{
	const struct br_mrp_tlv_hdr *hdr;
	struct br_mrp_tlv_hdr _hdr;

	/* Each MRP header starts with a version field which is 16 bits.
	 * So skip the version and get directly the TLV header.
	 */
	hdr = skb_header_pointer(skb, sizeof(uint16_t), sizeof(_hdr), &_hdr);
	if (!hdr)
		return 0xff;

	return hdr->type;
}

static bool br_mrp_mrm_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRM ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && !mrp->test_monitor))
		return true;

	return false;
}

static bool br_mrp_mrc_behaviour(struct br_mrp *mrp)
{
	if (mrp->ring_role == BR_MRP_RING_ROLE_MRC ||
	    (mrp->ring_role == BR_MRP_RING_ROLE_MRA && mrp->test_monitor))
		return true;

	return false;
}

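/* This will just forward the frame to the other mrp ring ports, depending on
 * the frame type, the ring role and the interconnect role.
 * note: already called with rcu_read_lock
 */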
static int br_mrp_rcv(struct net_bridge_port *p,
		      struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge_port *p_port, *s_port, *i_port = NULL;
	struct net_bridge_port *p_dst, *s_dst, *i_dst = NULL;
	struct net_bridge *br;
	struct br_mrp *mrp;

	/* If the port is disabled don't accept any frames */
	if (p->state == BR_STATE_DISABLED)
		return 0;

	br = p->br;
	mrp = br_mrp_find_port(br, p);
	if (unlikely(!mrp))
		return 0;

	p_port = rcu_dereference(mrp->p_port);
	if (!p_port)
		return 0;
	p_dst = p_port;

	s_port = rcu_dereference(mrp->s_port);
	if (!s_port)
		return 0;
	s_dst = s_port;

	/* If the frame is a ring frame then it is not required to check the
	 * interconnect role and ports to process or forward the frame.
	 */
	if (br_mrp_ring_frame(skb)) {
		/* If the role is MRM then don't forward the frames */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRM) {
			br_mrp_mrm_process(mrp, p, skb);
			goto no_forward;
		}

		/* If the role is MRA then don't forward the frames if it
		 * behaves as an MRM node.
		 */
		if (mrp->ring_role == BR_MRP_RING_ROLE_MRA) {
			if (!mrp->test_monitor) {
				br_mrp_mrm_process(mrp, p, skb);
				goto no_forward;
			}

			br_mrp_mra_process(mrp, br, p, skb);
		}

		goto forward;
	}

	if (br_mrp_in_frame(skb)) {
		u8 in_type = br_mrp_get_frame_type(skb);

		i_port = rcu_dereference(mrp->i_port);
		i_dst = i_port;

		/* If the ring port is in blocking state it should not
		 * forward InTest frames.
		 */
		if (br_mrp_is_ring_port(p_port, s_port, p) &&
		    p->state == BR_STATE_BLOCKING &&
		    in_type == BR_MRP_TLV_HEADER_IN_TEST)
			goto no_forward;

		/* Nodes that behave as MRM need to stop forwarding the
		 * frames in case the ring is closed, otherwise there would
		 * be a loop. In this case the frames are not forwarded
		 * between the ring ports.
		 */
		if (br_mrp_mrm_behaviour(mrp) &&
		    br_mrp_is_ring_port(p_port, s_port, p) &&
		    (s_port->state != BR_STATE_FORWARDING ||
		     p_port->state != BR_STATE_FORWARDING)) {
			p_dst = NULL;
			s_dst = NULL;
		}

		/* A node that behaves as MRC and doesn't have an
		 * interconnect role should forward all frames between the
		 * ring ports, because it doesn't have an interconnect port.
		 */
		if (br_mrp_mrc_behaviour(mrp) &&
		    mrp->in_role == BR_MRP_IN_ROLE_DISABLED)
			goto forward;

		if (mrp->in_role == BR_MRP_IN_ROLE_MIM) {
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST) {
				/* MIM should not forward its own InTest
				 * frames.
				 */
				if (br_mrp_mim_process(mrp, p, skb)) {
					goto no_forward;
				} else {
					if (br_mrp_is_ring_port(p_port, s_port,
								p))
						i_dst = NULL;

					if (br_mrp_is_in_port(i_port, p))
						goto no_forward;
				}
			} else {
				/* MIM should forward IntLinkChange/Status and
				 * IntTopoChange between the ring ports, but
				 * it should not forward them if the frame was
				 * received at the interconnect port.
				 */
				if (br_mrp_is_ring_port(p_port, s_port, p))
					i_dst = NULL;

				if (br_mrp_is_in_port(i_port, p))
					goto no_forward;
			}
		}

		if (mrp->in_role == BR_MRP_IN_ROLE_MIC) {
			/* MIC should forward InTest frames on all ports
			 * regardless of the received port.
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TEST)
				goto forward;

			/* MIC should forward IntLinkChange frames to all
			 * ports only if they were received on a ring port.
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    (in_type == BR_MRP_TLV_HEADER_IN_LINK_UP ||
			     in_type == BR_MRP_TLV_HEADER_IN_LINK_DOWN))
				goto forward;

			/* MIC should forward IntLinkStatus frames only to the
			 * interconnect port if they were received on a ring
			 * port. If they were received on the interconnect
			 * port they should be forwarded on both ring ports.
			 */
			if (br_mrp_is_ring_port(p_port, s_port, p) &&
			    in_type == BR_MRP_TLV_HEADER_IN_LINK_STATUS) {
				p_dst = NULL;
				s_dst = NULL;
			}

			/* Should forward the InTopo frames only on the
			 * interconnect port.
			 */
			if (in_type == BR_MRP_TLV_HEADER_IN_TOPO) {
				i_dst = NULL;
				goto forward;
			}

			/* In all the other cases don't forward the frames */
			goto no_forward;
		}
	}

forward:
	if (p_dst)
		br_forward(p_dst, skb, true, false);
	if (s_dst)
		br_forward(s_dst, skb, true, false);
	if (i_dst)
		br_forward(i_dst, skb, true, false);

no_forward:
	return 1;
}

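/* Check if the frame was received on a port that is part of an MRP ring
 * and if the frame has the MRP ethertype. In that case process the frame,
 * otherwise do normal forwarding.
 * note: already called with rcu_read_lock
 */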
static int br_mrp_process(struct net_bridge_port *p, struct sk_buff *skb)
{
	/* If the port is not MRP aware do normal forwarding */
	if (likely(!(p->flags & BR_MRP_AWARE)))
		goto out;

	return br_mrp_rcv(p, skb, p->dev);
out:
	return 0;
}

bool br_mrp_enabled(struct net_bridge *br)
{
	return !hlist_empty(&br->mrp_list);
}