0001
0002
0003
0004
0005
0006
0007 #include <linux/if_bridge.h>
0008 #include <net/switchdev.h>
0009
0010 #include "sparx5_main_regs.h"
0011 #include "sparx5_main.h"
0012
0013 static struct workqueue_struct *sparx5_owq;
0014
/* Deferred FDB event, processed later on the ordered workqueue. */
struct sparx5_switchdev_event_work {
	struct work_struct work;                     /* queued on sparx5_owq */
	struct switchdev_notifier_fdb_info fdb_info; /* copy of notifier payload (addr deep-copied) */
	struct net_device *dev;                      /* originating device; reference held until work runs */
	struct sparx5 *sparx5;                       /* owning switch instance */
	unsigned long event;                         /* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
};
0022
0023 static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
0024 struct switchdev_brport_flags flags)
0025 {
0026 if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
0027 return -EINVAL;
0028
0029 return 0;
0030 }
0031
0032 static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
0033 struct switchdev_brport_flags flags)
0034 {
0035 int pgid;
0036
0037 if (flags.mask & BR_MCAST_FLOOD)
0038 for (pgid = PGID_MC_FLOOD; pgid <= PGID_IPV6_MC_CTRL; pgid++)
0039 sparx5_pgid_update_mask(port, pgid, !!(flags.val & BR_MCAST_FLOOD));
0040 if (flags.mask & BR_FLOOD)
0041 sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
0042 if (flags.mask & BR_BCAST_FLOOD)
0043 sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
0044 }
0045
0046 static void sparx5_attr_stp_state_set(struct sparx5_port *port,
0047 u8 state)
0048 {
0049 struct sparx5 *sparx5 = port->sparx5;
0050
0051 if (!test_bit(port->portno, sparx5->bridge_mask)) {
0052 netdev_err(port->ndev,
0053 "Controlling non-bridged port %d?\n", port->portno);
0054 return;
0055 }
0056
0057 switch (state) {
0058 case BR_STATE_FORWARDING:
0059 set_bit(port->portno, sparx5->bridge_fwd_mask);
0060 fallthrough;
0061 case BR_STATE_LEARNING:
0062 set_bit(port->portno, sparx5->bridge_lrn_mask);
0063 break;
0064
0065 default:
0066
0067 clear_bit(port->portno, sparx5->bridge_fwd_mask);
0068 clear_bit(port->portno, sparx5->bridge_lrn_mask);
0069 break;
0070 }
0071
0072
0073 sparx5_update_fwd(sparx5);
0074 }
0075
0076 static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
0077 unsigned long ageing_clock_t)
0078 {
0079 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
0080 u32 ageing_time = jiffies_to_msecs(ageing_jiffies);
0081
0082 sparx5_set_ageing(port->sparx5, ageing_time);
0083 }
0084
0085 static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
0086 const struct switchdev_attr *attr,
0087 struct netlink_ext_ack *extack)
0088 {
0089 struct sparx5_port *port = netdev_priv(dev);
0090
0091 switch (attr->id) {
0092 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
0093 return sparx5_port_attr_pre_bridge_flags(port,
0094 attr->u.brport_flags);
0095 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
0096 sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
0097 break;
0098 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
0099 sparx5_attr_stp_state_set(port, attr->u.stp_state);
0100 break;
0101 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
0102 sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
0103 break;
0104 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
0105
0106
0107
0108 if (port->pvid == 0)
0109 port->pvid = 1;
0110 port->vlan_aware = attr->u.vlan_filtering;
0111 sparx5_vlan_port_apply(port->sparx5, port);
0112 break;
0113 default:
0114 return -EOPNOTSUPP;
0115 }
0116
0117 return 0;
0118 }
0119
0120 static int sparx5_port_bridge_join(struct sparx5_port *port,
0121 struct net_device *bridge,
0122 struct netlink_ext_ack *extack)
0123 {
0124 struct sparx5 *sparx5 = port->sparx5;
0125 struct net_device *ndev = port->ndev;
0126 int err;
0127
0128 if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
0129
0130 sparx5->hw_bridge_dev = bridge;
0131 else
0132 if (sparx5->hw_bridge_dev != bridge)
0133
0134
0135
0136 return -ENODEV;
0137
0138 set_bit(port->portno, sparx5->bridge_mask);
0139
0140 err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
0141 false, extack);
0142 if (err)
0143 goto err_switchdev_offload;
0144
0145
0146 sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
0147
0148
0149
0150
0151 __dev_mc_unsync(ndev, sparx5_mc_unsync);
0152
0153 return 0;
0154
0155 err_switchdev_offload:
0156 clear_bit(port->portno, sparx5->bridge_mask);
0157 return err;
0158 }
0159
/* Detach a port from the offloaded bridge and restore standalone
 * (host) operation.
 */
static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	/* Forget the hw bridge when the last port has left it */
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge VLAN settings before re-applying port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Back in host mode: learn the port's own MAC towards the CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* Back in host mode: restore the port's own multicast list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}
0182
0183 static int sparx5_port_changeupper(struct net_device *dev,
0184 struct netdev_notifier_changeupper_info *info)
0185 {
0186 struct sparx5_port *port = netdev_priv(dev);
0187 struct netlink_ext_ack *extack;
0188 int err = 0;
0189
0190 extack = netdev_notifier_info_to_extack(&info->info);
0191
0192 if (netif_is_bridge_master(info->upper_dev)) {
0193 if (info->linking)
0194 err = sparx5_port_bridge_join(port, info->upper_dev,
0195 extack);
0196 else
0197 sparx5_port_bridge_leave(port, info->upper_dev);
0198
0199 sparx5_vlan_port_apply(port->sparx5, port);
0200 }
0201
0202 return err;
0203 }
0204
0205 static int sparx5_port_add_addr(struct net_device *dev, bool up)
0206 {
0207 struct sparx5_port *port = netdev_priv(dev);
0208 struct sparx5 *sparx5 = port->sparx5;
0209 u16 vid = port->pvid;
0210
0211 if (up)
0212 sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
0213 else
0214 sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);
0215
0216 return 0;
0217 }
0218
0219 static int sparx5_netdevice_port_event(struct net_device *dev,
0220 struct notifier_block *nb,
0221 unsigned long event, void *ptr)
0222 {
0223 int err = 0;
0224
0225 if (!sparx5_netdevice_check(dev))
0226 return 0;
0227
0228 switch (event) {
0229 case NETDEV_CHANGEUPPER:
0230 err = sparx5_port_changeupper(dev, ptr);
0231 break;
0232 case NETDEV_PRE_UP:
0233 err = sparx5_port_add_addr(dev, true);
0234 break;
0235 case NETDEV_DOWN:
0236 err = sparx5_port_add_addr(dev, false);
0237 break;
0238 }
0239
0240 return err;
0241 }
0242
/* Netdevice notifier entry point. */
static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	return notifier_from_errno(sparx5_netdevice_port_event(dev, nb,
							       event, ptr));
}
0253
/* Worker for deferred FDB add/del events.
 *
 * Runs with rtnl held; learns or forgets the MAC table entry and then
 * releases everything taken when the work item was queued (the address
 * copy, the work struct, and the device reference).
 */
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		/* Not one of our ports: treat as a host (CPU) address */
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		/* port is only valid (and only used) when !host_addr */
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Untagged entries arrive with vid 0; use vid 1 instead */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev); /* drop the reference taken at queue time */
}
0304
/* Queue deferred work on the driver's ordered workqueue. */
static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}
0309
/* Atomic switchdev notifier.
 *
 * PORT_ATTR_SET is handled inline; FDB add/del events are copied and
 * deferred to the ordered workqueue, since handling them takes rtnl
 * and may sleep.
 */
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Atomic notifier context: GFP_ATOMIC allocations only */
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* Deep-copy the MAC address: the notifier data is transient */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Hold the device until the work item has run (dev_put there) */
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
0363
0364 static int sparx5_handle_port_vlan_add(struct net_device *dev,
0365 struct notifier_block *nb,
0366 const struct switchdev_obj_port_vlan *v)
0367 {
0368 struct sparx5_port *port = netdev_priv(dev);
0369
0370 if (netif_is_bridge_master(dev)) {
0371 struct sparx5 *sparx5 =
0372 container_of(nb, struct sparx5,
0373 switchdev_blocking_nb);
0374
0375
0376 sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
0377 v->vid);
0378 return 0;
0379 }
0380
0381 if (!sparx5_netdevice_check(dev))
0382 return -EOPNOTSUPP;
0383
0384 return sparx5_vlan_vid_add(port, v->vid,
0385 v->flags & BRIDGE_VLAN_INFO_PVID,
0386 v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
0387 }
0388
0389 static int sparx5_handle_port_mdb_add(struct net_device *dev,
0390 struct notifier_block *nb,
0391 const struct switchdev_obj_port_mdb *v)
0392 {
0393 struct sparx5_port *port = netdev_priv(dev);
0394 struct sparx5 *spx5 = port->sparx5;
0395 u16 pgid_idx, vid;
0396 u32 mact_entry;
0397 bool is_host;
0398 int res, err;
0399
0400 if (!sparx5_netdevice_check(dev))
0401 return -EOPNOTSUPP;
0402
0403 is_host = netif_is_bridge_master(v->obj.orig_dev);
0404
0405
0406
0407
0408 if (!br_vlan_enabled(spx5->hw_bridge_dev))
0409 vid = 1;
0410 else
0411 vid = v->vid;
0412
0413 res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);
0414
0415 if (res == 0) {
0416 pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);
0417
0418
0419 pgid_idx += SPX5_PORTS;
0420
0421 if (is_host)
0422 spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
0423 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
0424 ANA_AC_PGID_MISC_CFG(pgid_idx));
0425 else
0426 sparx5_pgid_update_mask(port, pgid_idx, true);
0427
0428 } else {
0429 err = sparx5_pgid_alloc_mcast(spx5, &pgid_idx);
0430 if (err) {
0431 netdev_warn(dev, "multicast pgid table full\n");
0432 return err;
0433 }
0434
0435 if (is_host)
0436 spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(1),
0437 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
0438 ANA_AC_PGID_MISC_CFG(pgid_idx));
0439 else
0440 sparx5_pgid_update_mask(port, pgid_idx, true);
0441
0442 err = sparx5_mact_learn(spx5, pgid_idx, v->addr, vid);
0443
0444 if (err) {
0445 netdev_warn(dev, "could not learn mac address %pM\n", v->addr);
0446 sparx5_pgid_free(spx5, pgid_idx);
0447 sparx5_pgid_update_mask(port, pgid_idx, false);
0448 return err;
0449 }
0450 }
0451
0452 return 0;
0453 }
0454
0455 static int sparx5_mdb_del_entry(struct net_device *dev,
0456 struct sparx5 *spx5,
0457 const unsigned char mac[ETH_ALEN],
0458 const u16 vid,
0459 u16 pgid_idx)
0460 {
0461 int err;
0462
0463 err = sparx5_mact_forget(spx5, mac, vid);
0464 if (err) {
0465 netdev_warn(dev, "could not forget mac address %pM", mac);
0466 return err;
0467 }
0468 err = sparx5_pgid_free(spx5, pgid_idx);
0469 if (err) {
0470 netdev_err(dev, "attempted to free already freed pgid\n");
0471 return err;
0472 }
0473 return 0;
0474 }
0475
/* Remove a port (or the host/CPU) from a multicast group.
 *
 * When no front port and no CPU copy remains on the PGID, the whole
 * entry is deleted via sparx5_mdb_del_entry().
 */
static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	u16 pgid_idx, vid;
	u32 mact_entry, res, pgid_entry[3], misc_cfg;
	bool host_ena;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	/* VLAN unaware bridge: entries were added on vid 1 (see mdb_add) */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	res = sparx5_mact_find(spx5, v->addr, vid, &mact_entry);

	if (res == 0) {
		pgid_idx = LRN_MAC_ACCESS_CFG_2_MAC_ENTRY_ADDR_GET(mact_entry);

		/* Multicast PGIDs sit after the per-port masks */
		pgid_idx += SPX5_PORTS;

		/* Host entry: drop the CPU copy; port entry: clear the mask bit */
		if (netif_is_bridge_master(v->obj.orig_dev))
			spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(0),
				 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
				 ANA_AC_PGID_MISC_CFG(pgid_idx));
		else
			sparx5_pgid_update_mask(port, pgid_idx, false);

		/* Re-read hardware state to see if anyone still uses the PGID */
		misc_cfg = spx5_rd(spx5, ANA_AC_PGID_MISC_CFG(pgid_idx));
		host_ena = ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_GET(misc_cfg);

		sparx5_pgid_read_mask(spx5, pgid_idx, pgid_entry);
		if (bitmap_empty((unsigned long *)pgid_entry, SPX5_PORTS) && !host_ena)
			/* No ports or CPU are in MC group. Remove entry */
			return sparx5_mdb_del_entry(dev, spx5, v->addr, vid, pgid_idx);
	}

	return 0;
}
0520
0521 static int sparx5_handle_port_obj_add(struct net_device *dev,
0522 struct notifier_block *nb,
0523 struct switchdev_notifier_port_obj_info *info)
0524 {
0525 const struct switchdev_obj *obj = info->obj;
0526 int err;
0527
0528 switch (obj->id) {
0529 case SWITCHDEV_OBJ_ID_PORT_VLAN:
0530 err = sparx5_handle_port_vlan_add(dev, nb,
0531 SWITCHDEV_OBJ_PORT_VLAN(obj));
0532 break;
0533 case SWITCHDEV_OBJ_ID_PORT_MDB:
0534 case SWITCHDEV_OBJ_ID_HOST_MDB:
0535 err = sparx5_handle_port_mdb_add(dev, nb,
0536 SWITCHDEV_OBJ_PORT_MDB(obj));
0537 break;
0538 default:
0539 err = -EOPNOTSUPP;
0540 break;
0541 }
0542
0543 info->handled = true;
0544 return err;
0545 }
0546
0547 static int sparx5_handle_port_vlan_del(struct net_device *dev,
0548 struct notifier_block *nb,
0549 u16 vid)
0550 {
0551 struct sparx5_port *port = netdev_priv(dev);
0552 int ret;
0553
0554
0555 if (netif_is_bridge_master(dev)) {
0556 struct sparx5 *sparx5 =
0557 container_of(nb, struct sparx5,
0558 switchdev_blocking_nb);
0559
0560 sparx5_mact_forget(sparx5, dev->broadcast, vid);
0561 return 0;
0562 }
0563
0564 if (!sparx5_netdevice_check(dev))
0565 return -EOPNOTSUPP;
0566
0567 ret = sparx5_vlan_vid_del(port, vid);
0568 if (ret)
0569 return ret;
0570
0571 return 0;
0572 }
0573
0574 static int sparx5_handle_port_obj_del(struct net_device *dev,
0575 struct notifier_block *nb,
0576 struct switchdev_notifier_port_obj_info *info)
0577 {
0578 const struct switchdev_obj *obj = info->obj;
0579 int err;
0580
0581 switch (obj->id) {
0582 case SWITCHDEV_OBJ_ID_PORT_VLAN:
0583 err = sparx5_handle_port_vlan_del(dev, nb,
0584 SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
0585 break;
0586 case SWITCHDEV_OBJ_ID_PORT_MDB:
0587 case SWITCHDEV_OBJ_ID_HOST_MDB:
0588 err = sparx5_handle_port_mdb_del(dev, nb,
0589 SWITCHDEV_OBJ_PORT_MDB(obj));
0590 break;
0591 default:
0592 err = -EOPNOTSUPP;
0593 break;
0594 }
0595
0596 info->handled = true;
0597 return err;
0598 }
0599
0600 static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
0601 unsigned long event,
0602 void *ptr)
0603 {
0604 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
0605 int err;
0606
0607 switch (event) {
0608 case SWITCHDEV_PORT_OBJ_ADD:
0609 err = sparx5_handle_port_obj_add(dev, nb, ptr);
0610 return notifier_from_errno(err);
0611 case SWITCHDEV_PORT_OBJ_DEL:
0612 err = sparx5_handle_port_obj_del(dev, nb, ptr);
0613 return notifier_from_errno(err);
0614 case SWITCHDEV_PORT_ATTR_SET:
0615 err = switchdev_handle_port_attr_set(dev, ptr,
0616 sparx5_netdevice_check,
0617 sparx5_port_attr_set);
0618 return notifier_from_errno(err);
0619 }
0620
0621 return NOTIFY_DONE;
0622 }
0623
0624 int sparx5_register_notifier_blocks(struct sparx5 *s5)
0625 {
0626 int err;
0627
0628 s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
0629 err = register_netdevice_notifier(&s5->netdevice_nb);
0630 if (err)
0631 return err;
0632
0633 s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
0634 err = register_switchdev_notifier(&s5->switchdev_nb);
0635 if (err)
0636 goto err_switchdev_nb;
0637
0638 s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
0639 err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
0640 if (err)
0641 goto err_switchdev_blocking_nb;
0642
0643 sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
0644 if (!sparx5_owq) {
0645 err = -ENOMEM;
0646 goto err_switchdev_blocking_nb;
0647 }
0648
0649 return 0;
0650
0651 err_switchdev_blocking_nb:
0652 unregister_switchdev_notifier(&s5->switchdev_nb);
0653 err_switchdev_nb:
0654 unregister_netdevice_notifier(&s5->netdevice_nb);
0655
0656 return err;
0657 }
0658
0659 void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
0660 {
0661 destroy_workqueue(sparx5_owq);
0662
0663 unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
0664 unregister_switchdev_notifier(&s5->switchdev_nb);
0665 unregister_netdevice_notifier(&s5->netdevice_nb);
0666 }