/*
 * Copyright (c) 2015, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
	GID_DEL = 0,
	GID_ADD
};

struct update_gid_event_work {
	struct work_struct work;
	union ib_gid gid;
	struct ib_gid_attr gid_attr;
	enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ 3
struct netdev_event_work_cmd {
	roce_netdev_callback cb;
	roce_netdev_filter filter;
	struct net_device *ndev;
	struct net_device *filter_ndev;
};

struct netdev_event_work {
	struct work_struct work;
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
	bool (*is_supported)(const struct ib_device *device, u32 port_num);
	enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
	{rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
	{rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

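/*
 * roce_gid_type_mask_support - Return a bitmask of the GID types
 * (1UL << IB_GID_TYPE_*) that @ib_dev supports on @port. Ports that do
 * not run RoCE only support IB_GID_TYPE_IB.
 */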
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u32 port)
{
	int i;
	unsigned int ret_flags = 0;

	if (!rdma_protocol_roce(ib_dev, port))
		return 1UL << IB_GID_TYPE_IB;

	for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
		if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
			ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

	return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

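/*
 * update_gid - Add or delete @gid on @port once for every GID type the
 * port supports, as reported by roce_gid_type_mask_support().
 */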
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
		       u32 port, union ib_gid *gid,
		       struct ib_gid_attr *gid_attr)
{
	int i;
	unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
		if ((1UL << i) & gid_type_mask) {
			gid_attr->gid_type = i;
			switch (gid_op) {
			case GID_ADD:
				ib_cache_gid_add(ib_dev, port,
						 gid, gid_attr);
				break;
			case GID_DEL:
				ib_cache_gid_del(ib_dev, port,
						 gid, gid_attr);
				break;
			}
		}
	}
}

enum bonding_slave_state {
	BONDING_SLAVE_STATE_ACTIVE	= 1UL << 0,
	BONDING_SLAVE_STATE_INACTIVE	= 1UL << 1,
	/* No primary slave or the device isn't a slave in bonding */
	BONDING_SLAVE_STATE_NA		= 1UL << 2,
};

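/*
 * is_eth_active_slave_of_bonding_rcu - When @upper is a bond master,
 * report whether @dev is its currently active slave. Must be called
 * under rcu_read_lock().
 */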
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
								   struct net_device *upper)
{
	if (upper && netif_is_bond_master(upper)) {
		struct net_device *pdev =
			bond_option_active_slave_get_rcu(netdev_priv(upper));

		if (pdev)
			return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
				BONDING_SLAVE_STATE_INACTIVE;
	}

	return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES	(BONDING_SLAVE_STATE_ACTIVE |	\
				 BONDING_SLAVE_STATE_NA)
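/*
 * is_eth_port_of_netdev_filter - Accept the event netdevice in @cookie
 * when its real (non-VLAN) device is @rdma_ndev itself, or when it is an
 * upper device of @rdma_ndev and @rdma_ndev is either not bonded or is
 * the bond's active slave.
 */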
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *real_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	real_dev = rdma_vlan_dev_real_dev(cookie);
	if (!real_dev)
		real_dev = cookie;

	res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
	       (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
		REQUIRED_BOND_STATES)) ||
	       real_dev == rdma_ndev);

	rcu_read_unlock();
	return res;
}

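/*
 * is_eth_port_inactive_slave_filter - Accept @rdma_ndev only when it is
 * currently an inactive slave of a bond master.
 */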
static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u32 port,
				  struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *master_dev;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
	      BONDING_SLAVE_STATE_INACTIVE;
	rcu_read_unlock();

	return res;
}

/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev:	IB device to check
 * @port:	Port to consider for adding default GID
 * @rdma_ndev:	rdma netdevice pointer
 * @cookie:	Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving a default RoCE GID, false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u32 port,
			       struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool res;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();

	/*
	 * When the rdma netdevice is used in bonding, the bond master
	 * netdevice should be considered for default GIDs; therefore,
	 * ignore slave rdma netdevices when bonding is considered.
	 * Additionally, when the event (cookie) netdevice is the bond
	 * master, make sure that it is an upper netdevice of the rdma
	 * netdevice.
	 */
	res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
	       (netif_is_bond_master(cookie_ndev) &&
		rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

	rcu_read_unlock();
	return res;
}

static bool pass_all_filter(struct ib_device *ib_dev, u32 port,
			    struct net_device *rdma_ndev, void *cookie)
{
	return true;
}

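/*
 * upper_device_filter - Accept @rdma_ndev when it is the event netdevice
 * in @cookie itself, or when @cookie is one of its upper devices.
 */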
static bool upper_device_filter(struct ib_device *ib_dev, u32 port,
				struct net_device *rdma_ndev, void *cookie)
{
	bool res;

	if (!rdma_ndev)
		return false;

	if (rdma_ndev == cookie)
		return true;

	rcu_read_lock();
	res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
	rcu_read_unlock();

	return res;
}

/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is the bond master device of the netdevice of the RDMA device's port.
 * @ib_dev:	IB device to check
 * @port:	Port to consider for adding default GID
 * @rdma_ndev:	Pointer to rdma netdevice
 * @cookie:	Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if the cookie netdevice
 * is a bond master device and rdma_ndev is its lower netdevice. It might
 * not have been established as a slave device yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev,
				 void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	bool match = false;

	if (!rdma_ndev)
		return false;

	rcu_read_lock();
	if (netif_is_bond_master(cookie_ndev) &&
	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
		match = true;
	rcu_read_unlock();
	return match;
}

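/*
 * update_gid_ip - Convert an IP address to a GID and add or delete it on
 * the given port, with @ndev attached as the GID attribute.
 */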
static void update_gid_ip(enum gid_op_type gid_op,
			  struct ib_device *ib_dev,
			  u32 port, struct net_device *ndev,
			  struct sockaddr *addr)
{
	union ib_gid gid;
	struct ib_gid_attr gid_attr;

	rdma_ip2gid(addr, &gid);
	memset(&gid_attr, 0, sizeof(gid_attr));
	gid_attr.ndev = ndev;

	update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

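/*
 * bond_delete_netdev_default_gids - Delete the default GIDs of
 * @rdma_ndev, unless @event_ndev is unrelated to it or @rdma_ndev is an
 * inactive bond slave.
 */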
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
					    u32 port,
					    struct net_device *rdma_ndev,
					    struct net_device *event_ndev)
{
	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
	unsigned long gid_type_mask;

	if (!rdma_ndev)
		return;

	if (!real_dev)
		real_dev = event_ndev;

	rcu_read_lock();

	if (((rdma_ndev != event_ndev &&
	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
	     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
	     BONDING_SLAVE_STATE_INACTIVE)) {
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
				     gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

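/*
 * enum_netdev_ipv4_ips - Snapshot @ndev's IPv4 addresses into a local
 * list under RCU, then add a GID for each entry outside the RCU section,
 * since updating the GID table may sleep.
 */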
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
				 u32 port, struct net_device *ndev)
{
	const struct in_ifaddr *ifa;
	struct in_device *in_dev;
	struct sin_list {
		struct list_head list;
		struct sockaddr_in ip;
	};
	struct sin_list *sin_iter;
	struct sin_list *sin_temp;

	LIST_HEAD(sin_list);
	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(ndev);
	if (!in_dev) {
		rcu_read_unlock();
		return;
	}

	in_dev_for_each_ifa_rcu(ifa, in_dev) {
		struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->ip.sin_family = AF_INET;
		entry->ip.sin_addr.s_addr = ifa->ifa_address;
		list_add_tail(&entry->list, &sin_list);
	}

	rcu_read_unlock();

	list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
		update_gid_ip(GID_ADD, ib_dev, port, ndev,
			      (struct sockaddr *)&sin_iter->ip);
		list_del(&sin_iter->list);
		kfree(sin_iter);
	}
}

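/*
 * enum_netdev_ipv6_ips - Same snapshot-then-update scheme as the IPv4
 * variant, walking the inet6 address list under the idev lock.
 */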
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
				 u32 port, struct net_device *ndev)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *in6_dev;
	struct sin6_list {
		struct list_head list;
		struct sockaddr_in6 sin6;
	};
	struct sin6_list *sin6_iter;
	struct sin6_list *sin6_temp;
	struct ib_gid_attr gid_attr = {.ndev = ndev};
	LIST_HEAD(sin6_list);

	if (ndev->reg_state >= NETREG_UNREGISTERING)
		return;

	in6_dev = in6_dev_get(ndev);
	if (!in6_dev)
		return;

	read_lock_bh(&in6_dev->lock);
	list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
		struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

		if (!entry)
			continue;

		entry->sin6.sin6_family = AF_INET6;
		entry->sin6.sin6_addr = ifp->addr;
		list_add_tail(&entry->list, &sin6_list);
	}
	read_unlock_bh(&in6_dev->lock);

	in6_dev_put(in6_dev);

	list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
		union ib_gid gid;

		rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
		update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
		list_del(&sin6_iter->list);
		kfree(sin6_iter);
	}
}

static void _add_netdev_ips(struct ib_device *ib_dev, u32 port,
			    struct net_device *ndev)
{
	enum_netdev_ipv4_ips(ib_dev, port, ndev);
	if (IS_ENABLED(CONFIG_IPV6))
		enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u32 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	_add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u32 port,
			   struct net_device *rdma_ndev, void *cookie)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev:	RDMA device pointer
 * @port:	Port of the RDMA device whose GID table to consider
 * @rdma_ndev:	Unused rdma netdevice
 * @cookie:	Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *cookie_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

	ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void add_default_gids(struct ib_device *ib_dev, u32 port,
			     struct net_device *rdma_ndev, void *cookie)
{
	struct net_device *event_ndev = cookie;
	unsigned long gid_type_mask;

	gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
	ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
				     IB_CACHE_GID_DEFAULT_MODE_SET);
}

static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
				    u32 port,
				    struct net_device *rdma_ndev,
				    void *cookie)
{
	struct net *net;
	struct net_device *ndev;

	/* Lock the rtnl to make sure the netdevs do not move under
	 * our feet
	 */
	rtnl_lock();
	down_read(&net_rwsem);
	for_each_net(net)
		for_each_netdev(net, ndev) {
			/*
			 * Filter and add default GIDs of the primary netdevice
			 * when not in bonding mode, or add default GIDs of the
			 * bond master device when in bonding mode.
			 */
			if (is_ndev_for_default_gid_filter(ib_dev, port,
							   rdma_ndev, ndev))
				add_default_gids(ib_dev, port, rdma_ndev, ndev);

			if (is_eth_port_of_netdev_filter(ib_dev, port,
							 rdma_ndev, ndev))
				_add_netdev_ips(ib_dev, port, ndev);
		}
	up_read(&net_rwsem);
	rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their GIDs, as needed, to the relevant RoCE devices.
 *
 * @ib_dev:	the RDMA device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
	ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
			    enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

static void callback_for_addr_gid_device_scan(struct ib_device *device,
					      u32 port,
					      struct net_device *rdma_ndev,
					      void *cookie)
{
	struct update_gid_event_work *parsed = cookie;

	return update_gid(parsed->gid_op, device,
			  port, &parsed->gid,
			  &parsed->gid_attr);
}

struct upper_list {
	struct list_head list;
	struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper,
			     struct netdev_nested_priv *priv)
{
	struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	struct list_head *upper_list = (struct list_head *)priv->data;

	if (!entry)
		return 0;

	list_add_tail(&entry->list, upper_list);
	dev_hold(upper);
	entry->upper = upper;

	return 0;
}

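/*
 * handle_netdev_upper - Collect the whole upper-device chain of @cookie
 * under RCU, taking a reference on each device, then invoke
 * @handle_netdev on the device itself and on every upper device outside
 * the RCU section.
 */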
static void handle_netdev_upper(struct ib_device *ib_dev, u32 port,
				void *cookie,
				void (*handle_netdev)(struct ib_device *ib_dev,
						      u32 port,
						      struct net_device *ndev))
{
	struct net_device *ndev = cookie;
	struct netdev_nested_priv priv;
	struct upper_list *upper_iter;
	struct upper_list *upper_temp;
	LIST_HEAD(upper_list);

	priv.data = &upper_list;
	rcu_read_lock();
	netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &priv);
	rcu_read_unlock();

	handle_netdev(ib_dev, port, ndev);
	list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
				 list) {
		handle_netdev(ib_dev, port, upper_iter->upper);
		dev_put(upper_iter->upper);
		list_del(&upper_iter->list);
		kfree(upper_iter);
	}
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u32 port,
				      struct net_device *event_ndev)
{
	ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u32 port,
				 struct net_device *rdma_ndev, void *cookie)
{
	handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

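/*
 * del_netdev_default_ips_join - If @rdma_ndev is enslaved to a bond
 * master, delete its default GIDs; the bond master carries the default
 * GIDs instead.
 */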
static void del_netdev_default_ips_join(struct ib_device *ib_dev, u32 port,
					struct net_device *rdma_ndev,
					void *cookie)
{
	struct net_device *master_ndev;

	rcu_read_lock();
	master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
	if (master_ndev)
		dev_hold(master_ndev);
	rcu_read_unlock();

	if (master_ndev) {
		bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
						master_ndev);
		dev_put(master_ndev);
	}
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs through a work.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
	struct netdev_event_work *work =
		container_of(_work, struct netdev_event_work, work);
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
		ib_enum_all_roce_netdevs(work->cmds[i].filter,
					 work->cmds[i].filter_ndev,
					 work->cmds[i].cb,
					 work->cmds[i].ndev);
		dev_put(work->cmds[i].ndev);
		dev_put(work->cmds[i].filter_ndev);
	}

	kfree(work);
}

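/*
 * netdevice_queue_work - Copy @cmds into a freshly allocated work item,
 * defaulting each command's ndev/filter_ndev to @ndev, and hold a
 * reference on both until the work handler drops them.
 */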
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
				struct net_device *ndev)
{
	unsigned int i;
	struct netdev_event_work *ndev_work =
		kmalloc(sizeof(*ndev_work), GFP_KERNEL);

	if (!ndev_work)
		return NOTIFY_DONE;

	memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
	for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
		if (!ndev_work->cmds[i].ndev)
			ndev_work->cmds[i].ndev = ndev;
		if (!ndev_work->cmds[i].filter_ndev)
			ndev_work->cmds[i].filter_ndev = ndev;
		dev_hold(ndev_work->cmds[i].ndev);
		dev_hold(ndev_work->cmds[i].filter_ndev);
	}
	INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

	queue_work(gid_cache_wq, &ndev_work->work);

	return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
	.cb	= add_netdev_ips,
	.filter	= is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
	.cb	= add_netdev_upper_ips,
	.filter	= is_eth_port_of_netdev_filter
};

static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
		  struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd upper_ips_del_cmd = {
		.cb	= del_netdev_upper_ips,
		.filter	= upper_device_filter
	};

	cmds[0] = upper_ips_del_cmd;
	cmds[0].ndev = changeupper_info->upper_dev;
	cmds[1] = add_cmd;
}

static const struct netdev_event_work_cmd bonding_default_add_cmd = {
	.cb	= add_default_gids,
	.filter	= is_upper_ndev_bond_master_filter
};

static void
ndev_event_link(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	static const struct netdev_event_work_cmd bonding_default_del_cmd = {
		.cb	= del_default_gids,
		.filter	= is_upper_ndev_bond_master_filter
	};

	/*
	 * When a lower netdev is linked to its upper bonding
	 * netdev, delete the lower slave netdev's default GIDs.
	 */
	cmds[0] = bonding_default_del_cmd;
	cmds[0].ndev = event_ndev;
	cmds[0].filter_ndev = changeupper_info->upper_dev;

	/* Now add the bonding upper device's default GIDs */
	cmds[1] = bonding_default_add_cmd;
	cmds[1].ndev = changeupper_info->upper_dev;
	cmds[1].filter_ndev = changeupper_info->upper_dev;

	/* Now add the bonding upper device's IP based GIDs */
	cmds[2] = add_cmd_upper_ips;
	cmds[2].ndev = changeupper_info->upper_dev;
	cmds[2].filter_ndev = changeupper_info->upper_dev;
}

static void netdevice_event_changeupper(struct net_device *event_ndev,
		struct netdev_notifier_changeupper_info *changeupper_info,
		struct netdev_event_work_cmd *cmds)
{
	if (changeupper_info->linking)
		ndev_event_link(event_ndev, changeupper_info, cmds);
	else
		ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
	.cb	= add_default_gids,
	.filter	= is_ndev_for_default_gid_filter,
};

static int netdevice_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	static const struct netdev_event_work_cmd del_cmd = {
		.cb = del_netdev_ips, .filter = pass_all_filter};
	static const struct netdev_event_work_cmd
			bonding_default_del_cmd_join = {
		.cb	= del_netdev_default_ips_join,
		.filter	= is_eth_port_inactive_slave_filter
	};
	static const struct netdev_event_work_cmd
			netdev_del_cmd = {
		.cb	= del_netdev_ips,
		.filter	= is_eth_port_of_netdev_filter
	};
	static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
		.cb = del_netdev_upper_ips, .filter = upper_device_filter};
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
	case NETDEV_UP:
		cmds[0] = bonding_default_del_cmd_join;
		cmds[1] = add_default_gid_cmd;
		cmds[2] = add_cmd;
		break;

	case NETDEV_UNREGISTER:
		if (ndev->reg_state < NETREG_UNREGISTERED)
			cmds[0] = del_cmd;
		else
			return NOTIFY_DONE;
		break;

	case NETDEV_CHANGEADDR:
		cmds[0] = netdev_del_cmd;
		if (ndev->reg_state == NETREG_REGISTERED) {
			cmds[1] = add_default_gid_cmd;
			cmds[2] = add_cmd;
		}
		break;

	case NETDEV_CHANGEUPPER:
		netdevice_event_changeupper(ndev,
			container_of(ptr, struct netdev_notifier_changeupper_info, info),
			cmds);
		break;

	case NETDEV_BONDING_FAILOVER:
		cmds[0] = bonding_event_ips_del_cmd;
		/* Add default GIDs of the bond device */
		cmds[1] = bonding_default_add_cmd;
		/* Add IP based GIDs of the bond device */
		cmds[2] = add_cmd_upper_ips;
		break;

	default:
		return NOTIFY_DONE;
	}

	return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
	struct update_gid_event_work *work =
		container_of(_work, struct update_gid_event_work, work);

	ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
				 work->gid_attr.ndev,
				 callback_for_addr_gid_device_scan, work);

	dev_put(work->gid_attr.ndev);
	kfree(work);
}

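/*
 * addr_event - Translate an inet/inet6 address notification into a GID
 * add or delete, deferred to gid_cache_wq because GID table updates may
 * sleep; the allocation uses GFP_ATOMIC since some address notifier
 * chains run in atomic context.
 */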
static int addr_event(struct notifier_block *this, unsigned long event,
		      struct sockaddr *sa, struct net_device *ndev)
{
	struct update_gid_event_work *work;
	enum gid_op_type gid_op;

	if (ndev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		gid_op = GID_ADD;
		break;

	case NETDEV_DOWN:
		gid_op = GID_DEL;
		break;

	default:
		return NOTIFY_DONE;
	}

	work = kmalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return NOTIFY_DONE;

	INIT_WORK(&work->work, update_gid_event_work_handler);

	rdma_ip2gid(sa, &work->gid);
	work->gid_op = gid_op;

	memset(&work->gid_attr, 0, sizeof(work->gid_attr));
	dev_hold(ndev);
	work->gid_attr.ndev = ndev;

	queue_work(gid_cache_wq, &work->work);

	return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
			  void *ptr)
{
	struct sockaddr_in in;
	struct net_device *ndev;
	struct in_ifaddr *ifa = ptr;

	in.sin_family = AF_INET;
	in.sin_addr.s_addr = ifa->ifa_address;
	ndev = ifa->ifa_dev->dev;

	return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct sockaddr_in6 in6;
	struct net_device *ndev;
	struct inet6_ifaddr *ifa6 = ptr;

	in6.sin6_family = AF_INET6;
	in6.sin6_addr = ifa6->addr;
	ndev = ifa6->idev->dev;

	return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
	.notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
	.notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
	.notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
	gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
	if (!gid_cache_wq)
		return -ENOMEM;

	register_inetaddr_notifier(&nb_inetaddr);
	if (IS_ENABLED(CONFIG_IPV6))
		register_inet6addr_notifier(&nb_inet6addr);

	/*
	 * We rely on the netdevice notifier to enumerate all existing
	 * devices in the system. Register to this notifier last to
	 * make sure we will not miss any IP add/del callbacks.
	 */
	register_netdevice_notifier(&nb_netdevice);

	return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
	if (IS_ENABLED(CONFIG_IPV6))
		unregister_inet6addr_notifier(&nb_inet6addr);
	unregister_inetaddr_notifier(&nb_inetaddr);
	unregister_netdevice_notifier(&nb_netdevice);

	/*
	 * Ensure all GID deletion tasks complete before we go down,
	 * to avoid any reference to free'd resources. By the time
	 * ib-core is removed, all physical devices have been removed,
	 * so no issue with remaining hardware contexts.
	 */
	destroy_workqueue(gid_cache_wq);
}