0001
0002
0003
0004 #include <linux/ethtool_netlink.h>
0005 #include <linux/netdevice.h>
0006 #include <linux/slab.h>
0007 #include <linux/types.h>
0008 #include <linux/workqueue.h>
0009 #include <net/udp_tunnel.h>
0010 #include <net/vxlan.h>
0011
/* State bits tracked per offload table entry, kept in
 * struct udp_tunnel_nic_table_entry::flags.
 */
enum udp_tunnel_nic_table_entry_flags {
	UDP_TUNNEL_NIC_ENTRY_ADD = BIT(0),	/* addition to the device is pending */
	UDP_TUNNEL_NIC_ENTRY_DEL = BIT(1),	/* removal from the device is pending */
	UDP_TUNNEL_NIC_ENTRY_OP_FAIL = BIT(2),	/* last device op on this entry failed */
	UDP_TUNNEL_NIC_ENTRY_FROZEN = BIT(3),	/* must not be modified (replay in progress) */
};
0018
/* State of a single port slot in a device offload table. */
struct udp_tunnel_nic_table_entry {
	__be16 port;	/* UDP port, network byte order */
	u8 type;	/* tunnel type, a UDP_TUNNEL_TYPE_* bit */
	u8 flags;	/* enum udp_tunnel_nic_table_entry_flags */
	u16 use_cnt;	/* number of active users of this port entry */
#define UDP_TUNNEL_NIC_USE_CNT_MAX	U16_MAX
	u8 hw_priv;	/* driver-private value, set via set_port_priv op */
};
0027
0028
0029
0030
0031
0032
0033
0034
0035
0036
0037
0038
/**
 * struct udp_tunnel_nic - UDP tunnel port offload state
 * @work:	async work for talking to hardware from process context
 * @dev:	netdev pointer
 * @need_sync:	at least one port changed and needs pushing to the device
 * @need_replay: space was freed, we need a replay of all ports
 * @work_pending: @work is currently scheduled
 * @n_tables:	number of tables under @entries
 * @missed:	bitmap of tables which overflown (one bit per table)
 * @entries:	table of tables of ports currently offloaded
 */
struct udp_tunnel_nic {
	struct work_struct work;

	struct net_device *dev;

	u8 need_sync:1;
	u8 need_replay:1;
	u8 work_pending:1;

	unsigned int n_tables;
	unsigned long missed;
	struct udp_tunnel_nic_table_entry **entries;
};
0052
0053
0054
0055
/* Ordered workqueue (allocated in module init) used for process-context
 * syncs and replays; ordering guarantees one sync runs at a time.
 */
static struct workqueue_struct *udp_tunnel_nic_workqueue;
0057
0058 static const char *udp_tunnel_nic_tunnel_type_name(unsigned int type)
0059 {
0060 switch (type) {
0061 case UDP_TUNNEL_TYPE_VXLAN:
0062 return "vxlan";
0063 case UDP_TUNNEL_TYPE_GENEVE:
0064 return "geneve";
0065 case UDP_TUNNEL_TYPE_VXLAN_GPE:
0066 return "vxlan-gpe";
0067 default:
0068 return "unknown";
0069 }
0070 }
0071
0072 static bool
0073 udp_tunnel_nic_entry_is_free(struct udp_tunnel_nic_table_entry *entry)
0074 {
0075 return entry->use_cnt == 0 && !entry->flags;
0076 }
0077
0078 static bool
0079 udp_tunnel_nic_entry_is_present(struct udp_tunnel_nic_table_entry *entry)
0080 {
0081 return entry->use_cnt && !(entry->flags & ~UDP_TUNNEL_NIC_ENTRY_FROZEN);
0082 }
0083
0084 static bool
0085 udp_tunnel_nic_entry_is_frozen(struct udp_tunnel_nic_table_entry *entry)
0086 {
0087 return entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN;
0088 }
0089
0090 static void
0091 udp_tunnel_nic_entry_freeze_used(struct udp_tunnel_nic_table_entry *entry)
0092 {
0093 if (!udp_tunnel_nic_entry_is_free(entry))
0094 entry->flags |= UDP_TUNNEL_NIC_ENTRY_FROZEN;
0095 }
0096
/* Allow a previously frozen entry to be modified again. */
static void
udp_tunnel_nic_entry_unfreeze(struct udp_tunnel_nic_table_entry *entry)
{
	entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_FROZEN;
}
0102
0103 static bool
0104 udp_tunnel_nic_entry_is_queued(struct udp_tunnel_nic_table_entry *entry)
0105 {
0106 return entry->flags & (UDP_TUNNEL_NIC_ENTRY_ADD |
0107 UDP_TUNNEL_NIC_ENTRY_DEL);
0108 }
0109
0110 static void
0111 udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
0112 struct udp_tunnel_nic_table_entry *entry,
0113 unsigned int flag)
0114 {
0115 entry->flags |= flag;
0116 utn->need_sync = 1;
0117 }
0118
0119 static void
0120 udp_tunnel_nic_ti_from_entry(struct udp_tunnel_nic_table_entry *entry,
0121 struct udp_tunnel_info *ti)
0122 {
0123 memset(ti, 0, sizeof(*ti));
0124 ti->port = entry->port;
0125 ti->type = entry->type;
0126 ti->hw_priv = entry->hw_priv;
0127 }
0128
0129 static bool
0130 udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
0131 {
0132 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0133 unsigned int i, j;
0134
0135 for (i = 0; i < utn->n_tables; i++)
0136 for (j = 0; j < info->tables[i].n_entries; j++)
0137 if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
0138 return false;
0139 return true;
0140 }
0141
0142 static bool
0143 udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
0144 {
0145 const struct udp_tunnel_nic_table_info *table;
0146 unsigned int i, j;
0147
0148 if (!utn->missed)
0149 return false;
0150
0151 for (i = 0; i < utn->n_tables; i++) {
0152 table = &dev->udp_tunnel_nic_info->tables[i];
0153 if (!test_bit(i, &utn->missed))
0154 continue;
0155
0156 for (j = 0; j < table->n_entries; j++)
0157 if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
0158 return true;
0159 }
0160
0161 return false;
0162 }
0163
0164 static void
0165 __udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
0166 unsigned int idx, struct udp_tunnel_info *ti)
0167 {
0168 struct udp_tunnel_nic_table_entry *entry;
0169 struct udp_tunnel_nic *utn;
0170
0171 utn = dev->udp_tunnel_nic;
0172 entry = &utn->entries[table][idx];
0173
0174 if (entry->use_cnt)
0175 udp_tunnel_nic_ti_from_entry(entry, ti);
0176 }
0177
0178 static void
0179 __udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
0180 unsigned int idx, u8 priv)
0181 {
0182 dev->udp_tunnel_nic->entries[table][idx].hw_priv = priv;
0183 }
0184
/* Record the outcome of a device add/del operation on one entry.
 * On success the corresponding pending bit is cleared.  If a previous op
 * on the entry failed (OP_FAIL) the device state is uncertain, so -EEXIST
 * on add and -ENOENT on delete mean the device already matches what we
 * wanted and also count as success.
 */
static void
udp_tunnel_nic_entry_update_done(struct udp_tunnel_nic_table_entry *entry,
				 int err)
{
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;

	/* Add and delete must never be pending at the same time */
	WARN_ON_ONCE(entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
		     entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL);

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD &&
	    (!err || (err == -EEXIST && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_ADD;

	if (entry->flags & UDP_TUNNEL_NIC_ENTRY_DEL &&
	    (!err || (err == -ENOENT && dodgy)))
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_DEL;

	/* Track whether the last op on this entry failed */
	if (!err)
		entry->flags &= ~UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	else
		entry->flags |= UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
}
0207
0208 static void
0209 udp_tunnel_nic_device_sync_one(struct net_device *dev,
0210 struct udp_tunnel_nic *utn,
0211 unsigned int table, unsigned int idx)
0212 {
0213 struct udp_tunnel_nic_table_entry *entry;
0214 struct udp_tunnel_info ti;
0215 int err;
0216
0217 entry = &utn->entries[table][idx];
0218 if (!udp_tunnel_nic_entry_is_queued(entry))
0219 return;
0220
0221 udp_tunnel_nic_ti_from_entry(entry, &ti);
0222 if (entry->flags & UDP_TUNNEL_NIC_ENTRY_ADD)
0223 err = dev->udp_tunnel_nic_info->set_port(dev, table, idx, &ti);
0224 else
0225 err = dev->udp_tunnel_nic_info->unset_port(dev, table, idx,
0226 &ti);
0227 udp_tunnel_nic_entry_update_done(entry, err);
0228
0229 if (err)
0230 netdev_warn(dev,
0231 "UDP tunnel port sync failed port %d type %s: %d\n",
0232 be16_to_cpu(entry->port),
0233 udp_tunnel_nic_tunnel_type_name(entry->type),
0234 err);
0235 }
0236
0237 static void
0238 udp_tunnel_nic_device_sync_by_port(struct net_device *dev,
0239 struct udp_tunnel_nic *utn)
0240 {
0241 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0242 unsigned int i, j;
0243
0244 for (i = 0; i < utn->n_tables; i++)
0245 for (j = 0; j < info->tables[i].n_entries; j++)
0246 udp_tunnel_nic_device_sync_one(dev, utn, i, j);
0247 }
0248
/* Whole-table sync for drivers exposing a sync_table() callback instead
 * of per-port set/unset.  A table is synced when any of its entries is
 * queued; the single result code then applies to every queued entry.
 */
static void
udp_tunnel_nic_device_sync_by_table(struct net_device *dev,
				    struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;
	int err;

	for (i = 0; i < utn->n_tables; i++) {
		/* Find something that needs sync in this table */
		for (j = 0; j < info->tables[i].n_entries; j++)
			if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
				break;
		if (j == info->tables[i].n_entries)
			continue;

		err = info->sync_table(dev, i);
		if (err)
			netdev_warn(dev, "UDP tunnel port sync failed for table %d: %d\n",
				    i, err);

		/* Apply the table-wide result to all queued entries */
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (udp_tunnel_nic_entry_is_queued(entry))
				udp_tunnel_nic_entry_update_done(entry, err);
		}
	}
}
0279
/* Push all queued changes out to the device, choosing the sync method
 * based on which callbacks the driver provides.
 */
static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	if (!utn->need_sync)
		return;

	/* Drivers provide either a whole-table sync callback or per-port
	 * set/unset callbacks (validated in udp_tunnel_nic_register()).
	 */
	if (dev->udp_tunnel_nic_info->sync_table)
		udp_tunnel_nic_device_sync_by_table(dev, utn);
	else
		udp_tunnel_nic_device_sync_by_port(dev, utn);

	utn->need_sync = 0;
	/* Can't replay directly here, in case we come from the tunnel
	 * driver's notification - trigger the replay from deferred work.
	 */
	utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}
0297
/* Sync entry point for callers which may be in atomic context. */
static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	bool may_sleep;

	if (!utn->need_sync)
		return;

	/* Drivers which sleep in their callbacks must be synced from
	 * process context via the workqueue.  Others are synced right
	 * here, but a sync may itself discover the need for a replay,
	 * which also has to run from the work item.
	 */
	may_sleep = info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
	if (!may_sleep)
		__udp_tunnel_nic_device_sync(dev, utn);
	if (may_sleep || utn->need_replay) {
		queue_work(udp_tunnel_nic_workqueue, &utn->work);
		utn->work_pending = 1;
	}
}
0318
0319 static bool
0320 udp_tunnel_nic_table_is_capable(const struct udp_tunnel_nic_table_info *table,
0321 struct udp_tunnel_info *ti)
0322 {
0323 return table->tunnel_types & ti->type;
0324 }
0325
0326 static bool
0327 udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
0328 struct udp_tunnel_info *ti)
0329 {
0330 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0331 unsigned int i;
0332
0333
0334 if (info->flags & UDP_TUNNEL_NIC_INFO_IPV4_ONLY &&
0335 ti->sa_family != AF_INET)
0336 return false;
0337
0338 for (i = 0; i < utn->n_tables; i++)
0339 if (udp_tunnel_nic_table_is_capable(&info->tables[i], ti))
0340 return true;
0341 return false;
0342 }
0343
0344 static int
0345 udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
0346 struct udp_tunnel_info *ti)
0347 {
0348 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0349 struct udp_tunnel_nic_table_entry *entry;
0350 unsigned int i, j;
0351
0352 for (i = 0; i < utn->n_tables; i++)
0353 for (j = 0; j < info->tables[i].n_entries; j++) {
0354 entry = &utn->entries[i][j];
0355
0356 if (!udp_tunnel_nic_entry_is_free(entry) &&
0357 entry->port == ti->port &&
0358 entry->type != ti->type) {
0359 __set_bit(i, &utn->missed);
0360 return true;
0361 }
0362 }
0363 return false;
0364 }
0365
/* Adjust the use count of an entry by @use_cnt_adj and queue the device
 * operation needed when the entry transitions between used and unused,
 * or when a previous operation on it failed.
 */
static void
udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
			 unsigned int table, unsigned int idx, int use_cnt_adj)
{
	struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
	bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
	unsigned int from, to;

	WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);

	/* If not going from used to unused or vice versa - all done.
	 * For dodgy entries make sure we try to sync again (queue the entry).
	 */
	entry->use_cnt += use_cnt_adj;
	if (!dodgy && !entry->use_cnt == !(entry->use_cnt - use_cnt_adj))
		return;

	/* Cancel the op before it was sent to the device, if possible,
	 * otherwise we'd need to take special care to issue commands
	 * in the same order the ports arrived.
	 */
	if (use_cnt_adj < 0) {
		from = UDP_TUNNEL_NIC_ENTRY_ADD;
		to = UDP_TUNNEL_NIC_ENTRY_DEL;
	} else {
		from = UDP_TUNNEL_NIC_ENTRY_DEL;
		to = UDP_TUNNEL_NIC_ENTRY_ADD;
	}

	if (entry->flags & from) {
		entry->flags &= ~from;
		if (!dodgy)
			return;
	}

	udp_tunnel_nic_entry_queue(utn, entry, to);
}
0403
0404 static bool
0405 udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
0406 unsigned int table, unsigned int idx,
0407 struct udp_tunnel_info *ti, int use_cnt_adj)
0408 {
0409 struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
0410
0411 if (udp_tunnel_nic_entry_is_free(entry) ||
0412 entry->port != ti->port ||
0413 entry->type != ti->type)
0414 return false;
0415
0416 if (udp_tunnel_nic_entry_is_frozen(entry))
0417 return true;
0418
0419 udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
0420 return true;
0421 }
0422
0423
0424
0425
0426
0427
0428 static bool
0429 udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
0430 struct udp_tunnel_info *ti, int use_cnt_adj)
0431 {
0432 const struct udp_tunnel_nic_table_info *table;
0433 unsigned int i, j;
0434
0435 for (i = 0; i < utn->n_tables; i++) {
0436 table = &dev->udp_tunnel_nic_info->tables[i];
0437 if (!udp_tunnel_nic_table_is_capable(table, ti))
0438 continue;
0439
0440 for (j = 0; j < table->n_entries; j++)
0441 if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
0442 use_cnt_adj))
0443 return true;
0444 }
0445
0446 return false;
0447 }
0448
/* Take one more reference on an already-tracked matching port, if any. */
static bool
udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
}
0455
/* Drop one reference from an already-tracked matching port, if any. */
static bool
udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
			    struct udp_tunnel_info *ti)
{
	return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
}
0462
/* Claim a free slot in the first capable table for a new port and queue
 * its addition to the device.  Returns false when every capable table
 * is full.
 */
static bool
udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
		       struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_table_info *table;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++) {
		table = &dev->udp_tunnel_nic_info->tables[i];
		if (!udp_tunnel_nic_table_is_capable(table, ti))
			continue;

		for (j = 0; j < table->n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];
			if (!udp_tunnel_nic_entry_is_free(entry))
				continue;

			entry->port = ti->port;
			entry->type = ti->type;
			entry->use_cnt = 1;
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
			return true;
		}

		/* This capable table is full - remember it overflowed
		 * ("missed" a port) so a replay can be triggered once
		 * space frees up.
		 */
		__set_bit(i, &utn->missed);
	}

	return false;
}
0499
/* Core entry point for offloading a new UDP tunnel port. */
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;
	/* OPEN_ONLY devices only track ports while the netdev is up */
	if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
		return;
	/* Devices which hard-code the IANA VXLAN port never get it
	 * programmed; warn if another tunnel type tries to claim it.
	 */
	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
	    ti->port == htons(IANA_VXLAN_UDP_PORT)) {
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
		return;
	}

	if (!udp_tunnel_nic_is_capable(dev, utn, ti))
		return;

	/* It may happen that a tunnel of one type is removed and a
	 * different tunnel type tries to reuse its port before the device
	 * was notified.  The collision check marks the table as "missed"
	 * so the port is re-added on replay once the old entry is gone.
	 */
	if (udp_tunnel_nic_has_collision(dev, utn, ti))
		return;

	if (!udp_tunnel_nic_add_existing(dev, utn, ti))
		udp_tunnel_nic_add_new(dev, utn, ti);

	udp_tunnel_nic_device_sync(dev, utn);
}
0533
0534 static void
0535 __udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
0536 {
0537 struct udp_tunnel_nic *utn;
0538
0539 utn = dev->udp_tunnel_nic;
0540 if (!utn)
0541 return;
0542
0543 if (!udp_tunnel_nic_is_capable(dev, utn, ti))
0544 return;
0545
0546 udp_tunnel_nic_del_existing(dev, utn, ti);
0547
0548 udp_tunnel_nic_device_sync(dev, utn);
0549 }
0550
/* Driver notified us it lost its port state (presumably a device reset -
 * TODO confirm against callers): drop stale pending state and re-queue
 * an add for every port still in use, then sync immediately.
 */
static void __udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic *utn;
	unsigned int i, j;

	ASSERT_RTNL();

	utn = dev->udp_tunnel_nic;
	if (!utn)
		return;

	utn->need_sync = false;
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			struct udp_tunnel_nic_table_entry *entry;

			entry = &utn->entries[i][j];

			/* Device state is gone - pending deletes and old
			 * failure records no longer apply.
			 */
			entry->flags &= ~(UDP_TUNNEL_NIC_ENTRY_DEL |
					  UDP_TUNNEL_NIC_ENTRY_OP_FAIL);
			/* Replay should not run concurrently under rtnl */
			WARN_ON(entry->flags & UDP_TUNNEL_NIC_ENTRY_FROZEN);
			if (!entry->use_cnt)
				continue;

			/* Re-program every port which is still in use */
			udp_tunnel_nic_entry_queue(utn, entry,
						   UDP_TUNNEL_NIC_ENTRY_ADD);
		}

	__udp_tunnel_nic_device_sync(dev, utn);
}
0583
0584 static size_t
0585 __udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
0586 {
0587 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0588 struct udp_tunnel_nic *utn;
0589 unsigned int j;
0590 size_t size;
0591
0592 utn = dev->udp_tunnel_nic;
0593 if (!utn)
0594 return 0;
0595
0596 size = 0;
0597 for (j = 0; j < info->tables[table].n_entries; j++) {
0598 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
0599 continue;
0600
0601 size += nla_total_size(0) +
0602 nla_total_size(sizeof(__be16)) +
0603 nla_total_size(sizeof(u32));
0604 }
0605
0606 return size;
0607 }
0608
0609 static int
0610 __udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
0611 struct sk_buff *skb)
0612 {
0613 const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
0614 struct udp_tunnel_nic *utn;
0615 struct nlattr *nest;
0616 unsigned int j;
0617
0618 utn = dev->udp_tunnel_nic;
0619 if (!utn)
0620 return 0;
0621
0622 for (j = 0; j < info->tables[table].n_entries; j++) {
0623 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
0624 continue;
0625
0626 nest = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
0627
0628 if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
0629 utn->entries[table][j].port) ||
0630 nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
0631 ilog2(utn->entries[table][j].type)))
0632 goto err_cancel;
0633
0634 nla_nest_end(skb, nest);
0635 }
0636
0637 return 0;
0638
0639 err_cancel:
0640 nla_nest_cancel(skb, nest);
0641 return -EMSGSIZE;
0642 }
0643
/* Ops table published through the udp_tunnel_nic_ops pointer at module
 * init; these are the entry points callers outside this file use.
 */
static const struct udp_tunnel_nic_ops __udp_tunnel_nic_ops = {
	.get_port	= __udp_tunnel_nic_get_port,
	.set_port_priv	= __udp_tunnel_nic_set_port_priv,
	.add_port	= __udp_tunnel_nic_add_port,
	.del_port	= __udp_tunnel_nic_del_port,
	.reset_ntf	= __udp_tunnel_nic_reset_ntf,
	.dump_size	= __udp_tunnel_nic_dump_size,
	.dump_write	= __udp_tunnel_nic_dump_write,
};
0653
/* Remove all offloaded ports: drop every use count (queueing the deletes),
 * sync the deletes to the device, then wipe the table state.
 */
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	unsigned int i, j;

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++) {
			/* Take the use count all the way down to zero */
			int adj_cnt = -utn->entries[i][j].use_cnt;

			if (adj_cnt)
				udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
		}

	__udp_tunnel_nic_device_sync(dev, utn);

	for (i = 0; i < utn->n_tables; i++)
		memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
						      sizeof(**utn->entries)));
	WARN_ON(utn->need_sync);
	utn->need_replay = 0;
}
0676
/* Re-query the tunnel drivers for all active ports, e.g. after a table
 * overflowed and space later freed up.
 */
static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node;
	unsigned int i, j;

	/* Freeze all the ports we are already tracking so that the replay
	 * does not double up the use counts.
	 */
	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
	utn->missed = 0;
	utn->need_replay = 0;

	if (!info->shared) {
		udp_tunnel_get_rx_info(dev);
	} else {
		/* Shared tables: every device on the list contributes ports */
		list_for_each_entry(node, &info->shared->devices, list)
			udp_tunnel_get_rx_info(node->dev);
	}

	for (i = 0; i < utn->n_tables; i++)
		for (j = 0; j < info->tables[i].n_entries; j++)
			udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
}
0704
/* Deferred sync/replay handler, scheduled on udp_tunnel_nic_workqueue
 * from udp_tunnel_nic_device_sync().
 */
static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
	struct udp_tunnel_nic *utn =
		container_of(work, struct udp_tunnel_nic, work);

	rtnl_lock();
	/* Clear work_pending before syncing, so changes made while we run
	 * can re-queue the work.
	 */
	utn->work_pending = 0;
	__udp_tunnel_nic_device_sync(utn->dev, utn);

	if (utn->need_replay)
		udp_tunnel_nic_replay(utn->dev, utn);
	rtnl_unlock();
}
0718
0719 static struct udp_tunnel_nic *
0720 udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
0721 unsigned int n_tables)
0722 {
0723 struct udp_tunnel_nic *utn;
0724 unsigned int i;
0725
0726 utn = kzalloc(sizeof(*utn), GFP_KERNEL);
0727 if (!utn)
0728 return NULL;
0729 utn->n_tables = n_tables;
0730 INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
0731
0732 utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
0733 if (!utn->entries)
0734 goto err_free_utn;
0735
0736 for (i = 0; i < n_tables; i++) {
0737 utn->entries[i] = kcalloc(info->tables[i].n_entries,
0738 sizeof(*utn->entries[i]), GFP_KERNEL);
0739 if (!utn->entries[i])
0740 goto err_free_prev_entries;
0741 }
0742
0743 return utn;
0744
0745 err_free_prev_entries:
0746 while (i--)
0747 kfree(utn->entries[i]);
0748 kfree(utn->entries);
0749 err_free_utn:
0750 kfree(utn);
0751 return NULL;
0752 }
0753
0754 static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
0755 {
0756 unsigned int i;
0757
0758 for (i = 0; i < utn->n_tables; i++)
0759 kfree(utn->entries[i]);
0760 kfree(utn->entries);
0761 kfree(utn);
0762 }
0763
/* Validate the driver's offload description, create (or join, for shared
 * tables) the offload state, and query the tunnel drivers for existing
 * ports unless the device is OPEN_ONLY.
 */
static int udp_tunnel_nic_register(struct net_device *dev)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
	struct udp_tunnel_nic_shared_node *node = NULL;
	struct udp_tunnel_nic *utn;
	unsigned int n_tables, i;

	/* The "missed" bitmap must have a bit per possible table */
	BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
		     UDP_TUNNEL_NIC_MAX_TABLES);
	/* Expect use count of at most 2 (IPv4, IPv6) per device */
	BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
		     UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);

	/* Sanity-check the driver info: set_port/unset_port come as a pair,
	 * exactly one of per-port (set_port) or per-table (sync_table)
	 * sync must be provided, and table 0 must be populated.
	 */
	if (WARN_ON(!info->set_port != !info->unset_port) ||
	    WARN_ON(!info->set_port == !info->sync_table) ||
	    WARN_ON(!info->tables[0].n_entries))
		return -EINVAL;

	if (WARN_ON(info->shared &&
		    info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return -EINVAL;

	/* Populated tables must be contiguous, starting at index 0 */
	n_tables = 1;
	for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		if (!info->tables[i].n_entries)
			continue;

		n_tables++;
		if (WARN_ON(!info->tables[i - 1].n_entries))
			return -EINVAL;
	}

	/* Shared tables keep a node per member device */
	if (info->shared) {
		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;

		node->dev = dev;
	}

	/* Reuse the state of the first sharing device, if it exists */
	if (info->shared && info->shared->udp_tunnel_nic_info) {
		utn = info->shared->udp_tunnel_nic_info;
	} else {
		utn = udp_tunnel_nic_alloc(info, n_tables);
		if (!utn) {
			kfree(node);
			return -ENOMEM;
		}
	}

	if (info->shared) {
		if (!info->shared->udp_tunnel_nic_info) {
			INIT_LIST_HEAD(&info->shared->devices);
			info->shared->udp_tunnel_nic_info = utn;
		}

		list_add_tail(&node->list, &info->shared->devices);
	}

	utn->dev = dev;
	dev_hold(dev);
	dev->udp_tunnel_nic = utn;

	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		udp_tunnel_get_rx_info(dev);

	return 0;
}
0834
/* Tear down the offload state on NETDEV_UNREGISTER.  For shared tables
 * the state is handed over to the next member device instead of being
 * flushed and freed.
 */
static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
	const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;

	/* For a shared table remove this dev from the list of sharing
	 * devices first, the state is owned jointly.
	 */
	if (info->shared) {
		struct udp_tunnel_nic_shared_node *node, *first;

		list_for_each_entry(node, &info->shared->devices, list)
			if (node->dev == dev)
				break;
		/* Device was never on the list (registration failed?) */
		if (list_entry_is_head(node, &info->shared->devices, list))
			return;

		list_del(&node->list);
		kfree(node);

		first = list_first_entry_or_null(&info->shared->devices,
						 typeof(*first), list);
		if (first) {
			/* Other devices still share the state - drop only
			 * our ports and hand ownership to the next device.
			 */
			udp_tunnel_drop_rx_info(dev);
			utn->dev = first->dev;
			goto release_dev;
		}

		info->shared->udp_tunnel_nic_info = NULL;
	}

	/* Flush before we pass the ref to the device or free the state */
	udp_tunnel_nic_flush(dev, utn);

	/* Wait for the work to be done using the state, netdev core will
	 * retry unregister until we give up our reference on this device.
	 */
	if (utn->work_pending)
		return;

	udp_tunnel_nic_free(utn);
release_dev:
	dev->udp_tunnel_nic = NULL;
	dev_put(dev);
}
0882
/* Netdev notifier: set up/tear down offload state on register/unregister
 * and, for OPEN_ONLY devices, populate/flush ports on up/down.
 */
static int
udp_tunnel_nic_netdevice_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	const struct udp_tunnel_nic_info *info;
	struct udp_tunnel_nic *utn;

	/* Only devices declaring UDP tunnel offload info are of interest */
	info = dev->udp_tunnel_nic_info;
	if (!info)
		return NOTIFY_DONE;

	if (event == NETDEV_REGISTER) {
		int err;

		err = udp_tunnel_nic_register(dev);
		if (err)
			netdev_WARN(dev, "failed to register for UDP tunnel offloads: %d", err);
		return notifier_from_errno(err);
	}
	/* All other events need the udp_tunnel_nic state */
	utn = dev->udp_tunnel_nic;
	if (!utn)
		return NOTIFY_DONE;

	if (event == NETDEV_UNREGISTER) {
		udp_tunnel_nic_unregister(dev, utn);
		return NOTIFY_OK;
	}

	/* Remaining events only matter if ports track netdev state */
	if (!(info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
		return NOTIFY_DONE;

	if (event == NETDEV_UP) {
		/* Tables should have been emptied on GOING_DOWN/register */
		WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
		udp_tunnel_get_rx_info(dev);
		return NOTIFY_OK;
	}
	if (event == NETDEV_GOING_DOWN) {
		udp_tunnel_nic_flush(dev, utn);
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}
0929
/* Registered at module init; drives all state transitions in this file. */
static struct notifier_block udp_tunnel_nic_notifier_block __read_mostly = {
	.notifier_call = udp_tunnel_nic_netdevice_event,
};
0933
static int __init udp_tunnel_nic_init_module(void)
{
	int err;

	/* Ordered workqueue: syncs/replays run one at a time, in order */
	udp_tunnel_nic_workqueue = alloc_ordered_workqueue("udp_tunnel_nic", 0);
	if (!udp_tunnel_nic_workqueue)
		return -ENOMEM;

	/* Publish our ops; the pointer is flipped under rtnl_lock */
	rtnl_lock();
	udp_tunnel_nic_ops = &__udp_tunnel_nic_ops;
	rtnl_unlock();

	err = register_netdevice_notifier(&udp_tunnel_nic_notifier_block);
	if (err)
		goto err_unset_ops;

	return 0;

err_unset_ops:
	/* Unwind in reverse order of init */
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();
	destroy_workqueue(udp_tunnel_nic_workqueue);
	return err;
}
0959 late_initcall(udp_tunnel_nic_init_module);
0960
static void __exit udp_tunnel_nic_cleanup_module(void)
{
	/* Stop receiving netdev events before tearing anything down */
	unregister_netdevice_notifier(&udp_tunnel_nic_notifier_block);

	/* Unpublish the ops under rtnl_lock, mirroring module init */
	rtnl_lock();
	udp_tunnel_nic_ops = NULL;
	rtnl_unlock();

	destroy_workqueue(udp_tunnel_nic_workqueue);
}
0971 module_exit(udp_tunnel_nic_cleanup_module);
0972
0973 MODULE_LICENSE("GPL");