// SPDX-License-Identifier: GPL-2.0

#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
#include <linux/if_macvlan.h>

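/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/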
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}

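/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/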
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

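/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/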
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

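/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/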
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

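/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/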
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];

		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

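/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/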
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

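/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/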
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);
}

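/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/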
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

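/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/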
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];

		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

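/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/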
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

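/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/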
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

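/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/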
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

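/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/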
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	/* we support 3 possible GLORT configurations.
	 * 1: VFs consume all but the last 1
	 * 2: VFs and PF split glorts with possible gap between
	 * 3: VFs allocated first 64, all others belong to PF
	 */
	if (mask <= hw->iov.total_vfs) {
		interface->glort_count = 1;
		interface->glort += mask;
	} else if (mask < 64) {
		interface->glort_count = (mask + 1) / 2;
		interface->glort += interface->glort_count;
	} else {
		interface->glort_count = mask - 63;
		interface->glort += 64;
	}
}

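/**
 * fm10k_restore_udp_port_info - restore UDP tunnel port configuration
 * @interface: board private structure
 *
 * This function restores the values in the tunnel_cfg register(s) after reset
 **/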
static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* only the PF supports configuring tunnels */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* restore tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
			ntohs(interface->vxlan_port) |
			(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));

	/* restore Geneve tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
			ntohs(interface->geneve_port));
}

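/**
 * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
 * @dev: network interface device structure
 * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
 *
 * This function is called when a new UDP tunnel port is added or deleted.
 * Due to hardware restrictions, only one port per type can be offloaded at
 * once. The core will send to the driver a port of its choice.
 **/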
static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (!table)
		interface->vxlan_port = ti.port;
	else
		interface->geneve_port = ti.port;

	fm10k_restore_udp_port_info(interface);
	return 0;
}

static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
	.sync_table	= fm10k_udp_tunnel_sync,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

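/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/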
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_tx_queues(netdev,
					   interface->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

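/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/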
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int num_tx_queues = READ_ONCE(interface->num_tx_queues);
	unsigned int r_idx = skb->queue_mapping;
	int err;

	if (!num_tx_queues)
		return NETDEV_TX_BUSY;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !skb_vlan_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* locate VLAN header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}

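/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: the index of the Tx queue that timed out
 **/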
static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *tx_ring;
	bool real_tx_hang = false;

	if (txqueue >= interface->num_tx_queues) {
		WARN(1, "invalid Tx queue index %u", txqueue);
		return;
	}

	tx_ring = interface->tx_ring[txqueue];
	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
		real_tx_hang = true;

#define TX_TIMEO_LIMIT 16000
	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo / HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

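/**
 * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
 * @interface: board private structure
 *
 * This function checks if the PF interface's mailbox is ready before
 * queueing mailbox messages for transmission. This will prevent filling
 * the Tx mailbox queue when the receiver is not ready. VF interfaces
 * are exempt from this check since it will block all PF-VF mailbox
 * messages from being sent from the VF to the PF at initialization.
 **/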
static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
}

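/**
 * fm10k_queue_vlan_request - Queue a VLAN update request
 * @interface: the fm10k interface structure
 * @vid: the VLAN vid
 * @vsi: VSI index number
 * @set: whether to set or clear
 *
 * This function queues up a VLAN update. For VFs, this must be sent to the
 * managing PF over the mailbox. For PFs, we'll use the same handling so that
 * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
 * many VLAN updates during reset.
 **/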
int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
			     u32 vid, u8 vsi, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->type = FM10K_VLAN_REQUEST;
	request->vlan.vid = vid;
	request->vlan.vsi = vsi;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

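/**
 * fm10k_queue_mac_request - Queue a MAC update request
 * @interface: the fm10k interface structure
 * @glort: the target glort for this update
 * @addr: the address to update
 * @vid: the vid to update
 * @set: whether to add (true) or remove (false)
 *
 * This function queues up a MAC request for sending. For VFs, this must be
 * sent to the managing PF over the mailbox. For PFs, we'll use the same
 * handling so that it's similar to the VF.
 **/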
int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
			    const unsigned char *addr, u16 vid, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	if (is_multicast_ether_addr(addr))
		request->type = FM10K_MC_MAC_REQUEST;
	else
		request->type = FM10K_UC_MAC_REQUEST;

	ether_addr_copy(request->mac.addr, addr);
	request->mac.glort = glort;
	request->mac.vid = vid;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

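/**
 * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
 * @interface: the fm10k interface structure
 * @glort: the target glort to clear
 * @vlans: true to clear VLAN messages, false to ignore them
 *
 * Cancel any outstanding MAC/VLAN requests for a given glort. This is
 * expected to be called when a logical port goes down.
 **/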
void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
			       u16 glort, bool vlans)
{
	struct fm10k_macvlan_request *r, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&interface->macvlan_lock, flags);

	/* Free any outstanding MAC/VLAN requests for this interface */
	list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) {
		switch (r->type) {
		case FM10K_MC_MAC_REQUEST:
		case FM10K_UC_MAC_REQUEST:
			/* Don't free requests for other interfaces */
			if (r->mac.glort != glort)
				break;
			fallthrough;
		case FM10K_VLAN_REQUEST:
			if (vlans) {
				list_del(&r->list);
				kfree(r);
			}
			break;
		}
	}

	spin_unlock_irqrestore(&interface->macvlan_lock, flags);
}

static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_hw *hw = &interface->hw;
	u16 glort;
	s32 err;
	int i;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify that we have permission to add VLANs. If this is a request
	 * to remove a VLAN, we still want to allow the user to remove the
	 * VLAN device. In that case, we need to clear the bit in the
	 * active_vlans bitmask.
	 */
	if (set && hw->mac.vlan_override)
		return -EACCES;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	/* disable the default VLAN ID on ring if we have an active VLAN */
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];
		u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);

		if (test_bit(rx_vid, interface->active_vlans))
			rx_ring->vid |= FM10K_VLAN_CLEAR;
		else
			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
	}

	/* If our VLAN has been overridden, there is no reason to send VLAN
	 * removal requests as they will be silently ignored.
	 */
	if (hw->mac.vlan_override)
		return 0;

	/* Do not remove default VLAN ID related entries from VLAN and MAC
	 * tables
	 */
	if (!set && vid == hw->mac.default_vid)
		return 0;

	/* Do not throw an error if the interface is down. We will sync once
	 * we come up
	 */
	if (test_bit(__FM10K_DOWN, interface->state))
		return 0;

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = fm10k_queue_vlan_request(interface, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* Update our base MAC address */
	err = fm10k_queue_mac_request(interface, interface->glort,
				      hw->mac.addr, vid, set);
	if (err)
		goto err_out;

	/* Update L2 accelerated macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						vid, set);
		}
	}

	/* set VLAN ID prior to syncing/unsyncing the VLAN */
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		fm10k_queue_vlan_request(interface, prev_vid, 0, false);
	}
}

static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		eth_hw_addr_set(dev, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
	if (!(dev->flags & IFF_UP))
		return;

	/* determine new mode based on flags */
	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* update xcast mode first, but only if it changed */
	if (interface->xcast_mode != xcast_mode) {
		/* update VLAN table when entering promiscuous mode */
		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
						 0, true);

		/* clear VLAN table when exiting promiscuous mode */
		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
			fm10k_clear_unused_vlans(interface);

		/* update xcast mode if host's mailbox is ready */
		if (fm10k_host_mbx_ready(interface))
			hw->mac.ops.update_xcast_mode(hw, interface->glort,
						      xcast_mode);

		/* record updated xcast mode state */
		interface->xcast_mode = xcast_mode;
	}

	/* synchronize all of the addresses */
	__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
	__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);

	fm10k_mbx_unlock(interface);
}

void fm10k_restore_rx_state(struct fm10k_intfc *interface)
{
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode, i;
	u16 vid, glort;

	/* record glort for this interface */
	glort = interface->glort;

	/* convert interface flags to xcast mode */
	if (netdev->flags & IFF_PROMISC)
		xcast_mode = FM10K_XCAST_MODE_PROMISC;
	else if (netdev->flags & IFF_ALLMULTI)
		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
		xcast_mode = FM10K_XCAST_MODE_MULTI;
	else
		xcast_mode = FM10K_XCAST_MODE_NONE;

	fm10k_mbx_lock(interface);

	/* Enable logical port if host's mailbox is ready */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_lport_state(hw, glort,
					       interface->glort_count, true);

	/* update VLAN table */
	fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
				 xcast_mode == FM10K_XCAST_MODE_PROMISC);

	/* update table with current entries */
	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		fm10k_queue_vlan_request(interface, vid, 0, true);

		fm10k_queue_mac_request(interface, glort,
					hw->mac.addr, vid, true);

		/* synchronize macvlan addresses */
		if (l2_accel) {
			for (i = 0; i < l2_accel->size; i++) {
				struct net_device *sdev = l2_accel->macvlan[i];

				if (!sdev)
					continue;

				glort = l2_accel->dglort + 1 + i;

				fm10k_queue_mac_request(interface, glort,
							sdev->dev_addr,
							vid, true);
			}
		}
	}

	/* update xcast mode before synchronizing addresses if host's
	 * mailbox is ready
	 */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);

	/* synchronize all of the addresses */
	__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
	__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);

	/* synchronize macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			hw->mac.ops.update_xcast_mode(hw, glort,
						      FM10K_XCAST_MODE_NONE);
			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						hw->mac.default_vid, true);
		}
	}

	fm10k_mbx_unlock(interface);

	/* record updated xcast mode state */
	interface->xcast_mode = xcast_mode;

	/* Restore tunnel configuration */
	fm10k_restore_udp_port_info(interface);
}

void fm10k_reset_rx_state(struct fm10k_intfc *interface)
{
	struct net_device *netdev = interface->netdev;
	struct fm10k_hw *hw = &interface->hw;

	/* Wait for MAC/VLAN work to finish */
	while (test_bit(__FM10K_MACVLAN_SCHED, interface->state))
		usleep_range(1000, 2000);

	/* Cancel pending MAC/VLAN requests */
	fm10k_clear_macvlan_queue(interface, interface->glort, true);

	fm10k_mbx_lock(interface);

	/* clear the logical port state on lower device if host's mailbox is
	 * ready
	 */
	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_lport_state(hw, interface->glort,
					       interface->glort_count, false);

	fm10k_mbx_unlock(interface);

	/* reset flags to default state */
	interface->xcast_mode = FM10K_XCAST_MODE_NONE;

	/* clear the sync flag since the lport has been dropped */
	__dev_uc_unsync(netdev, NULL);
	__dev_mc_unsync(netdev, NULL);
}

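/**
 * fm10k_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: storage space for 64bit statistics
 *
 * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
 * architectures.
 **/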
static void fm10k_get_stats64(struct net_device *netdev,
			      struct rtnl_link_stats64 *stats)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *ring;
	unsigned int start, i;
	u64 bytes, packets;

	rcu_read_lock();

	for (i = 0; i < interface->num_rx_queues; i++) {
		ring = READ_ONCE(interface->rx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes += bytes;
	}

	for (i = 0; i < interface->num_tx_queues; i++) {
		ring = READ_ONCE(interface->tx_ring[i]);

		if (!ring)
			continue;

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			packets = ring->stats.packets;
			bytes = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}

	rcu_read_unlock();

	/* following stats updated by fm10k_service_task() */
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;
}

int fm10k_setup_tc(struct net_device *dev, u8 tc)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	int err;

	/* Currently only the PF supports priority classes */
	if (tc && (interface->hw.mac.type != fm10k_mac_pf))
		return -EINVAL;

	/* Hardware supports up to 8 traffic classes */
	if (tc > 8)
		return -EINVAL;

	/* Hardware has to reinitialize queues and interrupts to match packet
	 * queue layout.
	 */
	if (netif_running(dev))
		fm10k_close(dev);

	fm10k_mbx_free_irq(interface);

	fm10k_clear_queueing_scheme(interface);

	/* we expect the prio_tc map to be repopulated later */
	netdev_reset_tc(dev);
	netdev_set_num_tc(dev, tc);

	err = fm10k_init_queueing_scheme(interface);
	if (err)
		goto err_queueing_scheme;

	err = fm10k_mbx_request_irq(interface);
	if (err)
		goto err_mbx_irq;

	err = netif_running(dev) ? fm10k_open(dev) : 0;
	if (err)
		goto err_open;

	/* flag to indicate SWPRI has yet to be updated */
	set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);

	return 0;
err_open:
	fm10k_mbx_free_irq(interface);
err_mbx_irq:
	fm10k_clear_queueing_scheme(interface);
err_queueing_scheme:
	netif_device_detach(dev);

	return err;
}

static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	struct tc_mqprio_qopt *mqprio = type_data;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	return fm10k_setup_tc(dev, mqprio->num_tc);
}

static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
				  struct fm10k_l2_accel *l2_accel)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *ring = interface->rx_ring[i];

		rcu_assign_pointer(ring->l2_accel, l2_accel);
	}

	interface->l2_accel = l2_accel;
}

static void *fm10k_dfwd_add_station(struct net_device *dev,
				    struct net_device *sdev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_l2_accel *old_l2_accel = NULL;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int size, i;
	u16 vid, glort;

	/* The hardware supported by fm10k only filters on the destination MAC
	 * address. In order to avoid issues we only support offloading modes
	 * where the hardware can actually provide the functionality.
	 */
	if (!macvlan_supports_dest_filter(sdev))
		return ERR_PTR(-EMEDIUMTYPE);

	/* allocate l2 accel structure if it is not available */
	if (!l2_accel) {
		/* verify there is enough free GLORTs to support l2_accel */
		if (interface->glort_count < 7)
			return ERR_PTR(-EBUSY);

		size = offsetof(struct fm10k_l2_accel, macvlan[7]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		l2_accel->size = 7;
		l2_accel->dglort = interface->glort;

		/* update pointers to l2 accel structure */
		fm10k_assign_l2_accel(interface, l2_accel);

	/* do not allow more l2 accel devices than we have glorts */
	} else if ((l2_accel->count == FM10K_MAX_STATIONS) ||
		   (l2_accel->count == (interface->glort_count - 1))) {
		return ERR_PTR(-EBUSY);

	/* expand if we have hit the size limit */
	} else if (l2_accel->count == l2_accel->size) {
		old_l2_accel = l2_accel;
		size = offsetof(struct fm10k_l2_accel,
				macvlan[(l2_accel->size * 2) + 1]);
		l2_accel = kzalloc(size, GFP_KERNEL);
		if (!l2_accel)
			return ERR_PTR(-ENOMEM);

		memcpy(l2_accel, old_l2_accel,
		       offsetof(struct fm10k_l2_accel,
				macvlan[old_l2_accel->size]));

		l2_accel->size = (old_l2_accel->size * 2) + 1;

		/* update pointers to l2 accel structure */
		fm10k_assign_l2_accel(interface, l2_accel);
		kfree_rcu(old_l2_accel, rcu);
	}

	/* search table for free entries */
	for (i = 0; i < l2_accel->size; i++) {
		if (!l2_accel->macvlan[i])
			break;
	}

	/* record station */
	l2_accel->macvlan[i] = sdev;
	l2_accel->count++;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* Add rules for this specific dglort to the switch */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;

	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort,
					      FM10K_XCAST_MODE_NONE);

	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
				hw->mac.default_vid, true);

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid))
		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
					vid, true);

	fm10k_mbx_unlock(interface);

	return sdev;
}

static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	struct net_device *sdev = priv;
	u16 vid, glort;
	int i;

	if (!l2_accel)
		return;

	/* search table for matching interface */
	for (i = 0; i < l2_accel->size; i++) {
		if (l2_accel->macvlan[i] == sdev)
			break;
	}

	/* exit if macvlan not found */
	if (i == l2_accel->size)
		return;

	/* Remove any rules specific to this dglort */
	fm10k_mbx_lock(interface);

	glort = l2_accel->dglort + 1 + i;

	if (fm10k_host_mbx_ready(interface))
		hw->mac.ops.update_xcast_mode(hw, glort,
					      FM10K_XCAST_MODE_NONE);

	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
				hw->mac.default_vid, false);

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid))
		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
					vid, false);

	fm10k_mbx_unlock(interface);

	/* record removal */
	l2_accel->macvlan[i] = NULL;
	l2_accel->count--;

	/* configure default DGLORT mapping for RSS/DCB */
	dglort.idx = fm10k_dglort_pf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
	dglort.glort = interface->glort;
	dglort.shared_l = fls(l2_accel->size);
	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* If table is empty remove it */
	if (l2_accel->count == 0) {
		fm10k_assign_l2_accel(interface, NULL);
		kfree_rcu(l2_accel, rcu);
	}
}

static netdev_features_t fm10k_features_check(struct sk_buff *skb,
					      struct net_device *dev,
					      netdev_features_t features)
{
	if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
		return features;

	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static const struct net_device_ops fm10k_netdev_ops = {
	.ndo_open		= fm10k_open,
	.ndo_stop		= fm10k_close,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_start_xmit		= fm10k_xmit_frame,
	.ndo_set_mac_address	= fm10k_set_mac,
	.ndo_tx_timeout		= fm10k_tx_timeout,
	.ndo_vlan_rx_add_vid	= fm10k_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= fm10k_vlan_rx_kill_vid,
	.ndo_set_rx_mode	= fm10k_set_rx_mode,
	.ndo_get_stats64	= fm10k_get_stats64,
	.ndo_setup_tc		= __fm10k_setup_tc,
	.ndo_set_vf_mac		= fm10k_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= fm10k_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= fm10k_ndo_set_vf_bw,
	.ndo_get_vf_config	= fm10k_ndo_get_vf_config,
	.ndo_get_vf_stats	= fm10k_ndo_get_vf_stats,
	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
	.ndo_features_check	= fm10k_features_check,
};

#define DEFAULT_DEBUG_LEVEL_SHIFT 3

struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
{
	netdev_features_t hw_features;
	struct fm10k_intfc *interface;
	struct net_device *dev;

	dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
	if (!dev)
		return NULL;

	/* set net device and ethtool ops */
	dev->netdev_ops = &fm10k_netdev_ops;
	fm10k_set_ethtool_ops(dev);

	/* configure default debug level */
	interface = netdev_priv(dev);
	interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;

	/* configure default features */
	dev->features |= NETIF_F_IP_CSUM |
			 NETIF_F_IPV6_CSUM |
			 NETIF_F_SG |
			 NETIF_F_TSO |
			 NETIF_F_TSO6 |
			 NETIF_F_TSO_ECN |
			 NETIF_F_RXHASH |
			 NETIF_F_RXCSUM;

	/* Only the PF can support VXLAN and NVGRE tunnel offloads */
	if (info->mac == fm10k_mac_pf) {
		dev->hw_enc_features = NETIF_F_IP_CSUM |
				       NETIF_F_TSO |
				       NETIF_F_TSO6 |
				       NETIF_F_TSO_ECN |
				       NETIF_F_GSO_UDP_TUNNEL |
				       NETIF_F_IPV6_CSUM |
				       NETIF_F_SG;

		dev->features |= NETIF_F_GSO_UDP_TUNNEL;

		dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
	}

	/* all features defined to this point should be changeable */
	hw_features = dev->features;

	/* allow user to enable L2 forwarding acceleration */
	hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;

	/* configure VLAN features */
	dev->vlan_features |= dev->features;

	/* we want to leave these both on as we cannot disable VLAN tag
	 * insertion or stripping on the hardware since it is contained
	 * in the FTAG and not in the frame itself.
	 */
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_HW_VLAN_CTAG_RX |
			 NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->priv_flags |= IFF_UNICAST_FLT;

	dev->hw_features |= hw_features;

	/* MTU range: 68 - 15342 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;

	return dev;
}