/* nfp_net_common.c
 * Netronome network device driver: functionality common to the PF and VF
 * vNIC netdevs - firmware reconfig, interrupt setup, ring configuration
 * and the shared net_device_ops implementations.
 */
0014 #include <linux/bitfield.h>
0015 #include <linux/bpf.h>
0016 #include <linux/module.h>
0017 #include <linux/kernel.h>
0018 #include <linux/init.h>
0019 #include <linux/fs.h>
0020 #include <linux/netdevice.h>
0021 #include <linux/etherdevice.h>
0022 #include <linux/interrupt.h>
0023 #include <linux/ip.h>
0024 #include <linux/ipv6.h>
0025 #include <linux/mm.h>
0026 #include <linux/overflow.h>
0027 #include <linux/page_ref.h>
0028 #include <linux/pci.h>
0029 #include <linux/pci_regs.h>
0030 #include <linux/msi.h>
0031 #include <linux/ethtool.h>
0032 #include <linux/log2.h>
0033 #include <linux/if_vlan.h>
0034 #include <linux/if_bridge.h>
0035 #include <linux/random.h>
0036 #include <linux/vmalloc.h>
0037 #include <linux/ktime.h>
0038
0039 #include <net/tls.h>
0040 #include <net/vxlan.h>
0041 #include <net/xdp_sock_drv.h>
0042
0043 #include "nfpcore/nfp_dev.h"
0044 #include "nfpcore/nfp_nsp.h"
0045 #include "ccm.h"
0046 #include "nfp_app.h"
0047 #include "nfp_net_ctrl.h"
0048 #include "nfp_net.h"
0049 #include "nfp_net_dp.h"
0050 #include "nfp_net_sriov.h"
0051 #include "nfp_net_xsk.h"
0052 #include "nfp_port.h"
0053 #include "crypto/crypto.h"
0054 #include "crypto/fw.h"
0055
/**
 * nfp_net_get_fw_version() - Read and parse the FW version
 * @fw_ver:   Output fw_version structure to read to
 * @ctrl_bar: Mapped address of the control BAR
 */
0061 void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
0062 void __iomem *ctrl_bar)
0063 {
0064 u32 reg;
0065
0066 reg = readl(ctrl_bar + NFP_NET_CFG_VERSION);
0067 put_unaligned_le32(reg, fw_ver);
0068 }
0069
0070 u32 nfp_qcp_queue_offset(const struct nfp_dev_info *dev_info, u16 queue)
0071 {
0072 queue &= dev_info->qc_idx_mask;
0073 return dev_info->qc_addr_offset + NFP_QCP_QUEUE_ADDR_SZ * queue;
0074 }
0075
/* Firmware reconfig
 *
 * Firmware reconfig may take a while so we have two versions of it -
 * synchronous and asynchronous (posted).  Synchronous callers serialize
 * via the control BAR lock, posted requests are merged and kicked off
 * from a timer once the previous update completes.
 */
0082 static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
0083 {
0084 nn_writel(nn, NFP_NET_CFG_UPDATE, update);
0085
0086 nn_pci_flush(nn);
0087 nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
0088 nn->reconfig_in_progress_update = update;
0089 }
0090
0091
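/* Pass 0 as update to run posted reconfigs. */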
0092 static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
0093 {
0094 update |= nn->reconfig_posted;
0095 nn->reconfig_posted = 0;
0096
0097 nfp_net_reconfig_start(nn, update);
0098
0099 nn->reconfig_timer_active = true;
0100 mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
0101 }
0102
0103 static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
0104 {
0105 u32 reg;
0106
0107 reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
0108 if (reg == 0)
0109 return true;
0110 if (reg & NFP_NET_CFG_UPDATE_ERR) {
0111 nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
0112 reg, nn->reconfig_in_progress_update,
0113 nn_readl(nn, NFP_NET_CFG_CTRL));
0114 return true;
0115 } else if (last_check) {
0116 nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n",
0117 reg, nn->reconfig_in_progress_update,
0118 nn_readl(nn, NFP_NET_CFG_CTRL));
0119 return true;
0120 }
0121
0122 return false;
0123 }
0124
0125 static bool __nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
0126 {
0127 bool timed_out = false;
0128 int i;
0129
/* Poll update field, waiting for NFP to ack the config.
 * Do a short busy-wait first, then back off to sleeping.
 */
0133 for (i = 0; i < 50; i++) {
0134 if (nfp_net_reconfig_check_done(nn, false))
0135 return false;
0136 udelay(4);
0137 }
0138
0139 while (!nfp_net_reconfig_check_done(nn, timed_out)) {
0140 usleep_range(250, 500);
0141 timed_out = time_is_before_eq_jiffies(deadline);
0142 }
0143
0144 return timed_out;
0145 }
0146
0147 static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
0148 {
0149 if (__nfp_net_reconfig_wait(nn, deadline))
0150 return -EIO;
0151
0152 if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
0153 return -EIO;
0154
0155 return 0;
0156 }
0157
0158 static void nfp_net_reconfig_timer(struct timer_list *t)
0159 {
0160 struct nfp_net *nn = from_timer(nn, t, reconfig_timer);
0161
0162 spin_lock_bh(&nn->reconfig_lock);
0163
0164 nn->reconfig_timer_active = false;
0165
0166
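/* If a synchronous reconfig is in progress it will take over from us */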
0167 if (nn->reconfig_sync_present)
0168 goto done;
0169
0170
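/* Timer expired - read reconfig status and report errors */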
0171 nfp_net_reconfig_check_done(nn, true);
0172
0173 if (nn->reconfig_posted)
0174 nfp_net_reconfig_start_async(nn, 0);
0175 done:
0176 spin_unlock_bh(&nn->reconfig_lock);
0177 }
0178
/**
 * nfp_net_reconfig_post() - Post an async reconfig request
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Record a firmware reconfiguration request.  The update is kicked off
 * whenever the reconfiguration machinery is idle; multiple posted
 * requests may be merged into a single update.
 */
0188 static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
0189 {
0190 spin_lock_bh(&nn->reconfig_lock);
0191
0192
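/* Sync caller will kick off the async reconfig when it's done, just post */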
0193 if (nn->reconfig_sync_present) {
0194 nn->reconfig_posted |= update;
0195 goto done;
0196 }
0197
0198
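/* Opportunistically check if the previous command is done */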
0199 if (!nn->reconfig_timer_active ||
0200 nfp_net_reconfig_check_done(nn, false))
0201 nfp_net_reconfig_start_async(nn, update);
0202 else
0203 nn->reconfig_posted |= update;
0204 done:
0205 spin_unlock_bh(&nn->reconfig_lock);
0206 }
0207
0208 static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
0209 {
0210 bool cancelled_timer = false;
0211 u32 pre_posted_requests;
0212
0213 spin_lock_bh(&nn->reconfig_lock);
0214
0215 WARN_ON(nn->reconfig_sync_present);
0216 nn->reconfig_sync_present = true;
0217
0218 if (nn->reconfig_timer_active) {
0219 nn->reconfig_timer_active = false;
0220 cancelled_timer = true;
0221 }
0222 pre_posted_requests = nn->reconfig_posted;
0223 nn->reconfig_posted = 0;
0224
0225 spin_unlock_bh(&nn->reconfig_lock);
0226
0227 if (cancelled_timer) {
0228 del_timer_sync(&nn->reconfig_timer);
0229 nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
0230 }
0231
0232
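/* Run the posted reconfigs which were issued before we started */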
0233 if (pre_posted_requests) {
0234 nfp_net_reconfig_start(nn, pre_posted_requests);
0235 nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
0236 }
0237 }
0238
0239 static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
0240 {
0241 nfp_net_reconfig_sync_enter(nn);
0242
0243 spin_lock_bh(&nn->reconfig_lock);
0244 nn->reconfig_sync_present = false;
0245 spin_unlock_bh(&nn->reconfig_lock);
0246 }
0247
0248
/**
 * __nfp_net_reconfig() - Reconfigure the firmware
 * @nn:      NFP Net device to reconfigure
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue, then
 * poll until the firmware has acknowledged the update by zeroing the
 * update word.
 *
 * Return: negative errno on error, 0 on success.
 */
0259 int __nfp_net_reconfig(struct nfp_net *nn, u32 update)
0260 {
0261 int ret;
0262
0263 nfp_net_reconfig_sync_enter(nn);
0264
0265 nfp_net_reconfig_start(nn, update);
0266 ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
0267
0268 spin_lock_bh(&nn->reconfig_lock);
0269
0270 if (nn->reconfig_posted)
0271 nfp_net_reconfig_start_async(nn, 0);
0272
0273 nn->reconfig_sync_present = false;
0274
0275 spin_unlock_bh(&nn->reconfig_lock);
0276
0277 return ret;
0278 }
0279
0280 int nfp_net_reconfig(struct nfp_net *nn, u32 update)
0281 {
0282 int ret;
0283
0284 nn_ctrl_bar_lock(nn);
0285 ret = __nfp_net_reconfig(nn, update);
0286 nn_ctrl_bar_unlock(nn);
0287
0288 return ret;
0289 }
0290
0291 int nfp_net_mbox_lock(struct nfp_net *nn, unsigned int data_size)
0292 {
0293 if (nn->tlv_caps.mbox_len < NFP_NET_CFG_MBOX_SIMPLE_VAL + data_size) {
0294 nn_err(nn, "mailbox too small for %u of data (%u)\n",
0295 data_size, nn->tlv_caps.mbox_len);
0296 return -EIO;
0297 }
0298
0299 nn_ctrl_bar_lock(nn);
0300 return 0;
0301 }
0302
0303
/**
 * nfp_net_mbox_reconfig() - Reconfigure the firmware via the mailbox
 * @nn:        NFP Net device to reconfigure
 * @mbox_cmd:  The value for the mailbox command
 *
 * Helper function for mailbox command based reconfigurations.
 *
 * Return: negative errno on error, 0 on success.
 */
0312 int nfp_net_mbox_reconfig(struct nfp_net *nn, u32 mbox_cmd)
0313 {
0314 u32 mbox = nn->tlv_caps.mbox_off;
0315 int ret;
0316
0317 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
0318
0319 ret = __nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MBOX);
0320 if (ret) {
0321 nn_err(nn, "Mailbox update error\n");
0322 return ret;
0323 }
0324
0325 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
0326 }
0327
0328 void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 mbox_cmd)
0329 {
0330 u32 mbox = nn->tlv_caps.mbox_off;
0331
0332 nn_writeq(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_CMD, mbox_cmd);
0333
0334 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_MBOX);
0335 }
0336
0337 int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn)
0338 {
0339 u32 mbox = nn->tlv_caps.mbox_off;
0340
0341 nfp_net_reconfig_wait_posted(nn);
0342
0343 return -nn_readl(nn, mbox + NFP_NET_CFG_MBOX_SIMPLE_RET);
0344 }
0345
0346 int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd)
0347 {
0348 int ret;
0349
0350 ret = nfp_net_mbox_reconfig(nn, mbox_cmd);
0351 nn_ctrl_bar_unlock(nn);
0352 return ret;
0353 }
0354
0355
/* Interrupt configuration and handling
 */

/**
 * nfp_net_irqs_alloc() - Allocate MSI-X IRQs
 * @pdev:        PCI device structure
 * @irq_entries: Array to be initialized and used to hold the IRQ entries
 * @min_irqs:    Minimal acceptable number of interrupts
 * @wanted_irqs: Target number of interrupts to allocate
 *
 * Return: number of IRQs obtained, or 0 on error.
 */
0367 unsigned int
0368 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
0369 unsigned int min_irqs, unsigned int wanted_irqs)
0370 {
0371 unsigned int i;
0372 int got_irqs;
0373
0374 for (i = 0; i < wanted_irqs; i++)
0375 irq_entries[i].entry = i;
0376
0377 got_irqs = pci_enable_msix_range(pdev, irq_entries,
0378 min_irqs, wanted_irqs);
0379 if (got_irqs < 0) {
0380 dev_err(&pdev->dev, "Failed to enable %d-%d MSI-X (err=%d)\n",
0381 min_irqs, wanted_irqs, got_irqs);
0382 return 0;
0383 }
0384
0385 if (got_irqs < wanted_irqs)
0386 dev_warn(&pdev->dev, "Unable to allocate %d IRQs got only %d\n",
0387 wanted_irqs, got_irqs);
0388
0389 return got_irqs;
0390 }
0391
/**
 * nfp_net_irqs_assign() - Assign interrupts allocated externally to netdev
 * @nn:          NFP Network structure
 * @irq_entries: Table of allocated interrupts
 * @n:           Size of the table
 *
 * After interrupts are allocated with nfp_net_irqs_alloc() this function
 * should be called to assign them to a specific netdev (port).
 */
0401 void
0402 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
0403 unsigned int n)
0404 {
0405 struct nfp_net_dp *dp = &nn->dp;
0406
0407 nn->max_r_vecs = n - NFP_NET_NON_Q_VECTORS;
0408 dp->num_r_vecs = nn->max_r_vecs;
0409
0410 memcpy(nn->irq_entries, irq_entries, sizeof(*irq_entries) * n);
0411
0412 if (dp->num_rx_rings > dp->num_r_vecs ||
0413 dp->num_tx_rings > dp->num_r_vecs)
0414 dev_warn(nn->dp.dev, "More rings (%d,%d) than vectors (%d).\n",
0415 dp->num_rx_rings, dp->num_tx_rings,
0416 dp->num_r_vecs);
0417
0418 dp->num_rx_rings = min(dp->num_r_vecs, dp->num_rx_rings);
0419 dp->num_tx_rings = min(dp->num_r_vecs, dp->num_tx_rings);
0420 dp->num_stack_tx_rings = dp->num_tx_rings;
0421 }
0422
/**
 * nfp_net_irqs_disable() - Disable interrupts
 * @pdev:        PCI device structure
 *
 * Undoes what nfp_net_irqs_alloc() did.
 */
0429 void nfp_net_irqs_disable(struct pci_dev *pdev)
0430 {
0431 pci_disable_msix(pdev);
0432 }
0433
/**
 * nfp_net_irq_rxtx() - Interrupt service routine for RX/TX rings
 * @irq:      Interrupt
 * @data:     Opaque data structure (ring vector)
 *
 * Return: indication that the interrupt has been handled.
 */
0441 static irqreturn_t nfp_net_irq_rxtx(int irq, void *data)
0442 {
0443 struct nfp_net_r_vector *r_vec = data;
0444
/* We cannot tell whether this was an RX or a TX interrupt; DIM does not
 * need an exact count, so a single event counter is used for both.
 */
0449 r_vec->event_ctr++;
0450
0451 napi_schedule_irqoff(&r_vec->napi);
0452
/* The FW auto-masks any interrupt, either via the MASK bit in the MSI-X
 * table or via the per-entry ICR field, so there is no need to disable
 * interrupts here.
 */
0457 return IRQ_HANDLED;
0458 }
0459
0460 static irqreturn_t nfp_ctrl_irq_rxtx(int irq, void *data)
0461 {
0462 struct nfp_net_r_vector *r_vec = data;
0463
0464 tasklet_schedule(&r_vec->tasklet);
0465
0466 return IRQ_HANDLED;
0467 }
0468
/**
 * nfp_net_read_link_status() - Reread link status from control BAR
 * @nn:       NFP Network structure
 */
0473 static void nfp_net_read_link_status(struct nfp_net *nn)
0474 {
0475 unsigned long flags;
0476 bool link_up;
0477 u32 sts;
0478
0479 spin_lock_irqsave(&nn->link_status_lock, flags);
0480
0481 sts = nn_readl(nn, NFP_NET_CFG_STS);
0482 link_up = !!(sts & NFP_NET_CFG_STS_LINK);
0483
0484 if (nn->link_up == link_up)
0485 goto out;
0486
0487 nn->link_up = link_up;
0488 if (nn->port)
0489 set_bit(NFP_PORT_CHANGED, &nn->port->flags);
0490
0491 if (nn->link_up) {
0492 netif_carrier_on(nn->dp.netdev);
0493 netdev_info(nn->dp.netdev, "NIC Link is Up\n");
0494 } else {
0495 netif_carrier_off(nn->dp.netdev);
0496 netdev_info(nn->dp.netdev, "NIC Link is Down\n");
0497 }
0498 out:
0499 spin_unlock_irqrestore(&nn->link_status_lock, flags);
0500 }
0501
/**
 * nfp_net_irq_lsc() - Interrupt service routine for link state changes
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: indication that the interrupt has been handled.
 */
0509 static irqreturn_t nfp_net_irq_lsc(int irq, void *data)
0510 {
0511 struct nfp_net *nn = data;
0512 struct msix_entry *entry;
0513
0514 entry = &nn->irq_entries[NFP_NET_IRQ_LSC_IDX];
0515
0516 nfp_net_read_link_status(nn);
0517
0518 nfp_net_irq_unmask(nn, entry->entry);
0519
0520 return IRQ_HANDLED;
0521 }
0522
/**
 * nfp_net_irq_exn() - Interrupt service routine for exceptions
 * @irq:      Interrupt
 * @data:     Opaque data structure
 *
 * Return: indication that the interrupt has been handled.
 */
0530 static irqreturn_t nfp_net_irq_exn(int irq, void *data)
0531 {
0532 struct nfp_net *nn = data;
0533
0534 nn_err(nn, "%s: UNIMPLEMENTED.\n", __func__);
0535
0536 return IRQ_HANDLED;
0537 }
0538
/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:      printf-style format to construct the interrupt name
 * @name:        Pointer to allocated space for interrupt name
 * @name_sz:     Size of space for interrupt name
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 * @handler:     IRQ handler to register for this interrupt
 *
 * Return: negative errno on error, 0 on success.
 */
0549 static int
0550 nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
0551 const char *format, char *name, size_t name_sz,
0552 unsigned int vector_idx, irq_handler_t handler)
0553 {
0554 struct msix_entry *entry;
0555 int err;
0556
0557 entry = &nn->irq_entries[vector_idx];
0558
0559 snprintf(name, name_sz, format, nfp_net_name(nn));
0560 err = request_irq(entry->vector, handler, 0, name, nn);
0561 if (err) {
0562 nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
0563 entry->vector, err);
0564 return err;
0565 }
0566 nn_writeb(nn, ctrl_offset, entry->entry);
0567 nfp_net_irq_unmask(nn, entry->entry);
0568
0569 return 0;
0570 }
0571
/**
 * nfp_net_aux_irq_free() - Free an auxiliary interrupt (LSC or EXN)
 * @nn:          NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @vector_idx:  Index of MSI-X vector used for this interrupt
 */
0578 static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
0579 unsigned int vector_idx)
0580 {
0581 nn_writeb(nn, ctrl_offset, 0xff);
0582 nn_pci_flush(nn);
0583 free_irq(nn->irq_entries[vector_idx].vector, nn);
0584 }
0585
0586 struct sk_buff *
0587 nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
0588 struct sk_buff *skb, u64 *tls_handle, int *nr_frags)
0589 {
0590 #ifdef CONFIG_TLS_DEVICE
0591 struct nfp_net_tls_offload_ctx *ntls;
0592 struct sk_buff *nskb;
0593 bool resync_pending;
0594 u32 datalen, seq;
0595
0596 if (likely(!dp->ktls_tx))
0597 return skb;
0598 if (!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk))
0599 return skb;
0600
0601 datalen = skb->len - skb_tcp_all_headers(skb);
0602 seq = ntohl(tcp_hdr(skb)->seq);
0603 ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
0604 resync_pending = tls_offload_tx_resync_pending(skb->sk);
0605 if (unlikely(resync_pending || ntls->next_seq != seq)) {
0606
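/* Out of order pure ACK - nothing to encrypt, send it as is */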
0607 if (!datalen)
0608 return skb;
0609
0610 u64_stats_update_begin(&r_vec->tx_sync);
0611 r_vec->tls_tx_fallback++;
0612 u64_stats_update_end(&r_vec->tx_sync);
0613
0614 nskb = tls_encrypt_skb(skb);
0615 if (!nskb) {
0616 u64_stats_update_begin(&r_vec->tx_sync);
0617 r_vec->tls_tx_no_fallback++;
0618 u64_stats_update_end(&r_vec->tx_sync);
0619 return NULL;
0620 }
0621
0622 if (nskb == skb)
0623 return skb;
0624
0625 if (unlikely(skb_is_nonlinear(nskb))) {
0626 nn_dp_warn(dp, "tls_encrypt_skb() produced fragmented frame\n");
0627 u64_stats_update_begin(&r_vec->tx_sync);
0628 r_vec->tx_errors++;
0629 u64_stats_update_end(&r_vec->tx_sync);
0630 dev_kfree_skb_any(nskb);
0631 return NULL;
0632 }
0633
0634
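/* Sequence number jumped forward - a segment may have been lost, ask the
 * stack for a TX resync.
 */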
0635 if (!resync_pending && seq - ntls->next_seq < U32_MAX / 4)
0636 tls_offload_tx_resync_request(nskb->sk, seq,
0637 ntls->next_seq);
0638
0639 *nr_frags = 0;
0640 return nskb;
0641 }
0642
0643 if (datalen) {
0644 u64_stats_update_begin(&r_vec->tx_sync);
0645 if (!skb_is_gso(skb))
0646 r_vec->hw_tls_tx++;
0647 else
0648 r_vec->hw_tls_tx += skb_shinfo(skb)->gso_segs;
0649 u64_stats_update_end(&r_vec->tx_sync);
0650 }
0651
0652 memcpy(tls_handle, ntls->fw_handle, sizeof(ntls->fw_handle));
0653 ntls->next_seq += datalen;
0654 #endif
0655 return skb;
0656 }
0657
0658 void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle)
0659 {
0660 #ifdef CONFIG_TLS_DEVICE
0661 struct nfp_net_tls_offload_ctx *ntls;
0662 u32 datalen, seq;
0663
0664 if (!tls_handle)
0665 return;
0666 if (WARN_ON_ONCE(!skb->sk || !tls_is_sk_tx_device_offloaded(skb->sk)))
0667 return;
0668
0669 datalen = skb->len - skb_tcp_all_headers(skb);
0670 seq = ntohl(tcp_hdr(skb)->seq);
0671
0672 ntls = tls_driver_ctx(skb->sk, TLS_OFFLOAD_CTX_DIR_TX);
0673 if (ntls->next_seq == seq + datalen)
0674 ntls->next_seq = seq;
0675 else
0676 WARN_ON_ONCE(1);
0677 #endif
0678 }
0679
0680 static void nfp_net_tx_timeout(struct net_device *netdev, unsigned int txqueue)
0681 {
0682 struct nfp_net *nn = netdev_priv(netdev);
0683
0684 nn_warn(nn, "TX watchdog timeout on ring: %u\n", txqueue);
0685 }
0686
0687
0688 static unsigned int
0689 nfp_net_calc_fl_bufsz_data(struct nfp_net_dp *dp)
0690 {
0691 unsigned int fl_bufsz = 0;
0692
0693 if (dp->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
0694 fl_bufsz += NFP_NET_MAX_PREPEND;
0695 else
0696 fl_bufsz += dp->rx_offset;
0697 fl_bufsz += ETH_HLEN + VLAN_HLEN * 2 + dp->mtu;
0698
0699 return fl_bufsz;
0700 }
0701
0702 static unsigned int nfp_net_calc_fl_bufsz(struct nfp_net_dp *dp)
0703 {
0704 unsigned int fl_bufsz;
0705
0706 fl_bufsz = NFP_NET_RX_BUF_HEADROOM;
0707 fl_bufsz += dp->rx_dma_off;
0708 fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
0709
0710 fl_bufsz = SKB_DATA_ALIGN(fl_bufsz);
0711 fl_bufsz += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
0712
0713 return fl_bufsz;
0714 }
0715
0716 static unsigned int nfp_net_calc_fl_bufsz_xsk(struct nfp_net_dp *dp)
0717 {
0718 unsigned int fl_bufsz;
0719
0720 fl_bufsz = XDP_PACKET_HEADROOM;
0721 fl_bufsz += nfp_net_calc_fl_bufsz_data(dp);
0722
0723 return fl_bufsz;
0724 }
0725
0726
/* Setup and Configuration
 */

/**
 * nfp_net_vecs_init() - Assign IRQs and set up ring vectors
 * @nn:          NFP Network structure
 */
0733 static void nfp_net_vecs_init(struct nfp_net *nn)
0734 {
0735 struct nfp_net_r_vector *r_vec;
0736 int r;
0737
0738 nn->lsc_handler = nfp_net_irq_lsc;
0739 nn->exn_handler = nfp_net_irq_exn;
0740
0741 for (r = 0; r < nn->max_r_vecs; r++) {
0742 struct msix_entry *entry;
0743
0744 entry = &nn->irq_entries[NFP_NET_NON_Q_VECTORS + r];
0745
0746 r_vec = &nn->r_vecs[r];
0747 r_vec->nfp_net = nn;
0748 r_vec->irq_entry = entry->entry;
0749 r_vec->irq_vector = entry->vector;
0750
0751 if (nn->dp.netdev) {
0752 r_vec->handler = nfp_net_irq_rxtx;
0753 } else {
0754 r_vec->handler = nfp_ctrl_irq_rxtx;
0755
0756 __skb_queue_head_init(&r_vec->queue);
0757 spin_lock_init(&r_vec->lock);
0758 tasklet_setup(&r_vec->tasklet, nn->dp.ops->ctrl_poll);
0759 tasklet_disable(&r_vec->tasklet);
0760 }
0761
0762 cpumask_set_cpu(r, &r_vec->affinity_mask);
0763 }
0764 }
0765
0766 static void
0767 nfp_net_napi_add(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec, int idx)
0768 {
0769 if (dp->netdev)
0770 netif_napi_add(dp->netdev, &r_vec->napi,
0771 nfp_net_has_xsk_pool_slow(dp, idx) ?
0772 dp->ops->xsk_poll : dp->ops->poll,
0773 NAPI_POLL_WEIGHT);
0774 else
0775 tasklet_enable(&r_vec->tasklet);
0776 }
0777
0778 static void
0779 nfp_net_napi_del(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec)
0780 {
0781 if (dp->netdev)
0782 netif_napi_del(&r_vec->napi);
0783 else
0784 tasklet_disable(&r_vec->tasklet);
0785 }
0786
0787 static void
0788 nfp_net_vector_assign_rings(struct nfp_net_dp *dp,
0789 struct nfp_net_r_vector *r_vec, int idx)
0790 {
0791 r_vec->rx_ring = idx < dp->num_rx_rings ? &dp->rx_rings[idx] : NULL;
0792 r_vec->tx_ring =
0793 idx < dp->num_stack_tx_rings ? &dp->tx_rings[idx] : NULL;
0794
0795 r_vec->xdp_ring = idx < dp->num_tx_rings - dp->num_stack_tx_rings ?
0796 &dp->tx_rings[dp->num_stack_tx_rings + idx] : NULL;
0797
0798 if (nfp_net_has_xsk_pool_slow(dp, idx) || r_vec->xsk_pool) {
0799 r_vec->xsk_pool = dp->xdp_prog ? dp->xsk_pools[idx] : NULL;
0800
0801 if (r_vec->xsk_pool)
0802 xsk_pool_set_rxq_info(r_vec->xsk_pool,
0803 &r_vec->rx_ring->xdp_rxq);
0804
0805 nfp_net_napi_del(dp, r_vec);
0806 nfp_net_napi_add(dp, r_vec, idx);
0807 }
0808 }
0809
0810 static int
0811 nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
0812 int idx)
0813 {
0814 int err;
0815
0816 nfp_net_napi_add(&nn->dp, r_vec, idx);
0817
0818 snprintf(r_vec->name, sizeof(r_vec->name),
0819 "%s-rxtx-%d", nfp_net_name(nn), idx);
0820 err = request_irq(r_vec->irq_vector, r_vec->handler, 0, r_vec->name,
0821 r_vec);
0822 if (err) {
0823 nfp_net_napi_del(&nn->dp, r_vec);
0824 nn_err(nn, "Error requesting IRQ %d\n", r_vec->irq_vector);
0825 return err;
0826 }
0827 disable_irq(r_vec->irq_vector);
0828
0829 irq_set_affinity_hint(r_vec->irq_vector, &r_vec->affinity_mask);
0830
0831 nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, r_vec->irq_vector,
0832 r_vec->irq_entry);
0833
0834 return 0;
0835 }
0836
0837 static void
0838 nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
0839 {
0840 irq_set_affinity_hint(r_vec->irq_vector, NULL);
0841 nfp_net_napi_del(&nn->dp, r_vec);
0842 free_irq(r_vec->irq_vector, r_vec);
0843 }
0844
/**
 * nfp_net_rss_write_itbl() - Write RSS indirection table to device
 * @nn:      NFP Net device to reconfigure
 */
0849 void nfp_net_rss_write_itbl(struct nfp_net *nn)
0850 {
0851 int i;
0852
0853 for (i = 0; i < NFP_NET_CFG_RSS_ITBL_SZ; i += 4)
0854 nn_writel(nn, NFP_NET_CFG_RSS_ITBL + i,
0855 get_unaligned_le32(nn->rss_itbl + i));
0856 }
0857
/**
 * nfp_net_rss_write_key() - Write RSS hash key to device
 * @nn:      NFP Net device to reconfigure
 */
0862 void nfp_net_rss_write_key(struct nfp_net *nn)
0863 {
0864 int i;
0865
0866 for (i = 0; i < nfp_net_rss_key_sz(nn); i += 4)
0867 nn_writel(nn, NFP_NET_CFG_RSS_KEY + i,
0868 get_unaligned_le32(nn->rss_key + i));
0869 }
0870
/**
 * nfp_net_coalesce_write_cfg() - Write IRQ coalescence configuration to HW
 * @nn:      NFP Net device to reconfigure
 */
0875 void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
0876 {
0877 u8 i;
0878 u32 factor;
0879 u32 value;
0880
/* Compute factor used to convert coalesce '_usecs' parameters to
 * ME timestamp ticks.  There are 16 ME clock cycles for each
 * timestamp count.
 */
0885 factor = nn->tlv_caps.me_freq_mhz / 16;
0886
0887
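/* Copy RX interrupt coalesce parameters */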
0888 value = (nn->rx_coalesce_max_frames << 16) |
0889 (factor * nn->rx_coalesce_usecs);
0890 for (i = 0; i < nn->dp.num_rx_rings; i++)
0891 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(i), value);
0892
0893
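/* Copy TX interrupt coalesce parameters */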
0894 value = (nn->tx_coalesce_max_frames << 16) |
0895 (factor * nn->tx_coalesce_usecs);
0896 for (i = 0; i < nn->dp.num_tx_rings; i++)
0897 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(i), value);
0898 }
0899
0900
/**
 * nfp_net_write_mac_addr() - Write MAC address to the device control BAR
 * @nn:      NFP Net device to reconfigure
 * @addr:    MAC address to write
 *
 * Writes the MAC address to the control BAR but does not perform the
 * required reconfig.  The byte swapping is needed because the firmware
 * is little-endian.
 */
0909 static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *addr)
0910 {
0911 nn_writel(nn, NFP_NET_CFG_MACADDR + 0, get_unaligned_be32(addr));
0912 nn_writew(nn, NFP_NET_CFG_MACADDR + 6, get_unaligned_be16(addr + 4));
0913 }
0914
0915
/**
 * nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
 * @nn:      NFP Net device to reconfigure
 *
 * Warning: must be fully idempotent.
 */
0921 static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
0922 {
0923 u32 new_ctrl, update;
0924 unsigned int r;
0925 int err;
0926
0927 new_ctrl = nn->dp.ctrl;
0928 new_ctrl &= ~NFP_NET_CFG_CTRL_ENABLE;
0929 update = NFP_NET_CFG_UPDATE_GEN;
0930 update |= NFP_NET_CFG_UPDATE_MSIX;
0931 update |= NFP_NET_CFG_UPDATE_RING;
0932
0933 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
0934 new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;
0935
0936 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
0937 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
0938
0939 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
0940 err = nfp_net_reconfig(nn, update);
0941 if (err)
0942 nn_err(nn, "Could not disable device: %d\n", err);
0943
0944 for (r = 0; r < nn->dp.num_rx_rings; r++) {
0945 nfp_net_rx_ring_reset(&nn->dp.rx_rings[r]);
0946 if (nfp_net_has_xsk_pool_slow(&nn->dp, nn->dp.rx_rings[r].idx))
0947 nfp_net_xsk_rx_bufs_free(&nn->dp.rx_rings[r]);
0948 }
0949 for (r = 0; r < nn->dp.num_tx_rings; r++)
0950 nfp_net_tx_ring_reset(&nn->dp, &nn->dp.tx_rings[r]);
0951 for (r = 0; r < nn->dp.num_r_vecs; r++)
0952 nfp_net_vec_clear_ring_data(nn, r);
0953
0954 nn->dp.ctrl = new_ctrl;
0955 }
0956
/**
 * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
 * @nn:      NFP Net device to reconfigure
 */
0961 static int nfp_net_set_config_and_enable(struct nfp_net *nn)
0962 {
0963 u32 bufsz, new_ctrl, update = 0;
0964 unsigned int r;
0965 int err;
0966
0967 new_ctrl = nn->dp.ctrl;
0968
0969 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_RSS_ANY) {
0970 nfp_net_rss_write_key(nn);
0971 nfp_net_rss_write_itbl(nn);
0972 nn_writel(nn, NFP_NET_CFG_RSS_CTRL, nn->rss_cfg);
0973 update |= NFP_NET_CFG_UPDATE_RSS;
0974 }
0975
0976 if (nn->dp.ctrl & NFP_NET_CFG_CTRL_IRQMOD) {
0977 nfp_net_coalesce_write_cfg(nn);
0978 update |= NFP_NET_CFG_UPDATE_IRQMOD;
0979 }
0980
0981 for (r = 0; r < nn->dp.num_tx_rings; r++)
0982 nfp_net_tx_ring_hw_cfg_write(nn, &nn->dp.tx_rings[r], r);
0983 for (r = 0; r < nn->dp.num_rx_rings; r++)
0984 nfp_net_rx_ring_hw_cfg_write(nn, &nn->dp.rx_rings[r], r);
0985
0986 nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE,
0987 U64_MAX >> (64 - nn->dp.num_tx_rings));
0988
0989 nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE,
0990 U64_MAX >> (64 - nn->dp.num_rx_rings));
0991
0992 if (nn->dp.netdev)
0993 nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);
0994
0995 nn_writel(nn, NFP_NET_CFG_MTU, nn->dp.mtu);
0996
0997 bufsz = nn->dp.fl_bufsz - nn->dp.rx_dma_off - NFP_NET_RX_BUF_NON_DATA;
0998 nn_writel(nn, NFP_NET_CFG_FLBUFSZ, bufsz);
0999
1000
1001 new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;
1002 update |= NFP_NET_CFG_UPDATE_GEN;
1003 update |= NFP_NET_CFG_UPDATE_MSIX;
1004 update |= NFP_NET_CFG_UPDATE_RING;
1005 if (nn->cap & NFP_NET_CFG_CTRL_RINGCFG)
1006 new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;
1007
1008 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1009 err = nfp_net_reconfig(nn, update);
1010 if (err) {
1011 nfp_net_clear_config_and_disable(nn);
1012 return err;
1013 }
1014
1015 nn->dp.ctrl = new_ctrl;
1016
1017 for (r = 0; r < nn->dp.num_rx_rings; r++)
1018 nfp_net_rx_ring_fill_freelist(&nn->dp, &nn->dp.rx_rings[r]);
1019
1020 return 0;
1021 }
1022
/**
 * nfp_net_close_stack() - Quiesce the stack (part of close)
 * @nn:      NFP Net device to reconfigure
 */
1027 static void nfp_net_close_stack(struct nfp_net *nn)
1028 {
1029 struct nfp_net_r_vector *r_vec;
1030 unsigned int r;
1031
1032 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
1033 netif_carrier_off(nn->dp.netdev);
1034 nn->link_up = false;
1035
1036 for (r = 0; r < nn->dp.num_r_vecs; r++) {
1037 r_vec = &nn->r_vecs[r];
1038
1039 disable_irq(r_vec->irq_vector);
1040 napi_disable(&r_vec->napi);
1041
1042 if (r_vec->rx_ring)
1043 cancel_work_sync(&r_vec->rx_dim.work);
1044
1045 if (r_vec->tx_ring)
1046 cancel_work_sync(&r_vec->tx_dim.work);
1047 }
1048
1049 netif_tx_disable(nn->dp.netdev);
1050 }
1051
/**
 * nfp_net_close_free_all() - Free all runtime resources
 * @nn:      NFP Net device to reconfigure
 */
1056 static void nfp_net_close_free_all(struct nfp_net *nn)
1057 {
1058 unsigned int r;
1059
1060 nfp_net_tx_rings_free(&nn->dp);
1061 nfp_net_rx_rings_free(&nn->dp);
1062
1063 for (r = 0; r < nn->dp.num_r_vecs; r++)
1064 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1065
1066 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1067 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1068 }
1069
/**
 * nfp_net_netdev_close() - Called when the device is downed
 * @netdev:      netdev structure
 */
1074 static int nfp_net_netdev_close(struct net_device *netdev)
1075 {
1076 struct nfp_net *nn = netdev_priv(netdev);
1077
1078
1079
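/* Step 1: Disable RX and TX rings from the Linux kernel perspective */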
1080 nfp_net_close_stack(nn);
1081
1082
1083
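/* Step 2: Tell NFP to stop and take the physical port down */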
1084 nfp_net_clear_config_and_disable(nn);
1085 nfp_port_configure(netdev, false);
1086
1087
1088
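/* Step 3: Free resources */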
1089 nfp_net_close_free_all(nn);
1090
1091 nn_dbg(nn, "%s down", netdev->name);
1092 return 0;
1093 }
1094
1095 void nfp_ctrl_close(struct nfp_net *nn)
1096 {
1097 int r;
1098
1099 rtnl_lock();
1100
1101 for (r = 0; r < nn->dp.num_r_vecs; r++) {
1102 disable_irq(nn->r_vecs[r].irq_vector);
1103 tasklet_disable(&nn->r_vecs[r].tasklet);
1104 }
1105
1106 nfp_net_clear_config_and_disable(nn);
1107
1108 nfp_net_close_free_all(nn);
1109
1110 rtnl_unlock();
1111 }
1112
1113 static void nfp_net_rx_dim_work(struct work_struct *work)
1114 {
1115 struct nfp_net_r_vector *r_vec;
1116 unsigned int factor, value;
1117 struct dim_cq_moder moder;
1118 struct nfp_net *nn;
1119 struct dim *dim;
1120
1121 dim = container_of(work, struct dim, work);
1122 moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1123 r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim);
1124 nn = r_vec->nfp_net;
1125
/* Compute factor used to convert coalesce '_usecs' parameters to
 * ME timestamp ticks.  There are 16 ME clock cycles for each
 * timestamp count.
 */
1130 factor = nn->tlv_caps.me_freq_mhz / 16;
1131 if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
1132 return;
1133
1134
1135 value = (moder.pkts << 16) | (factor * moder.usec);
1136 nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
1137 (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
1138
1139 dim->state = DIM_START_MEASURE;
1140 }
1141
1142 static void nfp_net_tx_dim_work(struct work_struct *work)
1143 {
1144 struct nfp_net_r_vector *r_vec;
1145 unsigned int factor, value;
1146 struct dim_cq_moder moder;
1147 struct nfp_net *nn;
1148 struct dim *dim;
1149
1150 dim = container_of(work, struct dim, work);
1151 moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
1152 r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim);
1153 nn = r_vec->nfp_net;
1154
/* Compute factor used to convert coalesce '_usecs' parameters to
 * ME timestamp ticks.  There are 16 ME clock cycles for each
 * timestamp count.
 */
1159 factor = nn->tlv_caps.me_freq_mhz / 16;
1160 if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts))
1161 return;
1162
1163
1164 value = (moder.pkts << 16) | (factor * moder.usec);
1165 nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
1166 (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
1167
1168 dim->state = DIM_START_MEASURE;
1169 }
1170
/**
 * nfp_net_open_stack() - Start the device from stack's perspective
 * @nn:      NFP Net device to bring up
 */
1175 static void nfp_net_open_stack(struct nfp_net *nn)
1176 {
1177 struct nfp_net_r_vector *r_vec;
1178 unsigned int r;
1179
1180 for (r = 0; r < nn->dp.num_r_vecs; r++) {
1181 r_vec = &nn->r_vecs[r];
1182
1183 if (r_vec->rx_ring) {
1184 INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work);
1185 r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1186 }
1187
1188 if (r_vec->tx_ring) {
1189 INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work);
1190 r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
1191 }
1192
1193 napi_enable(&r_vec->napi);
1194 enable_irq(r_vec->irq_vector);
1195 }
1196
1197 netif_tx_wake_all_queues(nn->dp.netdev);
1198
1199 enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
1200 nfp_net_read_link_status(nn);
1201 }
1202
1203 static int nfp_net_open_alloc_all(struct nfp_net *nn)
1204 {
1205 int err, r;
1206
1207 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
1208 nn->exn_name, sizeof(nn->exn_name),
1209 NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
1210 if (err)
1211 return err;
1212 err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
1213 nn->lsc_name, sizeof(nn->lsc_name),
1214 NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
1215 if (err)
1216 goto err_free_exn;
1217 disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
1218
1219 for (r = 0; r < nn->dp.num_r_vecs; r++) {
1220 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
1221 if (err)
1222 goto err_cleanup_vec_p;
1223 }
1224
1225 err = nfp_net_rx_rings_prepare(nn, &nn->dp);
1226 if (err)
1227 goto err_cleanup_vec;
1228
1229 err = nfp_net_tx_rings_prepare(nn, &nn->dp);
1230 if (err)
1231 goto err_free_rx_rings;
1232
1233 for (r = 0; r < nn->max_r_vecs; r++)
1234 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
1235
1236 return 0;
1237
1238 err_free_rx_rings:
1239 nfp_net_rx_rings_free(&nn->dp);
1240 err_cleanup_vec:
1241 r = nn->dp.num_r_vecs;
1242 err_cleanup_vec_p:
1243 while (r--)
1244 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1245 nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
1246 err_free_exn:
1247 nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
1248 return err;
1249 }
1250
1251 static int nfp_net_netdev_open(struct net_device *netdev)
1252 {
1253 struct nfp_net *nn = netdev_priv(netdev);
1254 int err;
1255
/* Step 1: Allocate resources for rings and the like
 * - Request interrupts
 * - Allocate RX and TX ring resources
 */
1261 err = nfp_net_open_alloc_all(nn);
1262 if (err)
1263 return err;
1264
1265 err = netif_set_real_num_tx_queues(netdev, nn->dp.num_stack_tx_rings);
1266 if (err)
1267 goto err_free_all;
1268
1269 err = netif_set_real_num_rx_queues(netdev, nn->dp.num_rx_rings);
1270 if (err)
1271 goto err_free_all;
1272
/* Step 2: Configure the NFP
 * - Ifup the physical interface if it exists
 * - Enable rings from 0 to tx_rings/rx_rings - 1
 * - Write MAC address (in case it changed)
 * - Set the MTU
 * - Set the Freelist buffer size
 * - Enable the FW
 */
1281 err = nfp_port_configure(netdev, true);
1282 if (err)
1283 goto err_free_all;
1284
1285 err = nfp_net_set_config_and_enable(nn);
1286 if (err)
1287 goto err_port_disable;
1288
/* Step 3: Enable for kernel
 * - enable NAPI on each ring
 * - enable all TX queues
 * - set link state
 */
1295 nfp_net_open_stack(nn);
1296
1297 return 0;
1298
1299 err_port_disable:
1300 nfp_port_configure(netdev, false);
1301 err_free_all:
1302 nfp_net_close_free_all(nn);
1303 return err;
1304 }
1305
1306 int nfp_ctrl_open(struct nfp_net *nn)
1307 {
1308 int err, r;
1309
1310
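/* ring dumping depends on vNICs being opened/closed under rtnl */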
1311 rtnl_lock();
1312
1313 err = nfp_net_open_alloc_all(nn);
1314 if (err)
1315 goto err_unlock;
1316
1317 err = nfp_net_set_config_and_enable(nn);
1318 if (err)
1319 goto err_free_all;
1320
1321 for (r = 0; r < nn->dp.num_r_vecs; r++)
1322 enable_irq(nn->r_vecs[r].irq_vector);
1323
1324 rtnl_unlock();
1325
1326 return 0;
1327
1328 err_free_all:
1329 nfp_net_close_free_all(nn);
1330 err_unlock:
1331 rtnl_unlock();
1332 return err;
1333 }
1334
1335 static void nfp_net_set_rx_mode(struct net_device *netdev)
1336 {
1337 struct nfp_net *nn = netdev_priv(netdev);
1338 u32 new_ctrl;
1339
1340 new_ctrl = nn->dp.ctrl;
1341
1342 if (!netdev_mc_empty(netdev) || netdev->flags & IFF_ALLMULTI)
1343 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_L2MC;
1344 else
1345 new_ctrl &= ~NFP_NET_CFG_CTRL_L2MC;
1346
1347 if (netdev->flags & IFF_PROMISC) {
1348 if (nn->cap & NFP_NET_CFG_CTRL_PROMISC)
1349 new_ctrl |= NFP_NET_CFG_CTRL_PROMISC;
1350 else
1351 nn_warn(nn, "FW does not support promiscuous mode\n");
1352 } else {
1353 new_ctrl &= ~NFP_NET_CFG_CTRL_PROMISC;
1354 }
1355
1356 if (new_ctrl == nn->dp.ctrl)
1357 return;
1358
1359 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1360 nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
1361
1362 nn->dp.ctrl = new_ctrl;
1363 }
1364
1365 static void nfp_net_rss_init_itbl(struct nfp_net *nn)
1366 {
1367 int i;
1368
1369 for (i = 0; i < sizeof(nn->rss_itbl); i++)
1370 nn->rss_itbl[i] =
1371 ethtool_rxfh_indir_default(i, nn->dp.num_rx_rings);
1372 }
1373
1374 static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
1375 {
1376 struct nfp_net_dp new_dp = *dp;
1377
1378 *dp = nn->dp;
1379 nn->dp = new_dp;
1380
1381 nn->dp.netdev->mtu = new_dp.mtu;
1382
1383 if (!netif_is_rxfh_configured(nn->dp.netdev))
1384 nfp_net_rss_init_itbl(nn);
1385 }
1386
1387 static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp)
1388 {
1389 unsigned int r;
1390 int err;
1391
1392 nfp_net_dp_swap(nn, dp);
1393
1394 for (r = 0; r < nn->max_r_vecs; r++)
1395 nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r);
1396
1397 err = netif_set_real_num_queues(nn->dp.netdev,
1398 nn->dp.num_stack_tx_rings,
1399 nn->dp.num_rx_rings);
1400 if (err)
1401 return err;
1402
1403 return nfp_net_set_config_and_enable(nn);
1404 }
1405
1406 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
1407 {
1408 struct nfp_net_dp *new;
1409
1410 new = kmalloc(sizeof(*new), GFP_KERNEL);
1411 if (!new)
1412 return NULL;
1413
1414 *new = nn->dp;
1415
1416 new->xsk_pools = kmemdup(new->xsk_pools,
1417 array_size(nn->max_r_vecs,
1418 sizeof(new->xsk_pools)),
1419 GFP_KERNEL);
1420 if (!new->xsk_pools) {
1421 kfree(new);
1422 return NULL;
1423 }
1424
1425
1426 new->fl_bufsz = 0;
1427 new->tx_rings = NULL;
1428 new->rx_rings = NULL;
1429 new->num_r_vecs = 0;
1430 new->num_stack_tx_rings = 0;
1431 new->txrwb = NULL;
1432 new->txrwb_dma = 0;
1433
1434 return new;
1435 }
1436
1437 static void nfp_net_free_dp(struct nfp_net_dp *dp)
1438 {
1439 kfree(dp->xsk_pools);
1440 kfree(dp);
1441 }
1442
1443 static int
1444 nfp_net_check_config(struct nfp_net *nn, struct nfp_net_dp *dp,
1445 struct netlink_ext_ack *extack)
1446 {
1447 unsigned int r, xsk_min_fl_bufsz;
1448
1449
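/* The remaining checks only apply when an XDP program is attached */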
1450 if (!dp->xdp_prog)
1451 return 0;
1452 if (dp->fl_bufsz > PAGE_SIZE) {
1453 NL_SET_ERR_MSG_MOD(extack, "MTU too large w/ XDP enabled");
1454 return -EINVAL;
1455 }
1456 if (dp->num_tx_rings > nn->max_tx_rings) {
1457 NL_SET_ERR_MSG_MOD(extack, "Insufficient number of TX rings w/ XDP enabled");
1458 return -EINVAL;
1459 }
1460
1461 xsk_min_fl_bufsz = nfp_net_calc_fl_bufsz_xsk(dp);
1462 for (r = 0; r < nn->max_r_vecs; r++) {
1463 if (!dp->xsk_pools[r])
1464 continue;
1465
1466 if (xsk_pool_get_rx_frame_size(dp->xsk_pools[r]) < xsk_min_fl_bufsz) {
1467 NL_SET_ERR_MSG_MOD(extack,
1468 "XSK buffer pool chunk size too small");
1469 return -EINVAL;
1470 }
1471 }
1472
1473 return 0;
1474 }
1475
1476 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *dp,
1477 struct netlink_ext_ack *extack)
1478 {
1479 int r, err;
1480
1481 dp->fl_bufsz = nfp_net_calc_fl_bufsz(dp);
1482
1483 dp->num_stack_tx_rings = dp->num_tx_rings;
1484 if (dp->xdp_prog)
1485 dp->num_stack_tx_rings -= dp->num_rx_rings;
1486
1487 dp->num_r_vecs = max(dp->num_rx_rings, dp->num_stack_tx_rings);
1488
1489 err = nfp_net_check_config(nn, dp, extack);
1490 if (err)
1491 goto exit_free_dp;
1492
1493 if (!netif_running(dp->netdev)) {
1494 nfp_net_dp_swap(nn, dp);
1495 err = 0;
1496 goto exit_free_dp;
1497 }
1498
1499
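/* Prepare new rings */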
1500 for (r = nn->dp.num_r_vecs; r < dp->num_r_vecs; r++) {
1501 err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
1502 if (err) {
1503 dp->num_r_vecs = r;
1504 goto err_cleanup_vecs;
1505 }
1506 }
1507
1508 err = nfp_net_rx_rings_prepare(nn, dp);
1509 if (err)
1510 goto err_cleanup_vecs;
1511
1512 err = nfp_net_tx_rings_prepare(nn, dp);
1513 if (err)
1514 goto err_free_rx;
1515
1516
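/* Stop device, swap in new rings, try to start the firmware */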
1517 nfp_net_close_stack(nn);
1518 nfp_net_clear_config_and_disable(nn);
1519
1520 err = nfp_net_dp_swap_enable(nn, dp);
1521 if (err) {
1522 int err2;
1523
1524 nfp_net_clear_config_and_disable(nn);
1525
1526
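/* Try with old configuration and old rings */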
1527 err2 = nfp_net_dp_swap_enable(nn, dp);
1528 if (err2)
1529 nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
1530 err, err2);
1531 }
1532 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
1533 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1534
1535 nfp_net_rx_rings_free(dp);
1536 nfp_net_tx_rings_free(dp);
1537
1538 nfp_net_open_stack(nn);
1539 exit_free_dp:
1540 nfp_net_free_dp(dp);
1541
1542 return err;
1543
1544 err_free_rx:
1545 nfp_net_rx_rings_free(dp);
1546 err_cleanup_vecs:
1547 for (r = dp->num_r_vecs - 1; r >= nn->dp.num_r_vecs; r--)
1548 nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
1549 nfp_net_free_dp(dp);
1550 return err;
1551 }
1552
1553 static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
1554 {
1555 struct nfp_net *nn = netdev_priv(netdev);
1556 struct nfp_net_dp *dp;
1557 int err;
1558
1559 err = nfp_app_check_mtu(nn->app, netdev, new_mtu);
1560 if (err)
1561 return err;
1562
1563 dp = nfp_net_clone_dp(nn);
1564 if (!dp)
1565 return -ENOMEM;
1566
1567 dp->mtu = new_mtu;
1568
1569 return nfp_net_ring_reconfig(nn, dp, NULL);
1570 }
1571
1572 static int
1573 nfp_net_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1574 {
1575 const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD;
1576 struct nfp_net *nn = netdev_priv(netdev);
1577 int err;
1578
/* Priority tagged packets with VLAN ID 0 are processed by the NFP as
 * untagged packets, no filter needs to be added for them.
 */
1582 if (!vid)
1583 return 0;
1584
1585 err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1586 if (err)
1587 return err;
1588
1589 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1590 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1591 ETH_P_8021Q);
1592
1593 return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1594 }
1595
1596 static int
1597 nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
1598 {
1599 const u32 cmd = NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL;
1600 struct nfp_net *nn = netdev_priv(netdev);
1601 int err;
1602
/* Priority tagged packets with VLAN ID 0 are processed by the NFP as
 * untagged packets, there is no filter to remove for them.
 */
1606 if (!vid)
1607 return 0;
1608
1609 err = nfp_net_mbox_lock(nn, NFP_NET_CFG_VLAN_FILTER_SZ);
1610 if (err)
1611 return err;
1612
1613 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_VID, vid);
1614 nn_writew(nn, nn->tlv_caps.mbox_off + NFP_NET_CFG_VLAN_FILTER_PROTO,
1615 ETH_P_8021Q);
1616
1617 return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
1618 }
1619
1620 static void nfp_net_stat64(struct net_device *netdev,
1621 struct rtnl_link_stats64 *stats)
1622 {
1623 struct nfp_net *nn = netdev_priv(netdev);
1624 int r;
1625
1626
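/* Collect software stats from the ring vectors */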
1627 for (r = 0; r < nn->max_r_vecs; r++) {
1628 struct nfp_net_r_vector *r_vec = &nn->r_vecs[r];
1629 u64 data[3];
1630 unsigned int start;
1631
1632 do {
1633 start = u64_stats_fetch_begin_irq(&r_vec->rx_sync);
1634 data[0] = r_vec->rx_pkts;
1635 data[1] = r_vec->rx_bytes;
1636 data[2] = r_vec->rx_drops;
1637 } while (u64_stats_fetch_retry_irq(&r_vec->rx_sync, start));
1638 stats->rx_packets += data[0];
1639 stats->rx_bytes += data[1];
1640 stats->rx_dropped += data[2];
1641
1642 do {
1643 start = u64_stats_fetch_begin_irq(&r_vec->tx_sync);
1644 data[0] = r_vec->tx_pkts;
1645 data[1] = r_vec->tx_bytes;
1646 data[2] = r_vec->tx_errors;
1647 } while (u64_stats_fetch_retry_irq(&r_vec->tx_sync, start));
1648 stats->tx_packets += data[0];
1649 stats->tx_bytes += data[1];
1650 stats->tx_errors += data[2];
1651 }
1652
1653
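/* Add in device-wide stats read from the control BAR */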
1654 stats->multicast += nn_readq(nn, NFP_NET_CFG_STATS_RX_MC_FRAMES);
1655 stats->rx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_RX_DISCARDS);
1656 stats->rx_errors += nn_readq(nn, NFP_NET_CFG_STATS_RX_ERRORS);
1657
1658 stats->tx_dropped += nn_readq(nn, NFP_NET_CFG_STATS_TX_DISCARDS);
1659 stats->tx_errors += nn_readq(nn, NFP_NET_CFG_STATS_TX_ERRORS);
1660 }
1661
1662 static int nfp_net_set_features(struct net_device *netdev,
1663 netdev_features_t features)
1664 {
1665 netdev_features_t changed = netdev->features ^ features;
1666 struct nfp_net *nn = netdev_priv(netdev);
1667 u32 new_ctrl;
1668 int err;
1669
1670
1671
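/* Assume this is not called with features we have not advertised */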
1672 new_ctrl = nn->dp.ctrl;
1673
1674 if (changed & NETIF_F_RXCSUM) {
1675 if (features & NETIF_F_RXCSUM)
1676 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
1677 else
1678 new_ctrl &= ~NFP_NET_CFG_CTRL_RXCSUM_ANY;
1679 }
1680
1681 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1682 if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
1683 new_ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
1684 else
1685 new_ctrl &= ~NFP_NET_CFG_CTRL_TXCSUM;
1686 }
1687
1688 if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
1689 if (features & (NETIF_F_TSO | NETIF_F_TSO6))
1690 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
1691 NFP_NET_CFG_CTRL_LSO;
1692 else
1693 new_ctrl &= ~NFP_NET_CFG_CTRL_LSO_ANY;
1694 }
1695
1696 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1697 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1698 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
1699 NFP_NET_CFG_CTRL_RXVLAN;
1700 else
1701 new_ctrl &= ~NFP_NET_CFG_CTRL_RXVLAN_ANY;
1702 }
1703
1704 if (changed & NETIF_F_HW_VLAN_CTAG_TX) {
1705 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1706 new_ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
1707 NFP_NET_CFG_CTRL_TXVLAN;
1708 else
1709 new_ctrl &= ~NFP_NET_CFG_CTRL_TXVLAN_ANY;
1710 }
1711
1712 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
1713 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1714 new_ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
1715 else
1716 new_ctrl &= ~NFP_NET_CFG_CTRL_CTAG_FILTER;
1717 }
1718
1719 if (changed & NETIF_F_HW_VLAN_STAG_RX) {
1720 if (features & NETIF_F_HW_VLAN_STAG_RX)
1721 new_ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
1722 else
1723 new_ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;
1724 }
1725
1726 if (changed & NETIF_F_SG) {
1727 if (features & NETIF_F_SG)
1728 new_ctrl |= NFP_NET_CFG_CTRL_GATHER;
1729 else
1730 new_ctrl &= ~NFP_NET_CFG_CTRL_GATHER;
1731 }
1732
1733 err = nfp_port_set_features(netdev, features);
1734 if (err)
1735 return err;
1736
1737 nn_dbg(nn, "Feature change 0x%llx -> 0x%llx (changed=0x%llx)\n",
1738 netdev->features, features, changed);
1739
1740 if (new_ctrl == nn->dp.ctrl)
1741 return 0;
1742
1743 nn_dbg(nn, "NIC ctrl: 0x%x -> 0x%x\n", nn->dp.ctrl, new_ctrl);
1744 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1745 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
1746 if (err)
1747 return err;
1748
1749 nn->dp.ctrl = new_ctrl;
1750
1751 return 0;
1752 }
1753
1754 static netdev_features_t
1755 nfp_net_fix_features(struct net_device *netdev,
1756 netdev_features_t features)
1757 {
1758 if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1759 (features & NETIF_F_HW_VLAN_STAG_RX)) {
1760 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
1761 features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1762 netdev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
1763 netdev_warn(netdev,
1764 "S-tag and C-tag stripping can't be enabled at the same time. Enabling S-tag stripping and disabling C-tag stripping\n");
1765 } else if (netdev->features & NETIF_F_HW_VLAN_STAG_RX) {
1766 features &= ~NETIF_F_HW_VLAN_STAG_RX;
1767 netdev->wanted_features &= ~NETIF_F_HW_VLAN_STAG_RX;
1768 netdev_warn(netdev,
1769 "S-tag and C-tag stripping can't be enabled at the same time. Enabling C-tag stripping and disabling S-tag stripping\n");
1770 }
1771 }
1772 return features;
1773 }
1774
1775 static netdev_features_t
1776 nfp_net_features_check(struct sk_buff *skb, struct net_device *dev,
1777 netdev_features_t features)
1778 {
1779 u8 l4_hdr;
1780
1781
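/* We can't do TSO over double tagged packets (802.1AD) */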
1782 features &= vlan_features_check(skb, features);
1783
1784 if (!skb->encapsulation)
1785 return features;
1786
1787
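/* Ensure that the inner L4 header offset fits into the TX descriptor field */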
1788 if (skb_is_gso(skb)) {
1789 u32 hdrlen;
1790
1791 hdrlen = skb_inner_tcp_all_headers(skb);
1792
/* Assume the worst case scenario of having the longest possible
 * metadata prepend - 8B.
 */
1796 if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8))
1797 features &= ~NETIF_F_GSO_MASK;
1798 }
1799
1800
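/* VXLAN/GRE check */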
1801 switch (vlan_get_protocol(skb)) {
1802 case htons(ETH_P_IP):
1803 l4_hdr = ip_hdr(skb)->protocol;
1804 break;
1805 case htons(ETH_P_IPV6):
1806 l4_hdr = ipv6_hdr(skb)->nexthdr;
1807 break;
1808 default:
1809 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1810 }
1811
1812 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
1813 skb->inner_protocol != htons(ETH_P_TEB) ||
1814 (l4_hdr != IPPROTO_UDP && l4_hdr != IPPROTO_GRE) ||
1815 (l4_hdr == IPPROTO_UDP &&
1816 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
1817 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
1818 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1819
1820 return features;
1821 }
1822
1823 static int
1824 nfp_net_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
1825 {
1826 struct nfp_net *nn = netdev_priv(netdev);
1827 int n;
1828
/* If port is defined, devlink_port is registered and devlink core
 * is taking care of name formatting.
 */
1832 if (nn->port)
1833 return -EOPNOTSUPP;
1834
1835 if (nn->dp.is_vf || nn->vnic_no_name)
1836 return -EOPNOTSUPP;
1837
1838 n = snprintf(name, len, "n%d", nn->id);
1839 if (n >= len)
1840 return -EINVAL;
1841
1842 return 0;
1843 }
1844
1845 static int nfp_net_xdp_setup_drv(struct nfp_net *nn, struct netdev_bpf *bpf)
1846 {
1847 struct bpf_prog *prog = bpf->prog;
1848 struct nfp_net_dp *dp;
1849 int err;
1850
1851 if (!prog == !nn->dp.xdp_prog) {
1852 WRITE_ONCE(nn->dp.xdp_prog, prog);
1853 xdp_attachment_setup(&nn->xdp, bpf);
1854 return 0;
1855 }
1856
1857 dp = nfp_net_clone_dp(nn);
1858 if (!dp)
1859 return -ENOMEM;
1860
1861 dp->xdp_prog = prog;
1862 dp->num_tx_rings += prog ? nn->dp.num_rx_rings : -nn->dp.num_rx_rings;
1863 dp->rx_dma_dir = prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
1864 dp->rx_dma_off = prog ? XDP_PACKET_HEADROOM - nn->dp.rx_offset : 0;
1865
1866
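/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */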
1867 err = nfp_net_ring_reconfig(nn, dp, bpf->extack);
1868 if (err)
1869 return err;
1870
1871 xdp_attachment_setup(&nn->xdp, bpf);
1872 return 0;
1873 }
1874
1875 static int nfp_net_xdp_setup_hw(struct nfp_net *nn, struct netdev_bpf *bpf)
1876 {
1877 int err;
1878
1879 err = nfp_app_xdp_offload(nn->app, nn, bpf->prog, bpf->extack);
1880 if (err)
1881 return err;
1882
1883 xdp_attachment_setup(&nn->xdp_hw, bpf);
1884 return 0;
1885 }
1886
1887 static int nfp_net_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
1888 {
1889 struct nfp_net *nn = netdev_priv(netdev);
1890
1891 switch (xdp->command) {
1892 case XDP_SETUP_PROG:
1893 return nfp_net_xdp_setup_drv(nn, xdp);
1894 case XDP_SETUP_PROG_HW:
1895 return nfp_net_xdp_setup_hw(nn, xdp);
1896 case XDP_SETUP_XSK_POOL:
1897 return nfp_net_xsk_setup_pool(netdev, xdp->xsk.pool,
1898 xdp->xsk.queue_id);
1899 default:
1900 return nfp_app_bpf(nn->app, nn, xdp);
1901 }
1902 }
1903
1904 static int nfp_net_set_mac_address(struct net_device *netdev, void *addr)
1905 {
1906 struct nfp_net *nn = netdev_priv(netdev);
1907 struct sockaddr *saddr = addr;
1908 int err;
1909
1910 err = eth_prepare_mac_addr_change(netdev, addr);
1911 if (err)
1912 return err;
1913
1914 nfp_net_write_mac_addr(nn, saddr->sa_data);
1915
1916 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_MACADDR);
1917 if (err)
1918 return err;
1919
1920 eth_commit_mac_addr_change(netdev, addr);
1921
1922 return 0;
1923 }
1924
1925 static int nfp_net_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
1926 struct net_device *dev, u32 filter_mask,
1927 int nlflags)
1928 {
1929 struct nfp_net *nn = netdev_priv(dev);
1930 u16 mode;
1931
1932 if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
1933 return -EOPNOTSUPP;
1934
1935 mode = (nn->dp.ctrl & NFP_NET_CFG_CTRL_VEPA) ?
1936 BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB;
1937
1938 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0,
1939 nlflags, filter_mask, NULL);
1940 }
1941
1942 static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
1943 u16 flags, struct netlink_ext_ack *extack)
1944 {
1945 struct nfp_net *nn = netdev_priv(dev);
1946 struct nlattr *attr, *br_spec;
1947 int rem, err;
1948 u32 new_ctrl;
1949 u16 mode;
1950
1951 if (!(nn->cap & NFP_NET_CFG_CTRL_VEPA))
1952 return -EOPNOTSUPP;
1953
1954 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
1955 if (!br_spec)
1956 return -EINVAL;
1957
1958 nla_for_each_nested(attr, br_spec, rem) {
1959 if (nla_type(attr) != IFLA_BRIDGE_MODE)
1960 continue;
1961
1962 if (nla_len(attr) < sizeof(mode))
1963 return -EINVAL;
1964
1965 new_ctrl = nn->dp.ctrl;
1966 mode = nla_get_u16(attr);
1967 if (mode == BRIDGE_MODE_VEPA)
1968 new_ctrl |= NFP_NET_CFG_CTRL_VEPA;
1969 else if (mode == BRIDGE_MODE_VEB)
1970 new_ctrl &= ~NFP_NET_CFG_CTRL_VEPA;
1971 else
1972 return -EOPNOTSUPP;
1973
1974 if (new_ctrl == nn->dp.ctrl)
1975 return 0;
1976
1977 nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
1978 err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
1979 if (!err)
1980 nn->dp.ctrl = new_ctrl;
1981
1982 return err;
1983 }
1984
1985 return -EINVAL;
1986 }
1987
1988 const struct net_device_ops nfp_nfd3_netdev_ops = {
1989 .ndo_init = nfp_app_ndo_init,
1990 .ndo_uninit = nfp_app_ndo_uninit,
1991 .ndo_open = nfp_net_netdev_open,
1992 .ndo_stop = nfp_net_netdev_close,
1993 .ndo_start_xmit = nfp_net_tx,
1994 .ndo_get_stats64 = nfp_net_stat64,
1995 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
1996 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
1997 .ndo_set_vf_mac = nfp_app_set_vf_mac,
1998 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
1999 .ndo_set_vf_rate = nfp_app_set_vf_rate,
2000 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
2001 .ndo_set_vf_trust = nfp_app_set_vf_trust,
2002 .ndo_get_vf_config = nfp_app_get_vf_config,
2003 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
2004 .ndo_setup_tc = nfp_port_setup_tc,
2005 .ndo_tx_timeout = nfp_net_tx_timeout,
2006 .ndo_set_rx_mode = nfp_net_set_rx_mode,
2007 .ndo_change_mtu = nfp_net_change_mtu,
2008 .ndo_set_mac_address = nfp_net_set_mac_address,
2009 .ndo_set_features = nfp_net_set_features,
2010 .ndo_fix_features = nfp_net_fix_features,
2011 .ndo_features_check = nfp_net_features_check,
2012 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
2013 .ndo_bpf = nfp_net_xdp,
2014 .ndo_xsk_wakeup = nfp_net_xsk_wakeup,
2015 .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
2016 .ndo_bridge_getlink = nfp_net_bridge_getlink,
2017 .ndo_bridge_setlink = nfp_net_bridge_setlink,
2018 };
2019
2020 const struct net_device_ops nfp_nfdk_netdev_ops = {
2021 .ndo_init = nfp_app_ndo_init,
2022 .ndo_uninit = nfp_app_ndo_uninit,
2023 .ndo_open = nfp_net_netdev_open,
2024 .ndo_stop = nfp_net_netdev_close,
2025 .ndo_start_xmit = nfp_net_tx,
2026 .ndo_get_stats64 = nfp_net_stat64,
2027 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
2028 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
2029 .ndo_set_vf_mac = nfp_app_set_vf_mac,
2030 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
2031 .ndo_set_vf_rate = nfp_app_set_vf_rate,
2032 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
2033 .ndo_set_vf_trust = nfp_app_set_vf_trust,
2034 .ndo_get_vf_config = nfp_app_get_vf_config,
2035 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
2036 .ndo_setup_tc = nfp_port_setup_tc,
2037 .ndo_tx_timeout = nfp_net_tx_timeout,
2038 .ndo_set_rx_mode = nfp_net_set_rx_mode,
2039 .ndo_change_mtu = nfp_net_change_mtu,
2040 .ndo_set_mac_address = nfp_net_set_mac_address,
2041 .ndo_set_features = nfp_net_set_features,
2042 .ndo_fix_features = nfp_net_fix_features,
2043 .ndo_features_check = nfp_net_features_check,
2044 .ndo_get_phys_port_name = nfp_net_get_phys_port_name,
2045 .ndo_bpf = nfp_net_xdp,
2046 .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
2047 .ndo_bridge_getlink = nfp_net_bridge_getlink,
2048 .ndo_bridge_setlink = nfp_net_bridge_setlink,
2049 };
2050
2051 static int nfp_udp_tunnel_sync(struct net_device *netdev, unsigned int table)
2052 {
2053 struct nfp_net *nn = netdev_priv(netdev);
2054 int i;
2055
2056 BUILD_BUG_ON(NFP_NET_N_VXLAN_PORTS & 1);
2057 for (i = 0; i < NFP_NET_N_VXLAN_PORTS; i += 2) {
2058 struct udp_tunnel_info ti0, ti1;
2059
2060 udp_tunnel_nic_get_port(netdev, table, i, &ti0);
2061 udp_tunnel_nic_get_port(netdev, table, i + 1, &ti1);
2062
2063 nn_writel(nn, NFP_NET_CFG_VXLAN_PORT + i * sizeof(ti0.port),
2064 be16_to_cpu(ti1.port) << 16 | be16_to_cpu(ti0.port));
2065 }
2066
2067 return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
2068 }
2069
2070 static const struct udp_tunnel_nic_info nfp_udp_tunnels = {
2071 .sync_table = nfp_udp_tunnel_sync,
2072 .flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
2073 UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
2074 .tables = {
2075 {
2076 .n_entries = NFP_NET_N_VXLAN_PORTS,
2077 .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,
2078 },
2079 },
2080 };
2081
/**
 * nfp_net_info() - Print general info about the NIC
 * @nn:      NFP Net device to report on
 */
2086 void nfp_net_info(struct nfp_net *nn)
2087 {
2088 nn_info(nn, "NFP-6xxx %sNetdev: TxQs=%d/%d RxQs=%d/%d\n",
2089 nn->dp.is_vf ? "VF " : "",
2090 nn->dp.num_tx_rings, nn->max_tx_rings,
2091 nn->dp.num_rx_rings, nn->max_rx_rings);
2092 nn_info(nn, "VER: %d.%d.%d.%d, Maximum supported MTU: %d\n",
2093 nn->fw_ver.extend, nn->fw_ver.class,
2094 nn->fw_ver.major, nn->fw_ver.minor,
2095 nn->max_mtu);
2096 nn_info(nn, "CAP: %#x %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
2097 nn->cap,
2098 nn->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2099 nn->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2100 nn->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2101 nn->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2102 nn->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2103 nn->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2104 nn->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2105 nn->cap & NFP_NET_CFG_CTRL_RXQINQ ? "RXQINQ " : "",
2106 nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ? "RXVLANv2 " : "",
2107 nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ? "TXVLANv2 " : "",
2108 nn->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2109 nn->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2110 nn->cap & NFP_NET_CFG_CTRL_LSO ? "TSO1 " : "",
2111 nn->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSO2 " : "",
2112 nn->cap & NFP_NET_CFG_CTRL_RSS ? "RSS1 " : "",
2113 nn->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSS2 " : "",
2114 nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER ? "CTAG_FILTER " : "",
2115 nn->cap & NFP_NET_CFG_CTRL_MSIXAUTO ? "AUTOMASK " : "",
2116 nn->cap & NFP_NET_CFG_CTRL_IRQMOD ? "IRQMOD " : "",
2117 nn->cap & NFP_NET_CFG_CTRL_TXRWB ? "TXRWB " : "",
2118 nn->cap & NFP_NET_CFG_CTRL_VEPA ? "VEPA " : "",
2119 nn->cap & NFP_NET_CFG_CTRL_VXLAN ? "VXLAN " : "",
2120 nn->cap & NFP_NET_CFG_CTRL_NVGRE ? "NVGRE " : "",
2121 nn->cap & NFP_NET_CFG_CTRL_CSUM_COMPLETE ?
2122 "RXCSUM_COMPLETE " : "",
2123 nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2124 nfp_app_extra_cap(nn->app, nn));
2125 }
2126
2127
/**
 * nfp_net_alloc() - Allocate netdev and related structure
 * @pdev:         PCI device
 * @dev_info:     NFP ASIC params
 * @ctrl_bar:     PCI IOMEM with vNIC config memory
 * @needs_netdev: Whether to allocate a netdev for this vNIC
 * @max_tx_rings: Maximum number of TX rings supported by device
 * @max_rx_rings: Maximum number of RX rings supported by device
 *
 * This function allocates a netdev device and fills in the initial
 * part of the @struct nfp_net structure.
 *
 * Return: NFP Net device structure, or ERR_PTR on error.
 */
struct nfp_net *
nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
              void __iomem *ctrl_bar, bool needs_netdev,
              unsigned int max_tx_rings, unsigned int max_rx_rings)
{
        u64 dma_mask = dma_get_mask(&pdev->dev);
        struct nfp_net *nn;
        int err;

        if (needs_netdev) {
                struct net_device *netdev;

                netdev = alloc_etherdev_mqs(sizeof(struct nfp_net),
                                            max_tx_rings, max_rx_rings);
                if (!netdev)
                        return ERR_PTR(-ENOMEM);

                SET_NETDEV_DEV(netdev, &pdev->dev);
                nn = netdev_priv(netdev);
                nn->dp.netdev = netdev;
        } else {
                nn = vzalloc(sizeof(*nn));
                if (!nn)
                        return ERR_PTR(-ENOMEM);
        }

        nn->dp.dev = &pdev->dev;
        nn->dp.ctrl_bar = ctrl_bar;
        nn->dev_info = dev_info;
        nn->pdev = pdev;
        nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);

        switch (FIELD_GET(NFP_NET_CFG_VERSION_DP_MASK, nn->fw_ver.extend)) {
        case NFP_NET_CFG_VERSION_DP_NFD3:
                nn->dp.ops = &nfp_nfd3_ops;
                break;
        case NFP_NET_CFG_VERSION_DP_NFDK:
                if (nn->fw_ver.major < 5) {
                        dev_err(&pdev->dev,
                                "NFDK must use ABI 5 or newer, found: %d\n",
                                nn->fw_ver.major);
                        err = -EINVAL;
                        goto err_free_nn;
                }
                nn->dp.ops = &nfp_nfdk_ops;
                break;
        default:
                err = -EINVAL;
                goto err_free_nn;
        }

        if ((dma_mask & nn->dp.ops->dma_mask) != dma_mask) {
                dev_err(&pdev->dev,
                        "DMA mask of loaded firmware: %llx, required DMA mask: %llx\n",
                        nn->dp.ops->dma_mask, dma_mask);
                err = -EINVAL;
                goto err_free_nn;
        }

        nn->max_tx_rings = max_tx_rings;
        nn->max_rx_rings = max_rx_rings;

        nn->dp.num_tx_rings = min_t(unsigned int,
                                    max_tx_rings, num_online_cpus());
        nn->dp.num_rx_rings = min_t(unsigned int, max_rx_rings,
                                    netif_get_num_default_rss_queues());

        nn->dp.num_r_vecs = max(nn->dp.num_tx_rings, nn->dp.num_rx_rings);
        nn->dp.num_r_vecs = min_t(unsigned int,
                                  nn->dp.num_r_vecs, num_online_cpus());
        nn->max_r_vecs = nn->dp.num_r_vecs;

        nn->dp.xsk_pools = kcalloc(nn->max_r_vecs, sizeof(nn->dp.xsk_pools),
                                   GFP_KERNEL);
        if (!nn->dp.xsk_pools) {
                err = -ENOMEM;
                goto err_free_nn;
        }

        nn->dp.txd_cnt = NFP_NET_TX_DESCS_DEFAULT;
        nn->dp.rxd_cnt = NFP_NET_RX_DESCS_DEFAULT;

        sema_init(&nn->bar_lock, 1);

        spin_lock_init(&nn->reconfig_lock);
        spin_lock_init(&nn->link_status_lock);

        timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0);

        err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
                                     &nn->tlv_caps);
        if (err)
                goto err_free_nn;

        err = nfp_ccm_mbox_alloc(nn);
        if (err)
                goto err_free_nn;

        return nn;

err_free_nn:
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
                vfree(nn);
        return ERR_PTR(err);
}

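/**
 * nfp_net_free() - Undo what nfp_net_alloc() did
 * @nn:      NFP Net device structure to free
 */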
void nfp_net_free(struct nfp_net *nn)
{
        WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
        nfp_ccm_mbox_free(nn);

        kfree(nn->dp.xsk_pools);
        if (nn->dp.netdev)
                free_netdev(nn->dp.netdev);
        else
                vfree(nn);
}

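/**
 * nfp_net_rss_key_sz() - Get current size of the RSS key
 * @nn:      NFP Net device instance
 *
 * Return: size of the RSS key for the currently selected hash function.
 */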
unsigned int nfp_net_rss_key_sz(struct nfp_net *nn)
{
        switch (nn->rss_hfunc) {
        case ETH_RSS_HASH_TOP:
                return NFP_NET_CFG_RSS_KEY_SZ;
        case ETH_RSS_HASH_XOR:
                return 0;
        case ETH_RSS_HASH_CRC32:
                return 4;
        }

        nn_warn(nn, "Unknown hash function: %u\n", nn->rss_hfunc);
        return 0;
}

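/**
 * nfp_net_rss_init() - Set the initial RSS parameters
 * @nn:      NFP Net device to configure
 */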
static void nfp_net_rss_init(struct nfp_net *nn)
{
        unsigned long func_bit, rss_cap_hfunc;
        u32 reg;

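        /* Read the RSS function capability and select the first supported function */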
        reg = nn_readl(nn, NFP_NET_CFG_RSS_CAP);
        rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC, reg);
        if (!rss_cap_hfunc)
                rss_cap_hfunc = FIELD_GET(NFP_NET_CFG_RSS_CAP_HFUNC,
                                          NFP_NET_CFG_RSS_TOEPLITZ);

        func_bit = find_first_bit(&rss_cap_hfunc, NFP_NET_CFG_RSS_HFUNCS);
        if (func_bit == NFP_NET_CFG_RSS_HFUNCS) {
                dev_warn(nn->dp.dev,
                         "Bad RSS config, defaulting to Toeplitz hash\n");
                func_bit = ETH_RSS_HASH_TOP_BIT;
        }
        nn->rss_hfunc = 1 << func_bit;

        netdev_rss_key_fill(nn->rss_key, nfp_net_rss_key_sz(nn));

        nfp_net_rss_init_itbl(nn);

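        /* Enable IPv4/IPv6 TCP by default */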
        nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
                      NFP_NET_CFG_RSS_IPV6_TCP |
                      FIELD_PREP(NFP_NET_CFG_RSS_HFUNC, nn->rss_hfunc) |
                      NFP_NET_CFG_RSS_MASK;
}

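/**
 * nfp_net_irqmod_init() - Set the initial IRQ moderation parameters
 * @nn:      NFP Net device to configure
 */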
static void nfp_net_irqmod_init(struct nfp_net *nn)
{
        nn->rx_coalesce_usecs = 50;
        nn->rx_coalesce_max_frames = 64;
        nn->tx_coalesce_usecs = 50;
        nn->tx_coalesce_max_frames = 64;

        nn->rx_coalesce_adapt_on = true;
        nn->tx_coalesce_adapt_on = true;
}

static void nfp_net_netdev_init(struct nfp_net *nn)
{
        struct net_device *netdev = nn->dp.netdev;

        nfp_net_write_mac_addr(nn, nn->dp.netdev->dev_addr);

        netdev->mtu = nn->dp.mtu;

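        /* Advertise/enable offloads based on capabilities.
         *
         * Note: netdev->features holds the currently enabled features while
         * netdev->hw_features advertises which features are supported.
         * Most supported features are enabled by default below.
         */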
        if (nn->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
                netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

        netdev->hw_features = NETIF_F_HIGHDMA;
        if (nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) {
                netdev->hw_features |= NETIF_F_RXCSUM;
                nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXCSUM_ANY;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_TXCSUM) {
                netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXCSUM;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_GATHER) {
                netdev->hw_features |= NETIF_F_SG;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_GATHER;
        }
        if ((nn->cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
            nn->cap & NFP_NET_CFG_CTRL_LSO2) {
                netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
                nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_LSO2 ?:
                               NFP_NET_CFG_CTRL_LSO;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY)
                netdev->hw_features |= NETIF_F_RXHASH;
        if (nn->cap & NFP_NET_CFG_CTRL_VXLAN) {
                if (nn->cap & NFP_NET_CFG_CTRL_LSO) {
                        netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
                                               NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                               NETIF_F_GSO_PARTIAL;
                        netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
                }
                netdev->udp_tunnel_nic_info = &nfp_udp_tunnels;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_VXLAN;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_NVGRE) {
                if (nn->cap & NFP_NET_CFG_CTRL_LSO)
                        netdev->hw_features |= NETIF_F_GSO_GRE;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_NVGRE;
        }
        if (nn->cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
                netdev->hw_enc_features = netdev->hw_features;

        netdev->vlan_features = netdev->hw_features;

        if (nn->cap & NFP_NET_CFG_CTRL_RXVLAN_ANY) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
                nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RXVLAN_V2 ?:
                               NFP_NET_CFG_CTRL_RXVLAN;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
                if (nn->cap & NFP_NET_CFG_CTRL_LSO2) {
                        nn_warn(nn, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
                } else {
                        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
                        nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_TXVLAN_V2 ?:
                                       NFP_NET_CFG_CTRL_TXVLAN;
                }
        }
        if (nn->cap & NFP_NET_CFG_CTRL_CTAG_FILTER) {
                netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_CTAG_FILTER;
        }
        if (nn->cap & NFP_NET_CFG_CTRL_RXQINQ) {
                netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_RXQINQ;
        }

        netdev->features = netdev->hw_features;

        if (nfp_app_has_tc(nn->app) && nn->port)
                netdev->hw_features |= NETIF_F_HW_TC;

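        /* C-Tag strip and S-Tag strip cannot be enabled simultaneously,
         * so enable C-Tag strip and disable S-Tag strip by default.
         */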
        netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_RXQINQ;

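        /* Select the netdev_ops matching the datapath (NFD) version */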
        switch (nn->dp.ops->version) {
        case NFP_NFD_VER_NFD3:
                netdev->netdev_ops = &nfp_nfd3_netdev_ops;
                break;
        case NFP_NFD_VER_NFDK:
                netdev->netdev_ops = &nfp_nfdk_netdev_ops;
                break;
        }

        netdev->watchdog_timeo = msecs_to_jiffies(5 * 1000);

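        /* MTU range: 68 - hw-specific max */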
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = nn->max_mtu;

        netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

        netif_carrier_off(netdev);

        nfp_net_set_ethtool_ops(netdev);
}

static int nfp_net_read_caps(struct nfp_net *nn)
{
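        /* Get some of the read-only fields from the BAR */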
        nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
        nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);

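        /* ABI 4.x and ctrl vNICs always use the chained metadata format; in
         * other cases non-chained metadata may be used if RSS(v1) is the
         * only advertised capability requiring metadata.
         */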
        nn->dp.chained_metadata_format = nn->fw_ver.major == 4 ||
                                         !nn->dp.netdev ||
                                         !(nn->cap & NFP_NET_CFG_CTRL_RSS) ||
                                         nn->cap & NFP_NET_CFG_CTRL_CHAIN_META;

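        /* RSS(v1) depends on the non-chained metadata format, so drop the
         * capability when chained metadata is in use on non-ABI-4 firmware.
         */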
        if (nn->dp.chained_metadata_format && nn->fw_ver.major != 4)
                nn->cap &= ~NFP_NET_CFG_CTRL_RSS;

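        /* Determine RX packet/metadata boundary offset */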
        if (nn->fw_ver.major >= 2) {
                u32 reg;

                reg = nn_readl(nn, NFP_NET_CFG_RX_OFFSET);
                if (reg > NFP_NET_MAX_PREPEND) {
                        nn_err(nn, "Invalid rx offset: %d\n", reg);
                        return -EINVAL;
                }
                nn->dp.rx_offset = reg;
        } else {
                nn->dp.rx_offset = NFP_NET_RX_OFFSET;
        }

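        /* Mask out capabilities the datapath (NFD version) does not support */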
        nn->cap &= nn->dp.ops->cap_mask;

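        /* For control vNICs mask out the capabilities the app doesn't want */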
        if (!nn->dp.netdev)
                nn->cap &= nn->app->type->ctrl_cap_mask;

        return 0;
}

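/**
 * nfp_net_init() - Initialise/finalise the nfp_net structure
 * @nn:      NFP Net device structure
 *
 * Return: 0 on success or negative errno on error.
 */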
int nfp_net_init(struct nfp_net *nn)
{
        int err;

        nn->dp.rx_dma_dir = DMA_FROM_DEVICE;

        err = nfp_net_read_caps(nn);
        if (err)
                return err;

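        /* Set default MTU and Freelist buffer size */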
        if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) {
                nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu);
        } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) {
                nn->dp.mtu = nn->max_mtu;
        } else {
                nn->dp.mtu = NFP_NET_DEFAULT_MTU;
        }
        nn->dp.fl_bufsz = nfp_net_calc_fl_bufsz(&nn->dp);

        if (nfp_app_ctrl_uses_data_vnics(nn->app))
                nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_CMSG_DATA;

        if (nn->cap & NFP_NET_CFG_CTRL_RSS_ANY) {
                nfp_net_rss_init(nn);
                nn->dp.ctrl |= nn->cap & NFP_NET_CFG_CTRL_RSS2 ?:
                               NFP_NET_CFG_CTRL_RSS;
        }

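        /* Allow L2 Broadcast through by default, if supported */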
        if (nn->cap & NFP_NET_CFG_CTRL_L2BC)
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_L2BC;

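        /* Allow IRQ moderation, if supported */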
        if (nn->cap & NFP_NET_CFG_CTRL_IRQMOD) {
                nfp_net_irqmod_init(nn);
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD;
        }

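        /* Enable TX pointer writeback, if supported */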
        if (nn->cap & NFP_NET_CFG_CTRL_TXRWB)
                nn->dp.ctrl |= NFP_NET_CFG_CTRL_TXRWB;

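        /* Stash the re-configuration queue pointer: one QCP queue past the
         * start of the TX BAR.
         */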
        nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;

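        /* Make sure the device starts out disabled in the FW: clear the
         * control word and the ring enables, then issue a reconfig.
         */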
        nn_writel(nn, NFP_NET_CFG_CTRL, 0);
        nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, 0);
        nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, 0);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_RING |
                                   NFP_NET_CFG_UPDATE_GEN);
        if (err)
                return err;

        if (nn->dp.netdev) {
                nfp_net_netdev_init(nn);

                err = nfp_ccm_mbox_init(nn);
                if (err)
                        return err;

                err = nfp_net_tls_init(nn);
                if (err)
                        goto err_clean_mbox;
        }

        nfp_net_vecs_init(nn);

        if (!nn->dp.netdev)
                return 0;
        return register_netdev(nn->dp.netdev);

err_clean_mbox:
        nfp_ccm_mbox_clean(nn);
        return err;
}

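/**
 * nfp_net_clean() - Undo what nfp_net_init() did
 * @nn:      NFP Net device structure
 */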
void nfp_net_clean(struct nfp_net *nn)
{
        if (!nn->dp.netdev)
                return;

        unregister_netdev(nn->dp.netdev);
        nfp_ccm_mbox_clean(nn);
        nfp_net_reconfig_wait_posted(nn);
}