/* Cavium LiquidIO Intelligent Server Adapter -
 * virtual function (VF) network driver for CN23xx devices.
 */
0018 #include <linux/module.h>
0019 #include <linux/interrupt.h>
0020 #include <linux/pci.h>
0021 #include <net/vxlan.h>
0022 #include "liquidio_common.h"
0023 #include "octeon_droq.h"
0024 #include "octeon_iq.h"
0025 #include "response_manager.h"
0026 #include "octeon_device.h"
0027 #include "octeon_nic.h"
0028 #include "octeon_main.h"
0029 #include "octeon_network.h"
0030 #include "cn23xx_vf_device.h"
0031
0032 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
0033 MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Virtual Function Driver");
0034 MODULE_LICENSE("GPL");
0035
0036 static int debug = -1;
0037 module_param(debug, int, 0644);
0038 MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
0039
0040 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
0041
0042 struct oct_timestamp_resp {
0043 u64 rh;
0044 u64 timestamp;
0045 u64 status;
0046 };
0047
0048 union tx_info {
0049 u64 u64;
0050 struct {
0051 #ifdef __BIG_ENDIAN_BITFIELD
0052 u16 gso_size;
0053 u16 gso_segs;
0054 u32 reserved;
0055 #else
0056 u32 reserved;
0057 u16 gso_segs;
0058 u16 gso_size;
0059 #endif
0060 } s;
0061 };
0062
0063 #define OCTNIC_GSO_MAX_HEADER_SIZE 128
0064 #define OCTNIC_GSO_MAX_SIZE \
0065 (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
0066
0067 static int
0068 liquidio_vf_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
0069 static void liquidio_vf_remove(struct pci_dev *pdev);
0070 static int octeon_device_init(struct octeon_device *oct);
0071 static int liquidio_stop(struct net_device *netdev);
0072
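/**
 * lio_wait_for_oq_pkts - drain pending output-queue packets
 * @oct: octeon device
 *
 * Repeatedly checks the DROQs for packets and schedules the DROQ tasklet
 * until no packets remain or the retry count expires.
 */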
0073 static int lio_wait_for_oq_pkts(struct octeon_device *oct)
0074 {
0075 struct octeon_device_priv *oct_priv =
0076 (struct octeon_device_priv *)oct->priv;
0077 int retry = MAX_IO_PENDING_PKT_COUNT;
0078 int pkt_cnt = 0, pending_pkts;
0079 int i;
0080
0081 do {
0082 pending_pkts = 0;
0083
0084 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
0085 if (!(oct->io_qmask.oq & BIT_ULL(i)))
0086 continue;
0087 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
0088 }
0089 if (pkt_cnt > 0) {
0090 pending_pkts += pkt_cnt;
0091 tasklet_schedule(&oct_priv->droq_tasklet);
0092 }
0093 pkt_cnt = 0;
0094 schedule_timeout_uninterruptible(1);
0095
0096 } while (retry-- && pending_pkts);
0097
0098 return pkt_cnt;
0099 }
0100
0101
0102
0103
0104
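/**
 * pcierror_quiesce_device - quiesce I/O on a device after a PCI error
 * @oct: octeon device
 *
 * Waits for pending requests, then forces completion of any instructions
 * still queued and of outstanding ordered-list requests.
 */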
0105 static void pcierror_quiesce_device(struct octeon_device *oct)
0106 {
0107 int i;
0108
/* Give in-flight requests a short time to drain before forcing
 * completion of anything that is still pending.
 */
0115 schedule_timeout_uninterruptible(100);
0116
0117 if (wait_for_pending_requests(oct))
0118 dev_err(&oct->pci_dev->dev, "There were pending requests\n");

/* Force all requests waiting to be fetched by OCTEON to complete. */
0121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
0122 struct octeon_instr_queue *iq;
0123
0124 if (!(oct->io_qmask.iq & BIT_ULL(i)))
0125 continue;
0126 iq = oct->instr_queue[i];
0127
0128 if (atomic_read(&iq->instr_pending)) {
0129 spin_lock_bh(&iq->lock);
0130 iq->fill_cnt = 0;
0131 iq->octeon_read_index = iq->host_write_index;
0132 iq->stats.instr_processed +=
0133 atomic_read(&iq->instr_pending);
0134 lio_process_iq_request_list(oct, iq, 0);
0135 spin_unlock_bh(&iq->lock);
0136 }
0137 }

/* Force all pending ordered-list requests to time out. */
0140 lio_process_ordered_list(oct, 1);
0141
0142
0143 }
0144
0145
0146
0147
0148
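/**
 * cleanup_aer_uncorrect_error_status - clear uncorrectable AER error status
 * @dev: PCI device
 */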
0149 static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
0150 {
0151 u32 status, mask;
0152 int pos = 0x100;
0153
0154 pr_info("%s :\n", __func__);
0155
0156 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
0157 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
0158 if (dev->error_state == pci_channel_io_normal)
0159 status &= ~mask;
0160 else
0161 status &= mask;
0162 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
0163 }
0164
0165
0166
0167
0168
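/**
 * stop_pci_io - stop all PCI I/O to a given device
 * @oct: octeon device
 */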
0169 static void stop_pci_io(struct octeon_device *oct)
0170 {
0171 struct msix_entry *msix_entries;
0172 int i;
0173
0174
0175 atomic_set(&oct->status, OCT_DEV_IN_RESET);
0176
0177 for (i = 0; i < oct->ifcount; i++)
0178 netif_device_detach(oct->props[i].netdev);
0179
0180
0181 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
0182
0183 pcierror_quiesce_device(oct);
0184 if (oct->msix_on) {
0185 msix_entries = (struct msix_entry *)oct->msix_entries;
0186 for (i = 0; i < oct->num_msix_irqs; i++) {
0187
0188 irq_set_affinity_hint(msix_entries[i].vector,
0189 NULL);
0190 free_irq(msix_entries[i].vector,
0191 &oct->ioq_vector[i]);
0192 }
0193 pci_disable_msix(oct->pci_dev);
0194 kfree(oct->msix_entries);
0195 oct->msix_entries = NULL;
0196 octeon_free_ioq_vector(oct);
0197 }
0198 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
0199 lio_get_state_string(&oct->status));
0200
0201
0202 cleanup_aer_uncorrect_error_status(oct->pci_dev);
0203
0204 pci_disable_device(oct->pci_dev);
0205 }
0206
0214
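/**
 * liquidio_pcie_error_detected - called when a PCI error is detected
 * @pdev: PCI device
 * @state: the current PCI connection state
 *
 * Called after a PCI bus error affecting this device has been detected.
 */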
0215 static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
0216 pci_channel_state_t state)
0217 {
0218 struct octeon_device *oct = pci_get_drvdata(pdev);
0219
0220
0221 if (state == pci_channel_io_normal) {
0222 dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
0223 cleanup_aer_uncorrect_error_status(oct->pci_dev);
0224 return PCI_ERS_RESULT_CAN_RECOVER;
0225 }
0226
0227
0228 dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
0229 stop_pci_io(oct);
0230
0231 return PCI_ERS_RESULT_DISCONNECT;
0232 }
0233
0234
0235 static const struct pci_error_handlers liquidio_vf_err_handler = {
0236 .error_detected = liquidio_pcie_error_detected,
0237 };
0238
0239 static const struct pci_device_id liquidio_vf_pci_tbl[] = {
0240 {
0241 PCI_VENDOR_ID_CAVIUM, OCTEON_CN23XX_VF_VID,
0242 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
0243 },
0244 {
0245 0, 0, 0, 0, 0, 0, 0
0246 }
0247 };
0248 MODULE_DEVICE_TABLE(pci, liquidio_vf_pci_tbl);
0249
0250 static struct pci_driver liquidio_vf_pci_driver = {
0251 .name = "LiquidIO_VF",
0252 .id_table = liquidio_vf_pci_tbl,
0253 .probe = liquidio_vf_probe,
0254 .remove = liquidio_vf_remove,
0255 .err_handler = &liquidio_vf_err_handler,
0256 };
0257
0258
0259
0260
0261
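/**
 * print_link_info - print the link state of a network interface
 * @netdev: network device
 */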
0262 static void print_link_info(struct net_device *netdev)
0263 {
0264 struct lio *lio = GET_LIO(netdev);
0265
0266 if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
0267 ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
0268 struct oct_link_info *linfo = &lio->linfo;
0269
0270 if (linfo->link.s.link_up) {
0271 netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
0272 linfo->link.s.speed,
0273 (linfo->link.s.duplex) ? "Full" : "Half");
0274 } else {
0275 netif_info(lio, link, lio->netdev, "Link Down\n");
0276 }
0277 }
0278 }
0279
0280
0281
0282
0283
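/**
 * octnet_link_status_change - deferred work to apply a firmware MTU change
 * @work: work_struct
 */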
0284 static void octnet_link_status_change(struct work_struct *work)
0285 {
0286 struct cavium_wk *wk = (struct cavium_wk *)work;
0287 struct lio *lio = (struct lio *)wk->ctxptr;
0288
/* This work is scheduled only when the firmware reports a new max MTU
 * that is lower than the interface's current MTU; shrink the current
 * MTU to the new maximum under RTNL.
 */
0293 rtnl_lock();
0294 dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
0295 rtnl_unlock();
0296 }
0297
0298
0299
0300
0301
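/**
 * setup_link_status_change_wq - create the workqueue used for MTU updates
 * @netdev: network device
 */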
0302 static int setup_link_status_change_wq(struct net_device *netdev)
0303 {
0304 struct lio *lio = GET_LIO(netdev);
0305 struct octeon_device *oct = lio->oct_dev;
0306
0307 lio->link_status_wq.wq = alloc_workqueue("link-status",
0308 WQ_MEM_RECLAIM, 0);
0309 if (!lio->link_status_wq.wq) {
0310 dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
0311 return -1;
0312 }
0313 INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
0314 octnet_link_status_change);
0315 lio->link_status_wq.wk.ctxptr = lio;
0316
0317 return 0;
0318 }
0319
0320 static void cleanup_link_status_change_wq(struct net_device *netdev)
0321 {
0322 struct lio *lio = GET_LIO(netdev);
0323
0324 if (lio->link_status_wq.wq) {
0325 cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
0326 destroy_workqueue(lio->link_status_wq.wq);
0327 }
0328 }
0329
0337
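/**
 * update_link_status - update an interface from a firmware link-status report
 * @netdev: network device
 * @ls: new link status
 */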
0338 static void update_link_status(struct net_device *netdev,
0339 union oct_link_status *ls)
0340 {
0341 struct lio *lio = GET_LIO(netdev);
0342 int current_max_mtu = lio->linfo.link.s.mtu;
0343 struct octeon_device *oct = lio->oct_dev;
0344
0345 if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
0346 lio->linfo.link.u64 = ls->u64;
0347
0348 print_link_info(netdev);
0349 lio->link_changes++;
0350
0351 if (lio->linfo.link.s.link_up) {
0352 netif_carrier_on(netdev);
0353 wake_txqs(netdev);
0354 } else {
0355 netif_carrier_off(netdev);
0356 stop_txqs(netdev);
0357 }
0358
0359 if (lio->linfo.link.s.mtu != current_max_mtu) {
0360 dev_info(&oct->pci_dev->dev,
0361 "Max MTU Changed from %d to %d\n",
0362 current_max_mtu, lio->linfo.link.s.mtu);
0363 netdev->max_mtu = lio->linfo.link.s.mtu;
0364 }
0365
0366 if (lio->linfo.link.s.mtu < netdev->mtu) {
0367 dev_warn(&oct->pci_dev->dev,
0368 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
0369 netdev->mtu, lio->linfo.link.s.mtu);
0370 queue_delayed_work(lio->link_status_wq.wq,
0371 &lio->link_status_wq.wk.work, 0);
0372 }
0373 }
0374 }
0375
0376
0377
0378
0379
0380
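/**
 * liquidio_vf_probe - PCI probe handler for the VF driver
 * @pdev: PCI device
 * @ent: matching entry in liquidio_vf_pci_tbl
 */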
0381 static int
0382 liquidio_vf_probe(struct pci_dev *pdev,
0383 const struct pci_device_id __maybe_unused *ent)
0384 {
0385 struct octeon_device *oct_dev = NULL;
0386
0387 oct_dev = octeon_allocate_device(pdev->device,
0388 sizeof(struct octeon_device_priv));
0389
0390 if (!oct_dev) {
0391 dev_err(&pdev->dev, "Unable to allocate device\n");
0392 return -ENOMEM;
0393 }
0394 oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;
0395
0396 dev_info(&pdev->dev, "Initializing device %x:%x.\n",
0397 (u32)pdev->vendor, (u32)pdev->device);
0398
0399
0400 pci_set_drvdata(pdev, oct_dev);
0401
0402
0403 oct_dev->pci_dev = pdev;
0404
0405 oct_dev->subsystem_id = pdev->subsystem_vendor |
0406 (pdev->subsystem_device << 16);
0407
0408 if (octeon_device_init(oct_dev)) {
0409 liquidio_vf_remove(pdev);
0410 return -ENOMEM;
0411 }
0412
0413 dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
0414
0415 return 0;
0416 }
0417
0418
0419
0420
0421
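/**
 * octeon_pci_flr - perform a Function Level Reset on the device
 * @oct: octeon device
 */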
0422 static void octeon_pci_flr(struct octeon_device *oct)
0423 {
0424 pci_save_state(oct->pci_dev);
0425
0426 pci_cfg_access_lock(oct->pci_dev);
0427
/* Quiesce the device completely before issuing the FLR. */
0429 pci_write_config_word(oct->pci_dev, PCI_COMMAND,
0430 PCI_COMMAND_INTX_DISABLE);
0431
0432 pcie_flr(oct->pci_dev);
0433
0434 pci_cfg_access_unlock(oct->pci_dev);
0435
0436 pci_restore_state(oct->pci_dev);
0437 }
0438
0439
0440
0441
0442
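/**
 * octeon_destroy_resources - tear down device resources based on current state
 * @oct: octeon device
 */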
0443 static void octeon_destroy_resources(struct octeon_device *oct)
0444 {
0445 struct octeon_device_priv *oct_priv =
0446 (struct octeon_device_priv *)oct->priv;
0447 struct msix_entry *msix_entries;
0448 int i;
0449
0450 switch (atomic_read(&oct->status)) {
0451 case OCT_DEV_RUNNING:
0452 case OCT_DEV_CORE_OK:
0453
0454 atomic_set(&oct->status, OCT_DEV_IN_RESET);
0455
0456 oct->app_mode = CVM_DRV_INVALID_APP;
0457 dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
0458 lio_get_state_string(&oct->status));
0459
0460 schedule_timeout_uninterruptible(HZ / 10);
0461
0462 fallthrough;
0463 case OCT_DEV_HOST_OK:
0464 case OCT_DEV_IO_QUEUES_DONE:
0465 if (lio_wait_for_instr_fetch(oct))
0466 dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
0467
0468 if (wait_for_pending_requests(oct))
0469 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
0470
/* Disable the input and output queues now. No more packets will
 * arrive from Octeon, but we should wait for all packet processing
 * to finish.
 */
0475 oct->fn_list.disable_io_queues(oct);
0476
0477 if (lio_wait_for_oq_pkts(oct))
0478 dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
0479

/* Force all requests waiting to be fetched by OCTEON to complete. */
0483 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
0484 struct octeon_instr_queue *iq;
0485
0486 if (!(oct->io_qmask.iq & BIT_ULL(i)))
0487 continue;
0488 iq = oct->instr_queue[i];
0489
0490 if (atomic_read(&iq->instr_pending)) {
0491 spin_lock_bh(&iq->lock);
0492 iq->fill_cnt = 0;
0493 iq->octeon_read_index = iq->host_write_index;
0494 iq->stats.instr_processed +=
0495 atomic_read(&iq->instr_pending);
0496 lio_process_iq_request_list(oct, iq, 0);
0497 spin_unlock_bh(&iq->lock);
0498 }
0499 }
0500
0501 lio_process_ordered_list(oct, 1);
0502 octeon_free_sc_done_list(oct);
0503 octeon_free_sc_zombie_list(oct);
0504
0505 fallthrough;
0506 case OCT_DEV_INTR_SET_DONE:
0507
0508 oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
0509
0510 if (oct->msix_on) {
0511 msix_entries = (struct msix_entry *)oct->msix_entries;
0512 for (i = 0; i < oct->num_msix_irqs; i++) {
0513 if (oct->ioq_vector[i].vector) {
0514 irq_set_affinity_hint(
0515 msix_entries[i].vector,
0516 NULL);
0517 free_irq(msix_entries[i].vector,
0518 &oct->ioq_vector[i]);
0519 oct->ioq_vector[i].vector = 0;
0520 }
0521 }
0522 pci_disable_msix(oct->pci_dev);
0523 kfree(oct->msix_entries);
0524 oct->msix_entries = NULL;
0525 kfree(oct->irq_name_storage);
0526 oct->irq_name_storage = NULL;
0527 }
/* If this function supports FLR, reset it directly; otherwise ask
 * the PF driver to perform the FLR on our behalf.
 */
0529 if (!pcie_reset_flr(oct->pci_dev, PCI_RESET_PROBE))
0530 octeon_pci_flr(oct);
0531 else
0532 cn23xx_vf_ask_pf_to_do_flr(oct);
0533
0534 fallthrough;
0535 case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
0536 octeon_free_ioq_vector(oct);
0537
0538 fallthrough;
0539 case OCT_DEV_MBOX_SETUP_DONE:
0540 oct->fn_list.free_mbox(oct);
0541
0542 fallthrough;
0543 case OCT_DEV_IN_RESET:
0544 case OCT_DEV_DROQ_INIT_DONE:
0545 mdelay(100);
0546 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
0547 if (!(oct->io_qmask.oq & BIT_ULL(i)))
0548 continue;
0549 octeon_delete_droq(oct, i);
0550 }
0551
0552 fallthrough;
0553 case OCT_DEV_RESP_LIST_INIT_DONE:
0554 octeon_delete_response_list(oct);
0555
0556 fallthrough;
0557 case OCT_DEV_INSTR_QUEUE_INIT_DONE:
0558 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
0559 if (!(oct->io_qmask.iq & BIT_ULL(i)))
0560 continue;
0561 octeon_delete_instr_queue(oct, i);
0562 }
0563
0564 fallthrough;
0565 case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
0566 octeon_free_sc_buffer_pool(oct);
0567
0568 fallthrough;
0569 case OCT_DEV_DISPATCH_INIT_DONE:
0570 octeon_delete_dispatch_list(oct);
0571 cancel_delayed_work_sync(&oct->nic_poll_work.work);
0572
0573 fallthrough;
0574 case OCT_DEV_PCI_MAP_DONE:
0575 octeon_unmap_pci_barx(oct, 0);
0576 octeon_unmap_pci_barx(oct, 1);
0577
0578 fallthrough;
0579 case OCT_DEV_PCI_ENABLE_DONE:
0580 pci_clear_master(oct->pci_dev);
0581
0582 pci_disable_device(oct->pci_dev);
0583
0584 fallthrough;
0585 case OCT_DEV_BEGIN_STATE:
0586
0587 break;
0588 }
0589
0590 tasklet_kill(&oct_priv->droq_tasklet);
0591 }
0592
0593
0594
0595
0596
0597
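/**
 * send_rx_ctrl_cmd - tell the firmware to start or stop Rx on this interface
 * @lio: per-interface private data
 * @start_stop: 1 to start Rx, 0 to stop it
 */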
0598 static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
0599 {
0600 struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
0601 struct octeon_soft_command *sc;
0602 union octnet_cmd *ncmd;
0603 int retval;
0604
0605 if (oct->props[lio->ifidx].rx_on == start_stop)
0606 return 0;
0607
0608 sc = (struct octeon_soft_command *)
0609 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
0610 16, 0);
0611 if (!sc) {
0612 netif_info(lio, rx_err, lio->netdev,
0613 "Failed to allocate octeon_soft_command struct\n");
0614 return -ENOMEM;
0615 }
0616
0617 ncmd = (union octnet_cmd *)sc->virtdptr;
0618
0619 ncmd->u64 = 0;
0620 ncmd->s.cmd = OCTNET_CMD_RX_CTL;
0621 ncmd->s.param1 = start_stop;
0622
0623 octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
0624
0625 sc->iq_no = lio->linfo.txpciq[0].s.q_no;
0626
0627 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
0628 OPCODE_NIC_CMD, 0, 0, 0);
0629
0630 init_completion(&sc->complete);
0631 sc->sc_status = OCTEON_REQUEST_PENDING;
0632
0633 retval = octeon_send_soft_command(oct, sc);
0634 if (retval == IQ_SEND_FAILED) {
0635 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
0636 octeon_free_soft_command(oct, sc);
0637 } else {
/* Wait for the firmware response (or a timeout) before marking
 * Rx as started or stopped.
 */
0641 retval = wait_for_sc_completion_timeout(oct, sc, 0);
0642 if (retval)
0643 return retval;
0644
0645 oct->props[lio->ifidx].rx_on = start_stop;
0646 WRITE_ONCE(sc->caller_is_done, true);
0647 }
0648
0649 return retval;
0650 }
0651
0659
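/**
 * liquidio_destroy_nic_device - destroy a NIC device (interface)
 * @oct: octeon device
 * @ifidx: index of the interface to destroy
 */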
0660 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
0661 {
0662 struct net_device *netdev = oct->props[ifidx].netdev;
0663 struct octeon_device_priv *oct_priv =
0664 (struct octeon_device_priv *)oct->priv;
0665 struct napi_struct *napi, *n;
0666 struct lio *lio;
0667
0668 if (!netdev) {
0669 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
0670 __func__, ifidx);
0671 return;
0672 }
0673
0674 lio = GET_LIO(netdev);
0675
0676 dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
0677
0678 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
0679 liquidio_stop(netdev);
0680
0681 if (oct->props[lio->ifidx].napi_enabled == 1) {
0682 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
0683 napi_disable(napi);
0684
0685 oct->props[lio->ifidx].napi_enabled = 0;
0686
0687 oct->droq[0]->ops.poll_mode = 0;
0688 }
0689
0690
0691 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
0692 netif_napi_del(napi);
0693
0694 tasklet_enable(&oct_priv->droq_tasklet);
0695
0696 if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
0697 unregister_netdev(netdev);
0698
0699 cleanup_rx_oom_poll_fn(netdev);
0700
0701 cleanup_link_status_change_wq(netdev);
0702
0703 lio_delete_glists(lio);
0704
0705 free_netdev(netdev);
0706
0707 oct->props[ifidx].gmxport = -1;
0708
0709 oct->props[ifidx].netdev = NULL;
0710 }
0711
0712
0713
0714
0715
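/**
 * liquidio_stop_nic_module - stop all network interfaces of this device
 * @oct: octeon device
 */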
0716 static int liquidio_stop_nic_module(struct octeon_device *oct)
0717 {
0718 struct lio *lio;
0719 int i, j;
0720
0721 dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
0722 if (!oct->ifcount) {
0723 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
0724 return 1;
0725 }
0726
0727 spin_lock_bh(&oct->cmd_resp_wqlock);
0728 oct->cmd_resp_state = OCT_DRV_OFFLINE;
0729 spin_unlock_bh(&oct->cmd_resp_wqlock);
0730
0731 for (i = 0; i < oct->ifcount; i++) {
0732 lio = GET_LIO(oct->props[i].netdev);
0733 for (j = 0; j < oct->num_oqs; j++)
0734 octeon_unregister_droq_ops(oct,
0735 lio->linfo.rxpciq[j].s.q_no);
0736 }
0737
0738 for (i = 0; i < oct->ifcount; i++)
0739 liquidio_destroy_nic_device(oct, i);
0740
0741 dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
0742 return 0;
0743 }
0744
0745
0746
0747
0748
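/**
 * liquidio_vf_remove - clean up resources when the PCI device is removed
 * @pdev: PCI device
 */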
0749 static void liquidio_vf_remove(struct pci_dev *pdev)
0750 {
0751 struct octeon_device *oct_dev = pci_get_drvdata(pdev);
0752
0753 dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
0754
0755 if (oct_dev->app_mode == CVM_DRV_NIC_APP)
0756 liquidio_stop_nic_module(oct_dev);
0757
/* Reset the octeon device and clean up all memory the driver
 * allocated for it.
 */
0761 octeon_destroy_resources(oct_dev);
0762
0763 dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
0764
/* This octeon device has been removed; update the global data
 * structures to reflect this and free the device structure.
 */
0768 octeon_free_device_mem(oct_dev);
0769 }
0770
0771
0772
0773
0774
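/**
 * octeon_pci_os_setup - enable the PCI device and set up DMA masks
 * @oct: octeon device
 */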
0775 static int octeon_pci_os_setup(struct octeon_device *oct)
0776 {
0777 #ifdef CONFIG_PCI_IOV
/* If no parent PF is visible (VF assigned to a guest), reset the VF
 * with an FLR before using it.
 */
0779 if (!oct->pci_dev->physfn)
0780 octeon_pci_flr(oct);
0781 #endif
0782
0783 if (pci_enable_device(oct->pci_dev)) {
0784 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
0785 return 1;
0786 }
0787
0788 if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
0789 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
0790 pci_disable_device(oct->pci_dev);
0791 return 1;
0792 }
0793
0794
0795 pci_set_master(oct->pci_dev);
0796
0797 return 0;
0798 }
0799
0800
0801
0802
0803
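/**
 * free_netbuf - unmap and free a transmitted network buffer
 * @buf: pointer to the octnet_buf_free_info for the buffer
 */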
0804 static void free_netbuf(void *buf)
0805 {
0806 struct octnet_buf_free_info *finfo;
0807 struct sk_buff *skb;
0808 struct lio *lio;
0809
0810 finfo = (struct octnet_buf_free_info *)buf;
0811 skb = finfo->skb;
0812 lio = finfo->lio;
0813
0814 dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
0815 DMA_TO_DEVICE);
0816
0817 tx_buffer_free(skb);
0818 }
0819
0820
0821
0822
0823
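/**
 * free_netsgbuf - unmap and free a transmitted scatter-gather buffer
 * @buf: pointer to the octnet_buf_free_info for the buffer
 */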
0824 static void free_netsgbuf(void *buf)
0825 {
0826 struct octnet_buf_free_info *finfo;
0827 struct octnic_gather *g;
0828 struct sk_buff *skb;
0829 int i, frags, iq;
0830 struct lio *lio;
0831
0832 finfo = (struct octnet_buf_free_info *)buf;
0833 skb = finfo->skb;
0834 lio = finfo->lio;
0835 g = finfo->g;
0836 frags = skb_shinfo(skb)->nr_frags;
0837
0838 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
0839 g->sg[0].ptr[0], (skb->len - skb->data_len),
0840 DMA_TO_DEVICE);
0841
0842 i = 1;
0843 while (frags--) {
0844 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
0845
0846 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
0847 g->sg[(i >> 2)].ptr[(i & 3)],
0848 skb_frag_size(frag), DMA_TO_DEVICE);
0849 i++;
0850 }
0851
0852 iq = skb_iq(lio->oct_dev, skb);
0853
0854 spin_lock(&lio->glist_lock[iq]);
0855 list_add_tail(&g->list, &lio->glist[iq]);
0856 spin_unlock(&lio->glist_lock[iq]);
0857
0858 tx_buffer_free(skb);
0859 }
0860
0861
0862
0863
0864
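/**
 * free_netsgbuf_with_resp - unmap a scatter-gather buffer sent with a response
 * @buf: pointer to the soft command used for the request
 */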
0865 static void free_netsgbuf_with_resp(void *buf)
0866 {
0867 struct octnet_buf_free_info *finfo;
0868 struct octeon_soft_command *sc;
0869 struct octnic_gather *g;
0870 struct sk_buff *skb;
0871 int i, frags, iq;
0872 struct lio *lio;
0873
0874 sc = (struct octeon_soft_command *)buf;
0875 skb = (struct sk_buff *)sc->callback_arg;
0876 finfo = (struct octnet_buf_free_info *)&skb->cb;
0877
0878 lio = finfo->lio;
0879 g = finfo->g;
0880 frags = skb_shinfo(skb)->nr_frags;
0881
0882 dma_unmap_single(&lio->oct_dev->pci_dev->dev,
0883 g->sg[0].ptr[0], (skb->len - skb->data_len),
0884 DMA_TO_DEVICE);
0885
0886 i = 1;
0887 while (frags--) {
0888 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
0889
0890 dma_unmap_page(&lio->oct_dev->pci_dev->dev,
0891 g->sg[(i >> 2)].ptr[(i & 3)],
0892 skb_frag_size(frag), DMA_TO_DEVICE);
0893 i++;
0894 }
0895
0896 iq = skb_iq(lio->oct_dev, skb);
0897
0898 spin_lock(&lio->glist_lock[iq]);
0899 list_add_tail(&g->list, &lio->glist[iq]);
0900 spin_unlock(&lio->glist_lock[iq]);
0901
/* Don't free the skb yet; handle_timestamp() frees it once the
 * timestamp response arrives.
 */
0903 }
0904
0905
0906
0907
0908
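/**
 * liquidio_open - net device open for LiquidIO
 * @netdev: network device
 */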
0909 static int liquidio_open(struct net_device *netdev)
0910 {
0911 struct lio *lio = GET_LIO(netdev);
0912 struct octeon_device *oct = lio->oct_dev;
0913 struct octeon_device_priv *oct_priv =
0914 (struct octeon_device_priv *)oct->priv;
0915 struct napi_struct *napi, *n;
0916 int ret = 0;
0917
0918 if (!oct->props[lio->ifidx].napi_enabled) {
0919 tasklet_disable(&oct_priv->droq_tasklet);
0920
0921 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
0922 napi_enable(napi);
0923
0924 oct->props[lio->ifidx].napi_enabled = 1;
0925
0926 oct->droq[0]->ops.poll_mode = 1;
0927 }
0928
0929 ifstate_set(lio, LIO_IFSTATE_RUNNING);
0930
0931
0932 lio->intf_open = 1;
0933
0934 netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
0935 start_txqs(netdev);
0936
0937 INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
0938 lio->stats_wk.ctxptr = lio;
0939 schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
0940 (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));
0941
/* Tell Octeon to start forwarding packets to the host. */
0943 ret = send_rx_ctrl_cmd(lio, 1);
0944 if (ret)
0945 return ret;
0946
0947 dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name);
0948
0949 return ret;
0950 }
0951
0952
0953
0954
0955
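/**
 * liquidio_stop - net device stop for LiquidIO
 * @netdev: network device
 */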
0956 static int liquidio_stop(struct net_device *netdev)
0957 {
0958 struct lio *lio = GET_LIO(netdev);
0959 struct octeon_device *oct = lio->oct_dev;
0960 struct octeon_device_priv *oct_priv =
0961 (struct octeon_device_priv *)oct->priv;
0962 struct napi_struct *napi, *n;
0963 int ret = 0;
0964
/* Tell Octeon to stop forwarding packets to the host. */
0966 ret = send_rx_ctrl_cmd(lio, 0);
0967 if (ret)
0968 return ret;
0969
0970 netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
0971
0972 lio->intf_open = 0;
0973 lio->linfo.link.s.link_up = 0;
0974
0975 netif_carrier_off(netdev);
0976 lio->link_changes++;
0977
0978 ifstate_reset(lio, LIO_IFSTATE_RUNNING);
0979
0980 stop_txqs(netdev);
0981
/* Wait for any in-flight Rx packets to be processed. */
0983 if (lio_wait_for_clean_oq(oct))
0984 netif_info(lio, rx_err, lio->netdev,
0985 "Proceeding with stop interface after partial RX desc processing\n");
0986
0987 if (oct->props[lio->ifidx].napi_enabled == 1) {
0988 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
0989 napi_disable(napi);
0990
0991 oct->props[lio->ifidx].napi_enabled = 0;
0992
0993 oct->droq[0]->ops.poll_mode = 0;
0994
0995 tasklet_enable(&oct_priv->droq_tasklet);
0996 }
0997
0998 cancel_delayed_work_sync(&lio->stats_wk.work);
0999
1000 dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
1001
1002 return ret;
1003 }
1004
1005
1006
1007
1008
1009
1010
1011
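/**
 * get_new_flags - convert net device flags into an octnet_ifflags mask
 * @netdev: network device
 */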
1012 static enum octnet_ifflags get_new_flags(struct net_device *netdev)
1013 {
1014 enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
1015
1016 if (netdev->flags & IFF_PROMISC)
1017 f |= OCTNET_IFFLAG_PROMISC;
1018
1019 if (netdev->flags & IFF_ALLMULTI)
1020 f |= OCTNET_IFFLAG_ALLMULTI;
1021
1022 if (netdev->flags & IFF_MULTICAST) {
1023 f |= OCTNET_IFFLAG_MULTICAST;
1024
/* Accept all multicast addresses if there are more of them than
 * the hardware filter can hold.
 */
1028 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
1029 f |= OCTNET_IFFLAG_ALLMULTI;
1030 }
1031
1032 if (netdev->flags & IFF_BROADCAST)
1033 f |= OCTNET_IFFLAG_BROADCAST;
1034
1035 return f;
1036 }
1037
1038 static void liquidio_set_uc_list(struct net_device *netdev)
1039 {
1040 struct lio *lio = GET_LIO(netdev);
1041 struct octeon_device *oct = lio->oct_dev;
1042 struct octnic_ctrl_pkt nctrl;
1043 struct netdev_hw_addr *ha;
1044 u64 *mac;
1045
1046 if (lio->netdev_uc_count == netdev_uc_count(netdev))
1047 return;
1048
1049 if (netdev_uc_count(netdev) > MAX_NCTRL_UDD) {
1050 dev_err(&oct->pci_dev->dev, "too many MAC addresses in netdev uc list\n");
1051 return;
1052 }
1053
1054 lio->netdev_uc_count = netdev_uc_count(netdev);
1055
1056 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1057 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_UC_LIST;
1058 nctrl.ncmd.s.more = lio->netdev_uc_count;
1059 nctrl.ncmd.s.param1 = oct->vf_num;
1060 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1061 nctrl.netpndev = (u64)netdev;
1062 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1063
1064
1065 mac = &nctrl.udd[0];
1066 netdev_for_each_uc_addr(ha, netdev) {
1067 ether_addr_copy(((u8 *)mac) + 2, ha->addr);
1068 mac++;
1069 }
1070
1071 octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1072 }
1073
1074
1075
1076
1077
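/**
 * liquidio_set_mcast_list - net device set_rx_mode handler
 * @netdev: network device
 */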
1078 static void liquidio_set_mcast_list(struct net_device *netdev)
1079 {
1080 int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
1081 struct lio *lio = GET_LIO(netdev);
1082 struct octeon_device *oct = lio->oct_dev;
1083 struct octnic_ctrl_pkt nctrl;
1084 struct netdev_hw_addr *ha;
1085 u64 *mc;
1086 int ret;
1087
1088 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1089
1090
1091 nctrl.ncmd.u64 = 0;
1092 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
1093 nctrl.ncmd.s.param1 = get_new_flags(netdev);
1094 nctrl.ncmd.s.param2 = mc_count;
1095 nctrl.ncmd.s.more = mc_count;
1096 nctrl.netpndev = (u64)netdev;
1097 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1098
1099
1100 mc = &nctrl.udd[0];
1101 netdev_for_each_mc_addr(ha, netdev) {
1102 *mc = 0;
1103 ether_addr_copy(((u8 *)mc) + 2, ha->addr);
1104
1105 if (++mc > &nctrl.udd[mc_count])
1106 break;
1107 }
1108
1109 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1110
/* The set_rx_mode path must not sleep, so do not wait for the
 * firmware response here.
 */
1115 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1116 if (ret) {
1117 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
1118 ret);
1119 }
1120
1121 liquidio_set_uc_list(netdev);
1122 }
1123
1124
1125
1126
1127
1128
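/**
 * liquidio_set_mac - net device set_mac_address handler
 * @netdev: network device
 * @p: new MAC address (struct sockaddr)
 */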
1129 static int liquidio_set_mac(struct net_device *netdev, void *p)
1130 {
1131 struct sockaddr *addr = (struct sockaddr *)p;
1132 struct lio *lio = GET_LIO(netdev);
1133 struct octeon_device *oct = lio->oct_dev;
1134 struct octnic_ctrl_pkt nctrl;
1135 int ret = 0;
1136
1137 if (!is_valid_ether_addr(addr->sa_data))
1138 return -EADDRNOTAVAIL;
1139
1140 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
1141 return 0;
1142
1143 if (lio->linfo.macaddr_is_admin_asgnd)
1144 return -EPERM;
1145
1146 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1147
1148 nctrl.ncmd.u64 = 0;
1149 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
1150 nctrl.ncmd.s.param1 = 0;
1151 nctrl.ncmd.s.more = 1;
1152 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1153 nctrl.netpndev = (u64)netdev;
1154
1155 nctrl.udd[0] = 0;
/* The MAC address is presented in network byte order. */
1157 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, addr->sa_data);
1158
1159 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1160 if (ret < 0) {
1161 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
1162 return -ENOMEM;
1163 }
1164
1165 if (nctrl.sc_status ==
1166 FIRMWARE_STATUS_CODE(OCTEON_REQUEST_NO_PERMISSION)) {
1167 dev_err(&oct->pci_dev->dev, "MAC Address change failed: no permission\n");
1168 return -EPERM;
1169 }
1170
1171 eth_hw_addr_set(netdev, addr->sa_data);
1172 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
1173
1174 return 0;
1175 }
1176
1177 static void
1178 liquidio_get_stats64(struct net_device *netdev,
1179 struct rtnl_link_stats64 *lstats)
1180 {
1181 struct lio *lio = GET_LIO(netdev);
1182 struct octeon_device *oct;
1183 u64 pkts = 0, drop = 0, bytes = 0;
1184 struct oct_droq_stats *oq_stats;
1185 struct oct_iq_stats *iq_stats;
1186 int i, iq_no, oq_no;
1187
1188 oct = lio->oct_dev;
1189
1190 if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
1191 return;
1192
1193 for (i = 0; i < oct->num_iqs; i++) {
1194 iq_no = lio->linfo.txpciq[i].s.q_no;
1195 iq_stats = &oct->instr_queue[iq_no]->stats;
1196 pkts += iq_stats->tx_done;
1197 drop += iq_stats->tx_dropped;
1198 bytes += iq_stats->tx_tot_bytes;
1199 }
1200
1201 lstats->tx_packets = pkts;
1202 lstats->tx_bytes = bytes;
1203 lstats->tx_dropped = drop;
1204
1205 pkts = 0;
1206 drop = 0;
1207 bytes = 0;
1208
1209 for (i = 0; i < oct->num_oqs; i++) {
1210 oq_no = lio->linfo.rxpciq[i].s.q_no;
1211 oq_stats = &oct->droq[oq_no]->stats;
1212 pkts += oq_stats->rx_pkts_received;
1213 drop += (oq_stats->rx_dropped +
1214 oq_stats->dropped_nodispatch +
1215 oq_stats->dropped_toomany +
1216 oq_stats->dropped_nomem);
1217 bytes += oq_stats->rx_bytes_received;
1218 }
1219
1220 lstats->rx_bytes = bytes;
1221 lstats->rx_packets = pkts;
1222 lstats->rx_dropped = drop;
1223
1224 lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
1225
1226
1227 lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
1228
1229 lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
1230
1231 lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
1232
1233 lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
1234 lstats->rx_frame_errors;
1235
1236
1237 lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
1238 lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
1239
1240 lstats->tx_errors = lstats->tx_aborted_errors +
1241 lstats->tx_carrier_errors;
1242 }
1243
1244
1245
1246
1247
1248
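/**
 * hwtstamp_ioctl - handle the SIOCSHWTSTAMP ioctl
 * @netdev: network device
 * @ifr: interface request carrying the hwtstamp_config
 */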
1249 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
1250 {
1251 struct lio *lio = GET_LIO(netdev);
1252 struct hwtstamp_config conf;
1253
1254 if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
1255 return -EFAULT;
1256
1257 switch (conf.tx_type) {
1258 case HWTSTAMP_TX_ON:
1259 case HWTSTAMP_TX_OFF:
1260 break;
1261 default:
1262 return -ERANGE;
1263 }
1264
1265 switch (conf.rx_filter) {
1266 case HWTSTAMP_FILTER_NONE:
1267 break;
1268 case HWTSTAMP_FILTER_ALL:
1269 case HWTSTAMP_FILTER_SOME:
1270 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
1271 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
1272 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
1273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1274 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
1275 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
1276 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1277 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
1278 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
1279 case HWTSTAMP_FILTER_PTP_V2_EVENT:
1280 case HWTSTAMP_FILTER_PTP_V2_SYNC:
1281 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
1282 case HWTSTAMP_FILTER_NTP_ALL:
1283 conf.rx_filter = HWTSTAMP_FILTER_ALL;
1284 break;
1285 default:
1286 return -ERANGE;
1287 }
1288
1289 if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
1290 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1291
1292 else
1293 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
1294
1295 return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
1296 }
1297
1298
1299
1300
1301
1302
1303
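/**
 * liquidio_ioctl - ioctl handler for the net device
 * @netdev: network device
 * @ifr: interface request
 * @cmd: ioctl command
 */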
1304 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1305 {
1306 switch (cmd) {
1307 case SIOCSHWTSTAMP:
1308 return hwtstamp_ioctl(netdev, ifr);
1309 default:
1310 return -EOPNOTSUPP;
1311 }
1312 }
1313
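/* Soft-command callback: deliver a Tx hardware timestamp to the stack and
 * free the associated skb and soft command.
 */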
1314 static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
1315 {
1316 struct sk_buff *skb = (struct sk_buff *)buf;
1317 struct octnet_buf_free_info *finfo;
1318 struct oct_timestamp_resp *resp;
1319 struct octeon_soft_command *sc;
1320 struct lio *lio;
1321
1322 finfo = (struct octnet_buf_free_info *)skb->cb;
1323 lio = finfo->lio;
1324 sc = finfo->sc;
1325 oct = lio->oct_dev;
1326 resp = (struct oct_timestamp_resp *)sc->virtrptr;
1327
1328 if (status != OCTEON_REQUEST_DONE) {
1329 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
1330 CVM_CAST64(status));
1331 resp->timestamp = 0;
1332 }
1333
1334 octeon_swap_8B_data(&resp->timestamp, 1);
1335
1336 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
1337 struct skb_shared_hwtstamps ts;
1338 u64 ns = resp->timestamp;
1339
1340 netif_info(lio, tx_done, lio->netdev,
1341 "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
1342 skb, (unsigned long long)ns);
1343 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
1344 skb_tstamp_tx(skb, &ts);
1345 }
1346
1347 octeon_free_soft_command(oct, sc);
1348 tx_buffer_free(skb);
1349 }
1350
1351
1352
1353
1354
1355
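/**
 * send_nic_timestamp_pkt - send a data packet that will be hardware timestamped
 * @oct: octeon device
 * @ndata: prepared network data
 * @finfo: per-packet buffer-free info
 * @xmit_more: more packets are queued behind this one
 */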
1356 static int send_nic_timestamp_pkt(struct octeon_device *oct,
1357 struct octnic_data_pkt *ndata,
1358 struct octnet_buf_free_info *finfo,
1359 int xmit_more)
1360 {
1361 struct octeon_soft_command *sc;
1362 int ring_doorbell;
1363 struct lio *lio;
1364 int retval;
1365 u32 len;
1366
1367 lio = finfo->lio;
1368
1369 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
1370 sizeof(struct oct_timestamp_resp));
1371 finfo->sc = sc;
1372
1373 if (!sc) {
1374 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
1375 return IQ_SEND_FAILED;
1376 }
1377
1378 if (ndata->reqtype == REQTYPE_NORESP_NET)
1379 ndata->reqtype = REQTYPE_RESP_NET;
1380 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
1381 ndata->reqtype = REQTYPE_RESP_NET_SG;
1382
1383 sc->callback = handle_timestamp;
1384 sc->callback_arg = finfo->skb;
1385 sc->iq_no = ndata->q_no;
1386
1387 len = (u32)((struct octeon_instr_ih3 *)(&sc->cmd.cmd3.ih3))->dlengsz;
1388
1389 ring_doorbell = !xmit_more;
1390
1391 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
1392 sc, len, ndata->reqtype);
1393
1394 if (retval == IQ_SEND_FAILED) {
1395 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
1396 retval);
1397 octeon_free_soft_command(oct, sc);
1398 } else {
1399 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
1400 }
1401
1402 return retval;
1403 }
1404
1411
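/**
 * liquidio_xmit - transmit a network packet to the Octeon interface
 * @skb: socket buffer to transmit
 * @netdev: network device
 *
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */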
1412 static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
1413 {
1414 struct octnet_buf_free_info *finfo;
1415 union octnic_cmd_setup cmdsetup;
1416 struct octnic_data_pkt ndata;
1417 struct octeon_instr_irh *irh;
1418 struct oct_iq_stats *stats;
1419 struct octeon_device *oct;
1420 int q_idx = 0, iq_no = 0;
1421 union tx_info *tx_info;
1422 int xmit_more = 0;
1423 struct lio *lio;
1424 int status = 0;
1425 u64 dptr = 0;
1426 u32 tag = 0;
1427 int j;
1428
1429 lio = GET_LIO(netdev);
1430 oct = lio->oct_dev;
1431
1432 q_idx = skb_iq(lio->oct_dev, skb);
1433 tag = q_idx;
1434 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
1435
1436 stats = &oct->instr_queue[iq_no]->stats;
1437
/* Check for conditions in which the current packet cannot be
 * transmitted.
 */
1441 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
1442 (!lio->linfo.link.s.link_up) || (skb->len <= 0)) {
1443 netif_info(lio, tx_err, lio->netdev, "Transmit failed link_status : %d\n",
1444 lio->linfo.link.s.link_up);
1445 goto lio_xmit_failed;
1446 }
1447
/* Use space in skb->cb to store info used to unmap and free the
 * buffers later.
 */
1451 finfo = (struct octnet_buf_free_info *)skb->cb;
1452 finfo->lio = lio;
1453 finfo->skb = skb;
1454 finfo->sc = NULL;
1455
1456
1457 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
1458
1459 ndata.buf = finfo;
1460
1461 ndata.q_no = iq_no;
1462
1463 if (octnet_iq_is_full(oct, ndata.q_no)) {
1464
1465 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
1466 ndata.q_no);
1467 stats->tx_iq_busy++;
1468 return NETDEV_TX_BUSY;
1469 }
1470
1471 ndata.datasize = skb->len;
1472
1473 cmdsetup.u64 = 0;
1474 cmdsetup.s.iq_no = iq_no;
1475
1476 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1477 if (skb->encapsulation) {
1478 cmdsetup.s.tnl_csum = 1;
1479 stats->tx_vxlan++;
1480 } else {
1481 cmdsetup.s.transport_csum = 1;
1482 }
1483 }
1484 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
1485 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1486 cmdsetup.s.timestamp = 1;
1487 }
1488
1489 if (!skb_shinfo(skb)->nr_frags) {
1490 cmdsetup.s.u.datasize = skb->len;
1491 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1492
1493 dptr = dma_map_single(&oct->pci_dev->dev,
1494 skb->data,
1495 skb->len,
1496 DMA_TO_DEVICE);
1497 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
1498 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
1499 __func__);
1500 return NETDEV_TX_BUSY;
1501 }
1502
1503 ndata.cmd.cmd3.dptr = dptr;
1504 finfo->dptr = dptr;
1505 ndata.reqtype = REQTYPE_NORESP_NET;
1506
1507 } else {
1508 skb_frag_t *frag;
1509 struct octnic_gather *g;
1510 int i, frags;
1511
1512 spin_lock(&lio->glist_lock[q_idx]);
1513 g = (struct octnic_gather *)
1514 lio_list_delete_head(&lio->glist[q_idx]);
1515 spin_unlock(&lio->glist_lock[q_idx]);
1516
1517 if (!g) {
1518 netif_info(lio, tx_err, lio->netdev,
1519 "Transmit scatter gather: glist null!\n");
1520 goto lio_xmit_failed;
1521 }
1522
1523 cmdsetup.s.gather = 1;
1524 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
1525 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
1526
1527 memset(g->sg, 0, g->sg_size);
1528
1529 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
1530 skb->data,
1531 (skb->len - skb->data_len),
1532 DMA_TO_DEVICE);
1533 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
1534 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
1535 __func__);
1536 return NETDEV_TX_BUSY;
1537 }
1538 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
1539
1540 frags = skb_shinfo(skb)->nr_frags;
1541 i = 1;
1542 while (frags--) {
1543 frag = &skb_shinfo(skb)->frags[i - 1];
1544
1545 g->sg[(i >> 2)].ptr[(i & 3)] =
1546 skb_frag_dma_map(&oct->pci_dev->dev,
1547 frag, 0, skb_frag_size(frag),
1548 DMA_TO_DEVICE);
1549 if (dma_mapping_error(&oct->pci_dev->dev,
1550 g->sg[i >> 2].ptr[i & 3])) {
1551 dma_unmap_single(&oct->pci_dev->dev,
1552 g->sg[0].ptr[0],
1553 skb->len - skb->data_len,
1554 DMA_TO_DEVICE);
1555 for (j = 1; j < i; j++) {
1556 frag = &skb_shinfo(skb)->frags[j - 1];
1557 dma_unmap_page(&oct->pci_dev->dev,
1558 g->sg[j >> 2].ptr[j & 3],
1559 skb_frag_size(frag),
1560 DMA_TO_DEVICE);
1561 }
1562 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
1563 __func__);
1564 return NETDEV_TX_BUSY;
1565 }
1566
1567 add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
1568 (i & 3));
1569 i++;
1570 }
1571
1572 dptr = g->sg_dma_ptr;
1573
1574 ndata.cmd.cmd3.dptr = dptr;
1575 finfo->dptr = dptr;
1576 finfo->g = g;
1577
1578 ndata.reqtype = REQTYPE_NORESP_NET_SG;
1579 }
1580
1581 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
1582 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
1583
1584 if (skb_shinfo(skb)->gso_size) {
1585 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
1586 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
1587 }
1588
1589
1590 if (skb_vlan_tag_present(skb)) {
1591 irh->priority = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
1592 irh->vlan = skb_vlan_tag_get(skb) & VLAN_VID_MASK;
1593 }
1594
1595 xmit_more = netdev_xmit_more();
1596
1597 if (unlikely(cmdsetup.s.timestamp))
1598 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
1599 else
1600 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
1601 if (status == IQ_SEND_FAILED)
1602 goto lio_xmit_failed;
1603
1604 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
1605
1606 if (status == IQ_SEND_STOP) {
1607 dev_err(&oct->pci_dev->dev, "Rcvd IQ_SEND_STOP signal; stopping IQ-%d\n",
1608 iq_no);
1609 netif_stop_subqueue(netdev, q_idx);
1610 }
1611
1612 netif_trans_update(netdev);
1613
1614 if (tx_info->s.gso_segs)
1615 stats->tx_done += tx_info->s.gso_segs;
1616 else
1617 stats->tx_done++;
1618 stats->tx_tot_bytes += ndata.datasize;
1619
1620 return NETDEV_TX_OK;
1621
1622 lio_xmit_failed:
1623 stats->tx_dropped++;
1624 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
1625 iq_no, stats->tx_dropped);
1626 if (dptr)
1627 dma_unmap_single(&oct->pci_dev->dev, dptr,
1628 ndata.datasize, DMA_TO_DEVICE);
1629
1630 octeon_ring_doorbell_locked(oct, iq_no);
1631
1632 tx_buffer_free(skb);
1633 return NETDEV_TX_OK;
1634 }
1635
1636
1637
1638
1639
1640
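/**
 * liquidio_tx_timeout - net device Tx timeout handler
 * @netdev: network device
 * @txqueue: index of the hung queue
 */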
1641 static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1642 {
1643 struct lio *lio;
1644
1645 lio = GET_LIO(netdev);
1646
1647 netif_info(lio, tx_err, lio->netdev,
1648 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
1649 netdev->stats.tx_dropped);
1650 netif_trans_update(netdev);
1651 wake_txqs(netdev);
1652 }
1653
1654 static int
1655 liquidio_vlan_rx_add_vid(struct net_device *netdev,
1656 __be16 proto __attribute__((unused)), u16 vid)
1657 {
1658 struct lio *lio = GET_LIO(netdev);
1659 struct octeon_device *oct = lio->oct_dev;
1660 struct octnic_ctrl_pkt nctrl;
1661 int ret = 0;
1662
1663 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1664
1665 nctrl.ncmd.u64 = 0;
1666 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
1667 nctrl.ncmd.s.param1 = vid;
1668 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1669 nctrl.netpndev = (u64)netdev;
1670 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1671
1672 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1673 if (ret) {
1674 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
1675 ret);
1676 return -EPERM;
1677 }
1678
1679 return 0;
1680 }
1681
1682 static int
1683 liquidio_vlan_rx_kill_vid(struct net_device *netdev,
1684 __be16 proto __attribute__((unused)), u16 vid)
1685 {
1686 struct lio *lio = GET_LIO(netdev);
1687 struct octeon_device *oct = lio->oct_dev;
1688 struct octnic_ctrl_pkt nctrl;
1689 int ret = 0;
1690
1691 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1692
1693 nctrl.ncmd.u64 = 0;
1694 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
1695 nctrl.ncmd.s.param1 = vid;
1696 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1697 nctrl.netpndev = (u64)netdev;
1698 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1699
1700 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1701 if (ret) {
1702 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
1703 ret);
1704 if (ret > 0)
1705 ret = -EIO;
1706 }
1707 return ret;
1708 }
1709
1710
1711
1712
1713
1714
1715
1716
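/**
 * liquidio_set_rxcsum_command - enable or disable Rx checksum offload
 * @netdev: network device
 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE or OCTNET_CMD_RXCSUM_DISABLE
 */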
1717 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
1718 u8 rx_cmd)
1719 {
1720 struct lio *lio = GET_LIO(netdev);
1721 struct octeon_device *oct = lio->oct_dev;
1722 struct octnic_ctrl_pkt nctrl;
1723 int ret = 0;
1724
1725 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1726
1727 nctrl.ncmd.u64 = 0;
1728 nctrl.ncmd.s.cmd = command;
1729 nctrl.ncmd.s.param1 = rx_cmd;
1730 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1731 nctrl.netpndev = (u64)netdev;
1732 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1733
1734 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1735 if (ret) {
1736 dev_err(&oct->pci_dev->dev, "DEVFLAGS RXCSUM change failed in core (ret:0x%x)\n",
1737 ret);
1738 if (ret > 0)
1739 ret = -EIO;
1740 }
1741 return ret;
1742 }
1743
1744
1745
1746
1747
1748
1749
1750
1751
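/**
 * liquidio_vxlan_port_command - add or delete a VxLAN UDP port in firmware
 * @netdev: network device
 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
 * @vxlan_port: VxLAN port to add or delete
 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD or OCTNET_CMD_VXLAN_PORT_DEL
 */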
1752 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
1753 u16 vxlan_port, u8 vxlan_cmd_bit)
1754 {
1755 struct lio *lio = GET_LIO(netdev);
1756 struct octeon_device *oct = lio->oct_dev;
1757 struct octnic_ctrl_pkt nctrl;
1758 int ret = 0;
1759
1760 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
1761
1762 nctrl.ncmd.u64 = 0;
1763 nctrl.ncmd.s.cmd = command;
1764 nctrl.ncmd.s.more = vxlan_cmd_bit;
1765 nctrl.ncmd.s.param1 = vxlan_port;
1766 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
1767 nctrl.netpndev = (u64)netdev;
1768 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
1769
1770 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
1771 if (ret) {
1772 dev_err(&oct->pci_dev->dev,
1773 "DEVFLAGS VxLAN port add/delete failed in core (ret : 0x%x)\n",
1774 ret);
1775 if (ret > 0)
1776 ret = -EIO;
1777 }
1778 return ret;
1779 }
1780
1781 static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
1782 unsigned int table, unsigned int entry,
1783 struct udp_tunnel_info *ti)
1784 {
1785 return liquidio_vxlan_port_command(netdev,
1786 OCTNET_CMD_VXLAN_PORT_CONFIG,
1787 htons(ti->port),
1788 OCTNET_CMD_VXLAN_PORT_ADD);
1789 }
1790
1791 static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
1792 unsigned int table,
1793 unsigned int entry,
1794 struct udp_tunnel_info *ti)
1795 {
1796 return liquidio_vxlan_port_command(netdev,
1797 OCTNET_CMD_VXLAN_PORT_CONFIG,
1798 htons(ti->port),
1799 OCTNET_CMD_VXLAN_PORT_DEL);
1800 }
1801
1802 static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
1803 .set_port = liquidio_udp_tunnel_set_port,
1804 .unset_port = liquidio_udp_tunnel_unset_port,
1805 .tables = {
1806 { .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
1807 },
1808 };
1809
1810
1811
1812
1813
1814
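/**
 * liquidio_fix_features - drop requested features the device cannot support
 * @netdev: network device
 * @request: features requested by the stack
 */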
1815 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
1816 netdev_features_t request)
1817 {
1818 struct lio *lio = netdev_priv(netdev);
1819
1820 if ((request & NETIF_F_RXCSUM) &&
1821 !(lio->dev_capability & NETIF_F_RXCSUM))
1822 request &= ~NETIF_F_RXCSUM;
1823
1824 if ((request & NETIF_F_HW_CSUM) &&
1825 !(lio->dev_capability & NETIF_F_HW_CSUM))
1826 request &= ~NETIF_F_HW_CSUM;
1827
1828 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
1829 request &= ~NETIF_F_TSO;
1830
1831 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
1832 request &= ~NETIF_F_TSO6;
1833
1834 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
1835 request &= ~NETIF_F_LRO;
1836
/* LRO requires Rx checksum offload; drop LRO if RXCSUM is not requested. */
1838 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
1839 (lio->dev_capability & NETIF_F_LRO))
1840 request &= ~NETIF_F_LRO;
1841
1842 return request;
1843 }
1844
1845
1846
1847
1848
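/**
 * liquidio_set_features - apply LRO and Rx checksum feature changes
 * @netdev: network device
 * @features: new feature set
 */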
1849 static int liquidio_set_features(struct net_device *netdev,
1850 netdev_features_t features)
1851 {
1852 struct lio *lio = netdev_priv(netdev);
1853
1854 if (!((netdev->features ^ features) & NETIF_F_LRO))
1855 return 0;
1856
1857 if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
1858 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
1859 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1860 else if (!(features & NETIF_F_LRO) &&
1861 (lio->dev_capability & NETIF_F_LRO))
1862 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
1863 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
1864 if (!(netdev->features & NETIF_F_RXCSUM) &&
1865 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1866 (features & NETIF_F_RXCSUM))
1867 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1868 OCTNET_CMD_RXCSUM_ENABLE);
1869 else if ((netdev->features & NETIF_F_RXCSUM) &&
1870 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
1871 !(features & NETIF_F_RXCSUM))
1872 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
1873 OCTNET_CMD_RXCSUM_DISABLE);
1874
1875 return 0;
1876 }
1877
1878 static const struct net_device_ops lionetdevops = {
1879 .ndo_open = liquidio_open,
1880 .ndo_stop = liquidio_stop,
1881 .ndo_start_xmit = liquidio_xmit,
1882 .ndo_get_stats64 = liquidio_get_stats64,
1883 .ndo_set_mac_address = liquidio_set_mac,
1884 .ndo_set_rx_mode = liquidio_set_mcast_list,
1885 .ndo_tx_timeout = liquidio_tx_timeout,
1886 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
1887 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
1888 .ndo_change_mtu = liquidio_change_mtu,
1889 .ndo_eth_ioctl = liquidio_ioctl,
1890 .ndo_fix_features = liquidio_fix_features,
1891 .ndo_set_features = liquidio_set_features,
1892 };
1893
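/* Dispatch handler for NIC_INFO packets from the firmware; updates the
 * link status of the matching interface.
 */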
1894 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
1895 {
1896 struct octeon_device *oct = (struct octeon_device *)buf;
1897 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
1898 union oct_link_status *ls;
1899 int gmxport = 0;
1900 int i;
1901
1902 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
1903 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
1904 recv_pkt->buffer_size[0],
1905 recv_pkt->rh.r_nic_info.gmxport);
1906 goto nic_info_err;
1907 }
1908
1909 gmxport = recv_pkt->rh.r_nic_info.gmxport;
1910 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
1911 OCT_DROQ_INFO_SIZE);
1912
1913 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
1914
1915 for (i = 0; i < oct->ifcount; i++) {
1916 if (oct->props[i].gmxport == gmxport) {
1917 update_link_status(oct->props[i].netdev, ls);
1918 break;
1919 }
1920 }
1921
1922 nic_info_err:
1923 for (i = 0; i < recv_pkt->buffer_count; i++)
1924 recv_buffer_free(recv_pkt->buffer_ptr[i]);
1925 octeon_free_recv_info(recv_info);
1926 return 0;
1927 }
1928
1936
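/**
 * setup_nic_devices - query firmware and create the network interface(s)
 * @octeon_dev: octeon device
 */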
1937 static int setup_nic_devices(struct octeon_device *octeon_dev)
1938 {
1939 int retval, num_iqueues, num_oqueues;
1940 u32 resp_size, data_size;
1941 struct liquidio_if_cfg_resp *resp;
1942 struct octeon_soft_command *sc;
1943 union oct_nic_if_cfg if_cfg;
1944 struct octdev_props *props;
1945 struct net_device *netdev;
1946 struct lio_version *vdata;
1947 struct lio *lio = NULL;
1948 u8 mac[ETH_ALEN], i, j;
1949 u32 ifidx_or_pfnum;
1950
1951 ifidx_or_pfnum = octeon_dev->pf_num;
1952
/* Register a handler for firmware link-status (NIC_INFO) notifications. */
1954 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, OPCODE_NIC_INFO,
1955 lio_nic_info, octeon_dev);
1956
1957
1958
1959
1960 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
1961 free_netbuf);
1962
1963 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
1964 free_netsgbuf);
1965
1966 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
1967 free_netsgbuf_with_resp);
1968
1969 for (i = 0; i < octeon_dev->ifcount; i++) {
1970 resp_size = sizeof(struct liquidio_if_cfg_resp);
1971 data_size = sizeof(struct lio_version);
1972 sc = (struct octeon_soft_command *)
1973 octeon_alloc_soft_command(octeon_dev, data_size,
1974 resp_size, 0);
1975 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1976 vdata = (struct lio_version *)sc->virtdptr;
1977
1978 *((u64 *)vdata) = 0;
1979 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
1980 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
1981 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
1982
1983 if_cfg.u64 = 0;
1984
1985 if_cfg.s.num_iqueues = octeon_dev->sriov_info.rings_per_vf;
1986 if_cfg.s.num_oqueues = octeon_dev->sriov_info.rings_per_vf;
1987 if_cfg.s.base_queue = 0;
1988
1989 sc->iq_no = 0;
1990
1991 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
1992 OPCODE_NIC_IF_CFG, 0, if_cfg.u64,
1993 0);
1994
1995 init_completion(&sc->complete);
1996 sc->sc_status = OCTEON_REQUEST_PENDING;
1997
1998 retval = octeon_send_soft_command(octeon_dev, sc);
1999 if (retval == IQ_SEND_FAILED) {
2000 dev_err(&octeon_dev->pci_dev->dev,
2001 "iq/oq config failed status: %x\n", retval);
2002
2003 octeon_free_soft_command(octeon_dev, sc);
return -EIO;
2005 }
2006
2007
2008
2009
2010 retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
2011 if (retval)
2012 return retval;
2013
2014 retval = resp->status;
2015 if (retval) {
2016 dev_err(&octeon_dev->pci_dev->dev,
2017 "iq/oq config failed, retval = %d\n", retval);
2018 WRITE_ONCE(sc->caller_is_done, true);
2019 return -EIO;
2020 }
2021
2022 snprintf(octeon_dev->fw_info.liquidio_firmware_version,
2023 32, "%s",
2024 resp->cfg_info.liquidio_firmware_version);
2025
2026 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
2027 (sizeof(struct liquidio_if_cfg_info)) >> 3);
2028
2029 num_iqueues = hweight64(resp->cfg_info.iqmask);
2030 num_oqueues = hweight64(resp->cfg_info.oqmask);
2031
2032 if (!(num_iqueues) || !(num_oqueues)) {
2033 dev_err(&octeon_dev->pci_dev->dev,
2034 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
2035 resp->cfg_info.iqmask, resp->cfg_info.oqmask);
2036 WRITE_ONCE(sc->caller_is_done, true);
2037 goto setup_nic_dev_done;
2038 }
2039 dev_dbg(&octeon_dev->pci_dev->dev,
2040 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
2041 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
2042 num_iqueues, num_oqueues);
2043
2044 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
2045
2046 if (!netdev) {
2047 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
2048 WRITE_ONCE(sc->caller_is_done, true);
2049 goto setup_nic_dev_done;
2050 }
2051
2052 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
2053
2054
2055
2056
2057 netdev->netdev_ops = &lionetdevops;
2058
2059 lio = GET_LIO(netdev);
2060
2061 memset(lio, 0, sizeof(struct lio));
2062
2063 lio->ifidx = ifidx_or_pfnum;
2064
2065 props = &octeon_dev->props[i];
2066 props->gmxport = resp->cfg_info.linfo.gmxport;
2067 props->netdev = netdev;
2068
2069 lio->linfo.num_rxpciq = num_oqueues;
2070 lio->linfo.num_txpciq = num_iqueues;
2071
2072 for (j = 0; j < num_oqueues; j++) {
2073 lio->linfo.rxpciq[j].u64 =
2074 resp->cfg_info.linfo.rxpciq[j].u64;
2075 }
2076 for (j = 0; j < num_iqueues; j++) {
2077 lio->linfo.txpciq[j].u64 =
2078 resp->cfg_info.linfo.txpciq[j].u64;
2079 }
2080
2081 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
2082 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
2083 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
2084 lio->linfo.macaddr_is_admin_asgnd =
2085 resp->cfg_info.linfo.macaddr_is_admin_asgnd;
2086 lio->linfo.macaddr_spoofchk =
2087 resp->cfg_info.linfo.macaddr_spoofchk;
2088
2089 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
2090
2091 lio->dev_capability = NETIF_F_HIGHDMA
2092 | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
2093 | NETIF_F_SG | NETIF_F_RXCSUM
2094 | NETIF_F_TSO | NETIF_F_TSO6
2095 | NETIF_F_GRO
2096 | NETIF_F_LRO;
2097 netif_set_tso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
2098
2099
2100
2101
2102 lio->enc_dev_capability = NETIF_F_IP_CSUM
2103 | NETIF_F_IPV6_CSUM
2104 | NETIF_F_GSO_UDP_TUNNEL
2105 | NETIF_F_HW_CSUM | NETIF_F_SG
2106 | NETIF_F_RXCSUM
2107 | NETIF_F_TSO | NETIF_F_TSO6
2108 | NETIF_F_LRO;
2109
2110 netdev->hw_enc_features =
2111 (lio->enc_dev_capability & ~NETIF_F_LRO);
2112 netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
2113
2114 netdev->vlan_features = lio->dev_capability;
2115
2116 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
2117 NETIF_F_HW_VLAN_CTAG_RX |
2118 NETIF_F_HW_VLAN_CTAG_TX;
2119
2120 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
2121
2122 netdev->hw_features = lio->dev_capability;
2123 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
2124
2125
2126 netdev->min_mtu = LIO_MIN_MTU_SIZE;
2127 netdev->max_mtu = LIO_MAX_MTU_SIZE;
2128
2129 WRITE_ONCE(sc->caller_is_done, true);
2130
2131
2132
2133
2134 lio->oct_dev = octeon_dev;
2135 lio->octprops = props;
2136 lio->netdev = netdev;
2137
2138 dev_dbg(&octeon_dev->pci_dev->dev,
2139 "if%d gmx: %d hw_addr: 0x%llx\n", i,
2140 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
2141
/* The MAC address is received in big-endian order; swap to host order. */
2143 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
2144 for (j = 0; j < ETH_ALEN; j++)
2145 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
2146
2147
2148 eth_hw_addr_set(netdev, mac);
2149
2150 if (liquidio_setup_io_queues(octeon_dev, i,
2151 lio->linfo.num_txpciq,
2152 lio->linfo.num_rxpciq)) {
2153 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
2154 goto setup_nic_dev_free;
2155 }
2156
2157 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
2158
2159
2160
2161
2162 octeon_dev->fn_list.enable_interrupt(octeon_dev,
2163 OCTEON_ALL_INTR);
2164
2165
2166
2167
2168 lio->txq = lio->linfo.txpciq[0].s.q_no;
2169 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
2170
2171 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
2172 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
2173
2174 if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
2175 dev_err(&octeon_dev->pci_dev->dev,
2176 "Gather list allocation failed\n");
2177 goto setup_nic_dev_free;
2178 }
2179
2180
2181 liquidio_set_ethtool_ops(netdev);
2182 if (lio->oct_dev->chip_id == OCTEON_CN23XX_VF_VID)
2183 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
2184 else
2185 octeon_dev->priv_flags = 0x0;
2186
2187 if (netdev->features & NETIF_F_LRO)
2188 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2189 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2190
2191 if (setup_link_status_change_wq(netdev))
2192 goto setup_nic_dev_free;
2193
2194 if (setup_rx_oom_poll_fn(netdev))
2195 goto setup_nic_dev_free;
2196
2197
2198 if (register_netdev(netdev)) {
2199 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
2200 goto setup_nic_dev_free;
2201 }
2202
2203 dev_dbg(&octeon_dev->pci_dev->dev,
2204 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
2205 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2206 netif_carrier_off(netdev);
2207 lio->link_changes++;
2208
2209 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
2210
2211
2212
2213
2214
2215 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
2216 OCTNET_CMD_RXCSUM_ENABLE);
2217 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
2218 OCTNET_CMD_TXCSUM_ENABLE);
2219
2220 dev_dbg(&octeon_dev->pci_dev->dev,
2221 "NIC ifidx:%d Setup successful\n", i);
2222
2223 octeon_dev->no_speed_setting = 1;
2224 }
2225
2226 return 0;
2227
2228 setup_nic_dev_free:
2229
2230 while (i--) {
2231 dev_err(&octeon_dev->pci_dev->dev,
2232 "NIC ifidx:%d Setup failed\n", i);
2233 liquidio_destroy_nic_device(octeon_dev, i);
2234 }
2235
2236 setup_nic_dev_done:
2237
2238 return -ENODEV;
2239 }
2240
2241
2242
2243
2244
2245
2246
2247
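/**
 * liquidio_init_nic_module - initialize the NIC portion of the driver
 * @oct: octeon device
 */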
2248 static int liquidio_init_nic_module(struct octeon_device *oct)
2249 {
2250 int num_nic_ports = 1;
2251 int i, retval = 0;
2252
2253 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
2254
2255
2256
2257
2258 oct->ifcount = num_nic_ports;
2259 memset(oct->props, 0,
2260 sizeof(struct octdev_props) * num_nic_ports);
2261
2262 for (i = 0; i < MAX_OCTEON_LINKS; i++)
2263 oct->props[i].gmxport = -1;
2264
2265 retval = setup_nic_devices(oct);
2266 if (retval) {
2267 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
2268 goto octnet_init_failure;
2269 }
2270
2271 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
2272
2273 return retval;
2274
2275 octnet_init_failure:
2276
2277 oct->ifcount = 0;
2278
2279 return retval;
2280 }
2281
2282
2283
2284
2285
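/**
 * octeon_device_init - per-device initialization for each Octeon device
 * @oct: octeon device
 *
 * Walks the device through its initialization states up to OCT_DEV_RUNNING.
 */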
2286 static int octeon_device_init(struct octeon_device *oct)
2287 {
2288 u32 rev_id;
2289 int j;
2290
2291 atomic_set(&oct->status, OCT_DEV_BEGIN_STATE);
2292
2293
/* Enable access to the octeon device and make its DMA capability
 * known to the OS.
 */
2296 if (octeon_pci_os_setup(oct))
2297 return 1;
2298 atomic_set(&oct->status, OCT_DEV_PCI_ENABLE_DONE);
2299
2300 oct->chip_id = OCTEON_CN23XX_VF_VID;
2301 pci_read_config_dword(oct->pci_dev, 8, &rev_id);
2302 oct->rev_id = rev_id & 0xff;
2303
2304 if (cn23xx_setup_octeon_vf_device(oct))
2305 return 1;
2306
2307 atomic_set(&oct->status, OCT_DEV_PCI_MAP_DONE);
2308
2309 oct->app_mode = CVM_DRV_NIC_APP;
2310
2311
/* Initialize the dispatch mechanism used to push packets arriving on
 * Octeon DROQs.
 */
2314 if (octeon_init_dispatch_list(oct))
2315 return 1;
2316
2317 atomic_set(&oct->status, OCT_DEV_DISPATCH_INIT_DONE);
2318
2319 if (octeon_set_io_queues_off(oct)) {
2320 dev_err(&oct->pci_dev->dev, "setting io queues off failed\n");
2321 return 1;
2322 }
2323
2324 if (oct->fn_list.setup_device_regs(oct)) {
2325 dev_err(&oct->pci_dev->dev, "device registers configuration failed\n");
2326 return 1;
2327 }
2328
2329
2330 if (octeon_setup_sc_buffer_pool(oct)) {
2331 dev_err(&oct->pci_dev->dev, "sc buffer pool allocation failed\n");
2332 return 1;
2333 }
2334 atomic_set(&oct->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
2335
2336
2337 if (octeon_setup_instr_queues(oct)) {
2338 dev_err(&oct->pci_dev->dev, "instruction queue initialization failed\n");
2339 return 1;
2340 }
2341 atomic_set(&oct->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
2342
2343
2344
2345
2346 if (octeon_setup_response_list(oct)) {
2347 dev_err(&oct->pci_dev->dev, "Response list allocation failed\n");
2348 return 1;
2349 }
2350 atomic_set(&oct->status, OCT_DEV_RESP_LIST_INIT_DONE);
2351
2352 if (octeon_setup_output_queues(oct)) {
2353 dev_err(&oct->pci_dev->dev, "Output queue initialization failed\n");
2354 return 1;
2355 }
2356 atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);
2357
2358 if (oct->fn_list.setup_mbox(oct)) {
2359 dev_err(&oct->pci_dev->dev, "Mailbox setup failed\n");
2360 return 1;
2361 }
2362 atomic_set(&oct->status, OCT_DEV_MBOX_SETUP_DONE);
2363
2364 if (octeon_allocate_ioq_vector(oct, oct->sriov_info.rings_per_vf)) {
2365 dev_err(&oct->pci_dev->dev, "ioq vector allocation failed\n");
2366 return 1;
2367 }
2368 atomic_set(&oct->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
2369
2370 dev_info(&oct->pci_dev->dev, "OCTEON_CN23XX VF: %d ioqs\n",
2371 oct->sriov_info.rings_per_vf);
2372
2373
2374 if (octeon_setup_interrupt(oct, oct->sriov_info.rings_per_vf))
2375 return 1;
2376
2377 atomic_set(&oct->status, OCT_DEV_INTR_SET_DONE);
2378
/* Interrupts must be enabled for the PF<->VF handshake; they are
 * re-enabled afterwards so that the OQ tick value negotiated during
 * the handshake takes effect.
 */
2387 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2388
2389 if (cn23xx_octeon_pfvf_handshake(oct))
2390 return 1;
2391

/* Re-enable interrupts so the negotiated OQ tick value is in effect. */
2397 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
2398
2399
2400
2401 if (oct->fn_list.enable_io_queues(oct)) {
2402 dev_err(&oct->pci_dev->dev, "enabling io queues failed\n");
2403 return 1;
2404 }
2405
2406 atomic_set(&oct->status, OCT_DEV_IO_QUEUES_DONE);
2407
2408 atomic_set(&oct->status, OCT_DEV_HOST_OK);
2409
/* Send credits to the output queues; credits are always sent after
 * the output queue is enabled.
 */
2413 for (j = 0; j < oct->num_oqs; j++)
2414 writel(oct->droq[j]->max_count, oct->droq[j]->pkts_credit_reg);
2415
2416
2417
2418 atomic_set(&oct->status, OCT_DEV_CORE_OK);
2419
2420 atomic_set(&oct->status, OCT_DEV_RUNNING);
2421
2422 if (liquidio_init_nic_module(oct))
2423 return 1;
2424
2425 return 0;
2426 }
2427
2428 static int __init liquidio_vf_init(void)
2429 {
2430 octeon_init_device_list(0);
2431 return pci_register_driver(&liquidio_vf_pci_driver);
2432 }
2433
2434 static void __exit liquidio_vf_exit(void)
2435 {
2436 pci_unregister_driver(&liquidio_vf_pci_driver);
2437
2438 pr_info("LiquidIO_VF network module is now unloaded\n");
2439 }
2440
2441 module_init(liquidio_vf_init);
2442 module_exit(liquidio_vf_exit);