0001
0002
0003
0004 #include "fm10k.h"
0005 #include "fm10k_vf.h"
0006 #include "fm10k_pf.h"
0007
0008 static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
0009 struct fm10k_mbx_info *mbx)
0010 {
0011 struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
0012 struct fm10k_intfc *interface = hw->back;
0013 struct pci_dev *pdev = interface->pdev;
0014
0015 dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
0016 **results & FM10K_TLV_ID_MASK, vf_info->vf_idx);
0017
0018 return fm10k_tlv_msg_error(hw, results, mbx);
0019 }
0020
0021
0022
0023
0024
0025
0026
0027
0028
0029
0030
0031
0032
/* fm10k_iov_msg_queue_mac_vlan - Handle a MAC/VLAN request message from a VF
 * @hw: pointer to hardware structure
 * @results: TLV attribute array parsed from the mailbox message
 * @mbx: VF mailbox (embedded in the VF's fm10k_vf_info)
 *
 * Parses the optional VLAN, unicast MAC, and multicast MAC attributes of a
 * VF MAC/VLAN message, validates each against the VF's administrative state
 * (enabled flags, PF-assigned MAC/VLAN), and applies the VLAN table update
 * or queues the MAC requests. Returns 0 on success or an FM10K_ERR_* code.
 */
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		/* the CLEAR bit distinguishes delete (set) from add (clear) */
		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* NOTE(review): bits above 15 appear to mark a multi-VLAN
		 * (ranged) request — confirm against the VF/PF mailbox
		 * message format.
		 */
		if (vid >> 16) {
			/* disallow ranged requests when the PF has
			 * administratively pinned this VF's VLAN
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			/* resolve VLAN 0 / verify against pf_vid and sw_vid */
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			vid = err;
		}

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address and VLAN requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* if an administrative MAC is set, the VF may only use it */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* queue the unicast MAC update for deferred processing */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address and VLAN requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		vlan = (u16)err;

		/* queue the multicast MAC update for deferred processing */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}
0143
/* PF-side dispatch table for messages arriving on the VF mailboxes;
 * IDs with no handler here are routed to fm10k_iov_msg_error.
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
0151
/* fm10k_iov_event - Handle VF reset (VFLR) notifications on the PF
 * @interface: board private structure
 *
 * If the VFLR cause bit is set, reads the per-VF reset bitmap and, for
 * every VF whose bit is set, reclaims its resources and re-establishes
 * its mailbox connection. Always returns 0.
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are inside the RCU read-side section */
	if (!iov_data)
		goto read_unlock;

	/* nothing to do unless the VFLR interrupt cause is asserted */
	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* assemble the 64-bit VF reset bitmap from the two 32-bit
	 * PFVFLRE registers, one bit per VF
	 */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

	/* Shift so that VF (num_vfs - 1)'s bit occupies the sign bit, then
	 * walk VFs from the highest index down: "vflre += vflre" shifts
	 * left by one each iteration, and "vflre >= 0" tests the current
	 * VF's bit via the sign of the s64.
	 */
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* skip VFs that were not reset */
		if (vflre >= 0)
			continue;

		/* reclaim resources and reconnect mailbox for reset VF */
		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}
0196
/* fm10k_iov_mbx - Service all VF mailboxes from the PF
 * @interface: board private structure
 *
 * Walks every VF mailbox, resetting VFs whose port mapping became invalid
 * or whose mailbox timed out, and processing received messages. Processing
 * stops early if the switch-manager (SM) mailbox runs out of transmit
 * space; next_vf_mbx records where to resume on the next call. Always
 * returns 0.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are inside the RCU read-side section */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* The loop counts downward. On entry it starts from next_vf_mbx
	 * (where a previous call stopped) if that is non-zero, otherwise
	 * from num_vfs; after draining the older entries it restarts from
	 * the top (see the goto below) to cover the VFs skipped on entry.
	 */
process_mbx:
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid; if not, reset the port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* stop if the SM mailbox cannot take another message; VF
		 * processing may need to transmit on it
		 */
		if (hw->mbx.state == FM10K_STATE_OPEN &&
		    !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* clean up mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* If we broke out early, remember the first unprocessed VF for the
	 * next call. If we finished a resumed pass, clear the marker and
	 * sweep the remaining (newer) VFs from the top.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the mailbox lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
0281
0282 void fm10k_iov_suspend(struct pci_dev *pdev)
0283 {
0284 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
0285 struct fm10k_iov_data *iov_data = interface->iov_data;
0286 struct fm10k_hw *hw = &interface->hw;
0287 int num_vfs, i;
0288
0289
0290 num_vfs = iov_data ? iov_data->num_vfs : 0;
0291
0292
0293 fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
0294 FM10K_DGLORTMAP_NONE);
0295
0296
0297 for (i = 0; i < num_vfs; i++) {
0298 struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
0299
0300 hw->iov.ops.reset_resources(hw, vf_info);
0301 hw->iov.ops.reset_lport(hw, vf_info);
0302 fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
0303 }
0304 }
0305
0306 static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
0307 {
0308 u32 err_mask;
0309 int pos;
0310
0311 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
0312 if (!pos)
0313 return;
0314
0315
0316
0317
0318
0319
0320
0321 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
0322 err_mask |= PCI_ERR_UNC_COMP_ABORT;
0323 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
0324 }
0325
/* fm10k_iov_resume - Restore hardware state for all allocated VFs
 * @pdev: PCI device backing the interface
 *
 * Reassigns hardware queue resources, reprograms the VF RSS DGLORT
 * mapping, and re-establishes each VF's logical port, default MAC/VLAN
 * and mailbox connection.
 *
 * Returns 0 on success or -ENOMEM if iov_data has not been allocated.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs so that we can use it to assign resources */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* Mask PCIe completion-abort errors before touching VF resources.
	 * NOTE(review): presumably VF queue manipulation can trigger such
	 * errors — confirm against hardware errata.
	 */
	fm10k_mask_aer_comp_abort(pdev);

	/* allocate hardware queue resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure the DGLORT mapping used for VF RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* bring each VF's port back up */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* stop once the GLORT space derived from dglort_map is
		 * exhausted
		 */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* enable the logical port for this VF */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* program the VF's default MAC address and VLAN */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* re-establish the VF's mailbox connection */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
0382
0383 s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
0384 {
0385 struct fm10k_iov_data *iov_data = interface->iov_data;
0386 struct fm10k_hw *hw = &interface->hw;
0387 struct fm10k_vf_info *vf_info;
0388 u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;
0389
0390
0391 if (!iov_data)
0392 return FM10K_ERR_PARAM;
0393
0394
0395 if (vf_idx >= iov_data->num_vfs)
0396 return FM10K_ERR_PARAM;
0397
0398
0399 vf_info = &iov_data->vf_info[vf_idx];
0400 if (vf_info->sw_vid != pvid) {
0401 vf_info->sw_vid = pvid;
0402 hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
0403 }
0404
0405 return 0;
0406 }
0407
0408 static void fm10k_iov_free_data(struct pci_dev *pdev)
0409 {
0410 struct fm10k_intfc *interface = pci_get_drvdata(pdev);
0411
0412 if (!interface->iov_data)
0413 return;
0414
0415
0416 fm10k_iov_suspend(pdev);
0417
0418
0419 kfree_rcu(interface->iov_data, rcu);
0420 interface->iov_data = NULL;
0421 }
0422
/* fm10k_iov_alloc_data - Allocate and initialize SR-IOV bookkeeping
 * @pdev: PCI device backing the interface
 * @num_vfs: number of VFs to allocate state for
 *
 * Allocates iov_data with num_vfs trailing vf_info entries, initializes
 * each VF's VSI, index and mailbox, publishes the structure on the
 * interface, and resumes VF hardware state.
 *
 * Returns 0 on success (including num_vfs == 0), -EBUSY if iov_data
 * already exists, -ENODEV if the MAC has no resource-assignment op,
 * -ENOMEM on allocation failure, or a mailbox-init error code.
 */
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF must be able to assign resources for VFs to exist */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* size covers the header plus num_vfs flexible-array entries */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* initialize each vf_info entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		int err;

		/* VSI values start at 1 (presumably VSI 0 belongs to the
		 * PF — confirm)
		 */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize the VF's mailbox with the PF handler table */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* publish iov_data on the interface */
	interface->iov_data = iov_data;

	/* bring up hardware state for the newly allocated VFs */
	fm10k_iov_resume(pdev);

	return 0;
}
0479
0480 void fm10k_iov_disable(struct pci_dev *pdev)
0481 {
0482 if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
0483 dev_err(&pdev->dev,
0484 "Cannot disable SR-IOV while VFs are assigned\n");
0485 else
0486 pci_disable_sriov(pdev);
0487
0488 fm10k_iov_free_data(pdev);
0489 }
0490
/* fm10k_iov_configure - sriov_configure callback for changing VF count
 * @pdev: PCI device backing the interface
 * @num_vfs: requested number of VFs
 *
 * Returns the number of VFs now configured, or a negative errno.
 */
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	/* If guests still own VFs we cannot tear the current set down;
	 * overwrite num_vfs with the existing count so the rest of the
	 * function (and the return value) reflects the unchanged state.
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate fresh bookkeeping for the requested VF count */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* enable SR-IOV only when the PCI VF count actually changes */
	if (num_vfs && num_vfs != current_vfs) {
		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}
0522
0523
0524
0525
0526
0527
0528
0529
0530
0531 void fm10k_iov_update_stats(struct fm10k_intfc *interface)
0532 {
0533 struct fm10k_iov_data *iov_data = interface->iov_data;
0534 struct fm10k_hw *hw = &interface->hw;
0535 int i;
0536
0537 if (!iov_data)
0538 return;
0539
0540 for (i = 0; i < iov_data->num_vfs; i++)
0541 hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
0542 }
0543
/* fm10k_reset_vf_info - Restart a VF's logical port with its current
 * administrative settings
 * @interface: board private structure
 * @vf_info: VF to reset
 *
 * Performed under the mailbox lock since the default MAC/VLAN assignment
 * and lport operations interact with the VF over the mailbox.
 */
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	fm10k_mbx_lock(interface);

	/* take the logical port down first */
	hw->iov.ops.reset_lport(hw, vf_info);

	/* discard any MAC/VLAN requests still queued for this VF's glort */
	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* reprogram the VF's default MAC address and VLAN */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* bring the logical port back up */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}
0566
0567 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
0568 {
0569 struct fm10k_intfc *interface = netdev_priv(netdev);
0570 struct fm10k_iov_data *iov_data = interface->iov_data;
0571 struct fm10k_vf_info *vf_info;
0572
0573
0574 if (!iov_data || vf_idx >= iov_data->num_vfs)
0575 return -EINVAL;
0576
0577
0578 if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
0579 return -EINVAL;
0580
0581
0582 vf_info = &iov_data->vf_info[vf_idx];
0583 ether_addr_copy(vf_info->mac, mac);
0584
0585 fm10k_reset_vf_info(interface, vf_info);
0586
0587 return 0;
0588 }
0589
0590 int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
0591 u8 qos, __be16 vlan_proto)
0592 {
0593 struct fm10k_intfc *interface = netdev_priv(netdev);
0594 struct fm10k_iov_data *iov_data = interface->iov_data;
0595 struct fm10k_hw *hw = &interface->hw;
0596 struct fm10k_vf_info *vf_info;
0597
0598
0599 if (!iov_data || vf_idx >= iov_data->num_vfs)
0600 return -EINVAL;
0601
0602
0603 if (qos || (vid > (VLAN_VID_MASK - 1)))
0604 return -EINVAL;
0605
0606
0607 if (vlan_proto != htons(ETH_P_8021Q))
0608 return -EPROTONOSUPPORT;
0609
0610 vf_info = &iov_data->vf_info[vf_idx];
0611
0612
0613 if (vf_info->pf_vid == vid)
0614 return 0;
0615
0616
0617 vf_info->pf_vid = vid;
0618
0619
0620 hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
0621
0622 fm10k_reset_vf_info(interface, vf_info);
0623
0624 return 0;
0625 }
0626
0627 int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
0628 int __always_unused min_rate, int max_rate)
0629 {
0630 struct fm10k_intfc *interface = netdev_priv(netdev);
0631 struct fm10k_iov_data *iov_data = interface->iov_data;
0632 struct fm10k_hw *hw = &interface->hw;
0633
0634
0635 if (!iov_data || vf_idx >= iov_data->num_vfs)
0636 return -EINVAL;
0637
0638
0639 if (max_rate &&
0640 (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
0641 return -EINVAL;
0642
0643
0644 iov_data->vf_info[vf_idx].rate = max_rate;
0645
0646
0647 hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
0648
0649 return 0;
0650 }
0651
0652 int fm10k_ndo_get_vf_config(struct net_device *netdev,
0653 int vf_idx, struct ifla_vf_info *ivi)
0654 {
0655 struct fm10k_intfc *interface = netdev_priv(netdev);
0656 struct fm10k_iov_data *iov_data = interface->iov_data;
0657 struct fm10k_vf_info *vf_info;
0658
0659
0660 if (!iov_data || vf_idx >= iov_data->num_vfs)
0661 return -EINVAL;
0662
0663 vf_info = &iov_data->vf_info[vf_idx];
0664
0665 ivi->vf = vf_idx;
0666 ivi->max_tx_rate = vf_info->rate;
0667 ivi->min_tx_rate = 0;
0668 ether_addr_copy(ivi->mac, vf_info->mac);
0669 ivi->vlan = vf_info->pf_vid;
0670 ivi->qos = 0;
0671
0672 return 0;
0673 }
0674
0675 int fm10k_ndo_get_vf_stats(struct net_device *netdev,
0676 int vf_idx, struct ifla_vf_stats *stats)
0677 {
0678 struct fm10k_intfc *interface = netdev_priv(netdev);
0679 struct fm10k_iov_data *iov_data = interface->iov_data;
0680 struct fm10k_hw *hw = &interface->hw;
0681 struct fm10k_hw_stats_q *hw_stats;
0682 u32 idx, qpp;
0683
0684
0685 if (!iov_data || vf_idx >= iov_data->num_vfs)
0686 return -EINVAL;
0687
0688 qpp = fm10k_queues_per_pool(hw);
0689 hw_stats = iov_data->vf_info[vf_idx].stats;
0690
0691 for (idx = 0; idx < qpp; idx++) {
0692 stats->rx_packets += hw_stats[idx].rx_packets.count;
0693 stats->tx_packets += hw_stats[idx].tx_packets.count;
0694 stats->rx_bytes += hw_stats[idx].rx_bytes.count;
0695 stats->tx_bytes += hw_stats[idx].tx_bytes.count;
0696 stats->rx_dropped += hw_stats[idx].rx_drops.count;
0697 }
0698
0699 return 0;
0700 }