Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /* Copyright(c) 2013 - 2019 Intel Corporation. */
0003 
0004 #include "fm10k.h"
0005 #include "fm10k_vf.h"
0006 #include "fm10k_pf.h"
0007 
0008 static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
0009                    struct fm10k_mbx_info *mbx)
0010 {
0011     struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
0012     struct fm10k_intfc *interface = hw->back;
0013     struct pci_dev *pdev = interface->pdev;
0014 
0015     dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
0016         **results & FM10K_TLV_ID_MASK, vf_info->vf_idx);
0017 
0018     return fm10k_tlv_msg_error(hw, results, mbx);
0019 }
0020 
/**
 *  fm10k_iov_msg_queue_mac_vlan - Message handler for MAC/VLAN request from VF
 *  @hw: Pointer to hardware structure
 *  @results: Pointer array to message, results[0] is pointer to message
 *  @mbx: Pointer to mailbox information structure
 *
 *  This function is a custom handler for MAC/VLAN requests from the VF. The
 *  assumption is that it is acceptable to directly hand off the message from
 *  the VF to the PF's switch manager. However, we use a MAC/VLAN message
 *  queue to avoid overloading the mailbox when a large number of requests
 *  come in.
 *
 *  Returns 0 on success, or an FM10K_ERR_* / TLV parsing error code.
 **/
static s32 fm10k_iov_msg_queue_mac_vlan(struct fm10k_hw *hw, u32 **results,
					struct fm10k_mbx_info *mbx)
{
	/* the mailbox structure is embedded at the start of vf_info */
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	u8 mac[ETH_ALEN];
	u32 *result;
	int err = 0;
	bool set;
	u16 vlan;
	u32 vid;

	/* we shouldn't be updating rules on a disabled interface */
	if (!FM10K_VF_FLAG_ENABLED(vf_info))
		err = FM10K_ERR_PARAM;

	/* each of the three attributes (VLAN, MAC, multicast) is optional;
	 * process whichever ones are present in the message
	 */
	if (!err && !!results[FM10K_MAC_VLAN_MSG_VLAN]) {
		result = results[FM10K_MAC_VLAN_MSG_VLAN];

		/* record VLAN id requested */
		err = fm10k_tlv_attr_get_u32(result, &vid);
		if (err)
			return err;

		/* the CLEAR bit encodes add-vs-remove; strip it from the id */
		set = !(vid & FM10K_VLAN_CLEAR);
		vid &= ~FM10K_VLAN_CLEAR;

		/* if the length field has been set, this is a multi-bit
		 * update request. For multi-bit requests, simply disallow
		 * them when the pf_vid has been set. In this case, the PF
		 * should have already cleared the VLAN_TABLE, and if we
		 * allowed them, it could allow a rogue VF to receive traffic
		 * on a VLAN it was not assigned. In the single-bit case, we
		 * need to modify requests for VLAN 0 to use the default PF or
		 * SW vid when assigned.
		 */

		if (vid >> 16) {
			/* prevent multi-bit requests when PF has
			 * administratively set the VLAN for this VF
			 */
			if (vf_info->pf_vid)
				return FM10K_ERR_PARAM;
		} else {
			err = fm10k_iov_select_vid(vf_info, (u16)vid);
			if (err < 0)
				return err;

			/* a non-negative return is the resolved VLAN id */
			vid = err;
		}

		/* update VSI info for VF in regards to VLAN table */
		err = hw->mac.ops.update_vlan(hw, vid, vf_info->vsi, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MAC]) {
		result = results[FM10K_MAC_VLAN_MSG_MAC];

		/* record unicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* block attempts to set MAC for a locked device */
		if (is_valid_ether_addr(vf_info->mac) &&
		    !ether_addr_equal(mac, vf_info->mac))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		/* a non-negative return is the resolved VLAN id */
		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	if (!err && !!results[FM10K_MAC_VLAN_MSG_MULTICAST]) {
		result = results[FM10K_MAC_VLAN_MSG_MULTICAST];

		/* record multicast MAC address requested */
		err = fm10k_tlv_attr_get_mac_vlan(result, mac, &vlan);
		if (err)
			return err;

		/* verify that the VF is allowed to request multicast */
		if (!(vf_info->vf_flags & FM10K_VF_FLAG_MULTI_ENABLED))
			return FM10K_ERR_PARAM;

		set = !(vlan & FM10K_VLAN_CLEAR);
		vlan &= ~FM10K_VLAN_CLEAR;

		err = fm10k_iov_select_vid(vf_info, vlan);
		if (err < 0)
			return err;

		/* a non-negative return is the resolved VLAN id */
		vlan = (u16)err;

		/* Add this request to the MAC/VLAN queue */
		err = fm10k_queue_mac_request(interface, vf_info->glort,
					      mac, vlan, set);
	}

	return err;
}
0143 
/* Dispatch table mapping VF mailbox message IDs to their PF-side handlers;
 * messages with an unrecognized ID fall through to fm10k_iov_msg_error.
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_queue_mac_vlan),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
0151 
/**
 * fm10k_iov_event - Handle a VF Level Reset (VFLR) event
 * @interface: board private structure
 *
 * Reads the PFVFLRE registers to determine which VFs hardware has reset,
 * then reclaims each affected VF's resources and reconnects its mailbox.
 * Always returns 0.
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* nothing to do unless the VFLR cause bit is set */
	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
	vflre <<= 32;
	vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

	i = iov_data->num_vfs;

	/* Shift so the bit for VF (num_vfs - 1) lands in the sign position,
	 * then walk VFs from the highest index down; "vflre += vflre" moves
	 * the next VF's bit into the sign position each iteration, so a
	 * negative value means the current VF was reset.
	 */
	for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		if (vflre >= 0)
			continue;

		/* reclaim resources and re-establish the VF's mailbox */
		hw->iov.ops.reset_resources(hw, vf_info);
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}
0196 
/**
 * fm10k_iov_mbx - Service the mailboxes of all active VFs
 * @interface: board private structure
 *
 * Processes the SM mailbox and each VF mailbox under the mailbox lock,
 * resuming from the VF that was skipped on the previous run so no VF is
 * starved when the SM mailbox fills. Always returns 0.
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailbox to process */
	if (!READ_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
	/* start at next_vf_mbx if one was recorded, else at the last VF,
	 * and iterate downward
	 */
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* process the SM mailbox first to drain outgoing messages */
		hw->mbx.ops.process(hw, &hw->mbx);

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort)) {
			hw->iov.ops.reset_lport(hw, vf_info);
			fm10k_clear_macvlan_queue(interface, glort, false);
		}

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (hw->mbx.state == FM10K_STATE_OPEN &&
		    !hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU)) {
			/* keep track of how many times this occurs */
			interface->hw_sm_mbx_full++;

			/* make sure we try again momentarily */
			fm10k_service_event_schedule(interface);

			break;
		}

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
0281 
0282 void fm10k_iov_suspend(struct pci_dev *pdev)
0283 {
0284     struct fm10k_intfc *interface = pci_get_drvdata(pdev);
0285     struct fm10k_iov_data *iov_data = interface->iov_data;
0286     struct fm10k_hw *hw = &interface->hw;
0287     int num_vfs, i;
0288 
0289     /* pull out num_vfs from iov_data */
0290     num_vfs = iov_data ? iov_data->num_vfs : 0;
0291 
0292     /* shut down queue mapping for VFs */
0293     fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
0294             FM10K_DGLORTMAP_NONE);
0295 
0296     /* Stop any active VFs and reset their resources */
0297     for (i = 0; i < num_vfs; i++) {
0298         struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
0299 
0300         hw->iov.ops.reset_resources(hw, vf_info);
0301         hw->iov.ops.reset_lport(hw, vf_info);
0302         fm10k_clear_macvlan_queue(interface, vf_info->glort, false);
0303     }
0304 }
0305 
0306 static void fm10k_mask_aer_comp_abort(struct pci_dev *pdev)
0307 {
0308     u32 err_mask;
0309     int pos;
0310 
0311     pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
0312     if (!pos)
0313         return;
0314 
0315     /* Mask the completion abort bit in the ERR_UNCOR_MASK register,
0316      * preventing the device from reporting these errors to the upstream
0317      * PCIe root device. This avoids bringing down platforms which upgrade
0318      * non-fatal completer aborts into machine check exceptions. Completer
0319      * aborts can occur whenever a VF reads a queue it doesn't own.
0320      */
0321     pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, &err_mask);
0322     err_mask |= PCI_ERR_UNC_COMP_ABORT;
0323     pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_MASK, err_mask);
0324 }
0325 
/**
 * fm10k_iov_resume - Restore hardware resources for allocated VFs
 * @pdev: PCI device structure
 *
 * Masks completer-abort AER reporting, reassigns queue resources to the
 * VFs, programs the VF RSS DGLORT mapping, and for each VF assigns a
 * GLORT, a default MAC/VLAN, and reconnects its mailbox.
 *
 * Returns -ENOMEM if iov_data has not been allocated, otherwise 0.
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* Lower severity of completer abort error reporting as
	 * the VFs can trigger this any time they read a queue
	 * that they don't own.
	 */
	fm10k_mask_aer_comp_abort(pdev);

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == (~hw->mac.dglort_map >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
0382 
0383 s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
0384 {
0385     struct fm10k_iov_data *iov_data = interface->iov_data;
0386     struct fm10k_hw *hw = &interface->hw;
0387     struct fm10k_vf_info *vf_info;
0388     u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;
0389 
0390     /* no IOV support, not our message to process */
0391     if (!iov_data)
0392         return FM10K_ERR_PARAM;
0393 
0394     /* glort outside our range, not our message to process */
0395     if (vf_idx >= iov_data->num_vfs)
0396         return FM10K_ERR_PARAM;
0397 
0398     /* determine if an update has occurred and if so notify the VF */
0399     vf_info = &iov_data->vf_info[vf_idx];
0400     if (vf_info->sw_vid != pvid) {
0401         vf_info->sw_vid = pvid;
0402         hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
0403     }
0404 
0405     return 0;
0406 }
0407 
0408 static void fm10k_iov_free_data(struct pci_dev *pdev)
0409 {
0410     struct fm10k_intfc *interface = pci_get_drvdata(pdev);
0411 
0412     if (!interface->iov_data)
0413         return;
0414 
0415     /* reclaim hardware resources */
0416     fm10k_iov_suspend(pdev);
0417 
0418     /* drop iov_data from interface */
0419     kfree_rcu(interface->iov_data, rcu);
0420     interface->iov_data = NULL;
0421 }
0422 
0423 static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
0424 {
0425     struct fm10k_intfc *interface = pci_get_drvdata(pdev);
0426     struct fm10k_iov_data *iov_data = interface->iov_data;
0427     struct fm10k_hw *hw = &interface->hw;
0428     size_t size;
0429     int i;
0430 
0431     /* return error if iov_data is already populated */
0432     if (iov_data)
0433         return -EBUSY;
0434 
0435     /* The PF should always be able to assign resources */
0436     if (!hw->iov.ops.assign_resources)
0437         return -ENODEV;
0438 
0439     /* nothing to do if no VFs are requested */
0440     if (!num_vfs)
0441         return 0;
0442 
0443     /* allocate memory for VF storage */
0444     size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
0445     iov_data = kzalloc(size, GFP_KERNEL);
0446     if (!iov_data)
0447         return -ENOMEM;
0448 
0449     /* record number of VFs */
0450     iov_data->num_vfs = num_vfs;
0451 
0452     /* loop through vf_info structures initializing each entry */
0453     for (i = 0; i < num_vfs; i++) {
0454         struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
0455         int err;
0456 
0457         /* Record VF VSI value */
0458         vf_info->vsi = i + 1;
0459         vf_info->vf_idx = i;
0460 
0461         /* initialize mailbox memory */
0462         err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
0463         if (err) {
0464             dev_err(&pdev->dev,
0465                 "Unable to initialize SR-IOV mailbox\n");
0466             kfree(iov_data);
0467             return err;
0468         }
0469     }
0470 
0471     /* assign iov_data to interface */
0472     interface->iov_data = iov_data;
0473 
0474     /* allocate hardware resources for the VFs */
0475     fm10k_iov_resume(pdev);
0476 
0477     return 0;
0478 }
0479 
0480 void fm10k_iov_disable(struct pci_dev *pdev)
0481 {
0482     if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
0483         dev_err(&pdev->dev,
0484             "Cannot disable SR-IOV while VFs are assigned\n");
0485     else
0486         pci_disable_sriov(pdev);
0487 
0488     fm10k_iov_free_data(pdev);
0489 }
0490 
0491 int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
0492 {
0493     int current_vfs = pci_num_vf(pdev);
0494     int err = 0;
0495 
0496     if (current_vfs && pci_vfs_assigned(pdev)) {
0497         dev_err(&pdev->dev,
0498             "Cannot modify SR-IOV while VFs are assigned\n");
0499         num_vfs = current_vfs;
0500     } else {
0501         pci_disable_sriov(pdev);
0502         fm10k_iov_free_data(pdev);
0503     }
0504 
0505     /* allocate resources for the VFs */
0506     err = fm10k_iov_alloc_data(pdev, num_vfs);
0507     if (err)
0508         return err;
0509 
0510     /* allocate VFs if not already allocated */
0511     if (num_vfs && num_vfs != current_vfs) {
0512         err = pci_enable_sriov(pdev, num_vfs);
0513         if (err) {
0514             dev_err(&pdev->dev,
0515                 "Enable PCI SR-IOV failed: %d\n", err);
0516             return err;
0517         }
0518     }
0519 
0520     return num_vfs;
0521 }
0522 
0523 /**
0524  * fm10k_iov_update_stats - Update stats for all VFs
0525  * @interface: device private structure
0526  *
0527  * Updates the VF statistics for all enabled VFs. Expects to be called by
0528  * fm10k_update_stats and assumes that locking via the __FM10K_UPDATING_STATS
0529  * bit is already handled.
0530  */
0531 void fm10k_iov_update_stats(struct fm10k_intfc *interface)
0532 {
0533     struct fm10k_iov_data *iov_data = interface->iov_data;
0534     struct fm10k_hw *hw = &interface->hw;
0535     int i;
0536 
0537     if (!iov_data)
0538         return;
0539 
0540     for (i = 0; i < iov_data->num_vfs; i++)
0541         hw->iov.ops.update_stats(hw, iov_data->vf_info[i].stats, i);
0542 }
0543 
/* Reset a single VF's logical port so administratively-set MAC/VLAN values
 * take effect: tear the LPORT down, drop queued MAC/VLAN requests, push the
 * default MAC+VLAN, then bring the LPORT back up. Runs under the mailbox
 * lock because assigning the MAC sends a mailbox message.
 */
static inline void fm10k_reset_vf_info(struct fm10k_intfc *interface,
				       struct fm10k_vf_info *vf_info)
{
	struct fm10k_hw *hw = &interface->hw;

	/* assigning the MAC address will send a mailbox message */
	fm10k_mbx_lock(interface);

	/* disable LPORT for this VF which clears switch rules */
	hw->iov.ops.reset_lport(hw, vf_info);

	/* drop any MAC/VLAN requests still queued for this VF */
	fm10k_clear_macvlan_queue(interface, vf_info->glort, false);

	/* assign new MAC+VLAN for this VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	/* re-enable the LPORT for this VF */
	hw->iov.ops.set_lport(hw, vf_info, vf_info->vf_idx,
			      FM10K_VF_FLAG_MULTI_CAPABLE);

	fm10k_mbx_unlock(interface);
}
0566 
0567 int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
0568 {
0569     struct fm10k_intfc *interface = netdev_priv(netdev);
0570     struct fm10k_iov_data *iov_data = interface->iov_data;
0571     struct fm10k_vf_info *vf_info;
0572 
0573     /* verify SR-IOV is active and that vf idx is valid */
0574     if (!iov_data || vf_idx >= iov_data->num_vfs)
0575         return -EINVAL;
0576 
0577     /* verify MAC addr is valid */
0578     if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
0579         return -EINVAL;
0580 
0581     /* record new MAC address */
0582     vf_info = &iov_data->vf_info[vf_idx];
0583     ether_addr_copy(vf_info->mac, mac);
0584 
0585     fm10k_reset_vf_info(interface, vf_info);
0586 
0587     return 0;
0588 }
0589 
0590 int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
0591               u8 qos, __be16 vlan_proto)
0592 {
0593     struct fm10k_intfc *interface = netdev_priv(netdev);
0594     struct fm10k_iov_data *iov_data = interface->iov_data;
0595     struct fm10k_hw *hw = &interface->hw;
0596     struct fm10k_vf_info *vf_info;
0597 
0598     /* verify SR-IOV is active and that vf idx is valid */
0599     if (!iov_data || vf_idx >= iov_data->num_vfs)
0600         return -EINVAL;
0601 
0602     /* QOS is unsupported and VLAN IDs accepted range 0-4094 */
0603     if (qos || (vid > (VLAN_VID_MASK - 1)))
0604         return -EINVAL;
0605 
0606     /* VF VLAN Protocol part to default is unsupported */
0607     if (vlan_proto != htons(ETH_P_8021Q))
0608         return -EPROTONOSUPPORT;
0609 
0610     vf_info = &iov_data->vf_info[vf_idx];
0611 
0612     /* exit if there is nothing to do */
0613     if (vf_info->pf_vid == vid)
0614         return 0;
0615 
0616     /* record default VLAN ID for VF */
0617     vf_info->pf_vid = vid;
0618 
0619     /* Clear the VLAN table for the VF */
0620     hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);
0621 
0622     fm10k_reset_vf_info(interface, vf_info);
0623 
0624     return 0;
0625 }
0626 
0627 int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
0628             int __always_unused min_rate, int max_rate)
0629 {
0630     struct fm10k_intfc *interface = netdev_priv(netdev);
0631     struct fm10k_iov_data *iov_data = interface->iov_data;
0632     struct fm10k_hw *hw = &interface->hw;
0633 
0634     /* verify SR-IOV is active and that vf idx is valid */
0635     if (!iov_data || vf_idx >= iov_data->num_vfs)
0636         return -EINVAL;
0637 
0638     /* rate limit cannot be less than 10Mbs or greater than link speed */
0639     if (max_rate &&
0640         (max_rate < FM10K_VF_TC_MIN || max_rate > FM10K_VF_TC_MAX))
0641         return -EINVAL;
0642 
0643     /* store values */
0644     iov_data->vf_info[vf_idx].rate = max_rate;
0645 
0646     /* update hardware configuration */
0647     hw->iov.ops.configure_tc(hw, vf_idx, max_rate);
0648 
0649     return 0;
0650 }
0651 
0652 int fm10k_ndo_get_vf_config(struct net_device *netdev,
0653                 int vf_idx, struct ifla_vf_info *ivi)
0654 {
0655     struct fm10k_intfc *interface = netdev_priv(netdev);
0656     struct fm10k_iov_data *iov_data = interface->iov_data;
0657     struct fm10k_vf_info *vf_info;
0658 
0659     /* verify SR-IOV is active and that vf idx is valid */
0660     if (!iov_data || vf_idx >= iov_data->num_vfs)
0661         return -EINVAL;
0662 
0663     vf_info = &iov_data->vf_info[vf_idx];
0664 
0665     ivi->vf = vf_idx;
0666     ivi->max_tx_rate = vf_info->rate;
0667     ivi->min_tx_rate = 0;
0668     ether_addr_copy(ivi->mac, vf_info->mac);
0669     ivi->vlan = vf_info->pf_vid;
0670     ivi->qos = 0;
0671 
0672     return 0;
0673 }
0674 
0675 int fm10k_ndo_get_vf_stats(struct net_device *netdev,
0676                int vf_idx, struct ifla_vf_stats *stats)
0677 {
0678     struct fm10k_intfc *interface = netdev_priv(netdev);
0679     struct fm10k_iov_data *iov_data = interface->iov_data;
0680     struct fm10k_hw *hw = &interface->hw;
0681     struct fm10k_hw_stats_q *hw_stats;
0682     u32 idx, qpp;
0683 
0684     /* verify SR-IOV is active and that vf idx is valid */
0685     if (!iov_data || vf_idx >= iov_data->num_vfs)
0686         return -EINVAL;
0687 
0688     qpp = fm10k_queues_per_pool(hw);
0689     hw_stats = iov_data->vf_info[vf_idx].stats;
0690 
0691     for (idx = 0; idx < qpp; idx++) {
0692         stats->rx_packets += hw_stats[idx].rx_packets.count;
0693         stats->tx_packets += hw_stats[idx].tx_packets.count;
0694         stats->rx_bytes += hw_stats[idx].rx_bytes.count;
0695         stats->tx_bytes += hw_stats[idx].tx_bytes.count;
0696         stats->rx_dropped += hw_stats[idx].rx_drops.count;
0697     }
0698 
0699     return 0;
0700 }