// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include "otx2_common.h"

0010 int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
0011 {
0012     struct cgx_pfc_cfg *req;
0013     struct cgx_pfc_rsp *rsp;
0014     int err = 0;
0015 
0016     if (is_otx2_lbkvf(pfvf->pdev))
0017         return 0;
0018 
0019     mutex_lock(&pfvf->mbox.lock);
0020     req = otx2_mbox_alloc_msg_cgx_prio_flow_ctrl_cfg(&pfvf->mbox);
0021     if (!req) {
0022         err = -ENOMEM;
0023         goto unlock;
0024     }
0025 
0026     if (pfvf->pfc_en) {
0027         req->rx_pause = true;
0028         req->tx_pause = true;
0029     } else {
0030         req->rx_pause = false;
0031         req->tx_pause = false;
0032     }
0033     req->pfc_en = pfvf->pfc_en;
0034 
0035     if (!otx2_sync_mbox_msg(&pfvf->mbox)) {
0036         rsp = (struct cgx_pfc_rsp *)
0037                otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr);
0038         if (req->rx_pause != rsp->rx_pause || req->tx_pause != rsp->tx_pause) {
0039             dev_warn(pfvf->dev,
0040                  "Failed to config PFC\n");
0041             err = -EPERM;
0042         }
0043     }
0044 unlock:
0045     mutex_unlock(&pfvf->mbox.lock);
0046     return err;
0047 }
0048 
0049 void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
0050                    bool pfc_enable)
0051 {
0052     bool if_up = netif_running(pfvf->netdev);
0053     struct npa_aq_enq_req *npa_aq;
0054     struct nix_aq_enq_req *aq;
0055     int err = 0;
0056 
0057     if (pfvf->queue_to_pfc_map[qidx] && pfc_enable) {
0058         dev_warn(pfvf->dev,
0059              "PFC enable not permitted as Priority %d already mapped to Queue %d\n",
0060              pfvf->queue_to_pfc_map[qidx], qidx);
0061         return;
0062     }
0063 
0064     if (if_up) {
0065         netif_tx_stop_all_queues(pfvf->netdev);
0066         netif_carrier_off(pfvf->netdev);
0067     }
0068 
0069     pfvf->queue_to_pfc_map[qidx] = vlan_prio;
0070 
0071     aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
0072     if (!aq) {
0073         err = -ENOMEM;
0074         goto out;
0075     }
0076 
0077     aq->cq.bpid = pfvf->bpid[vlan_prio];
0078     aq->cq_mask.bpid = GENMASK(8, 0);
0079 
0080     /* Fill AQ info */
0081     aq->qidx = qidx;
0082     aq->ctype = NIX_AQ_CTYPE_CQ;
0083     aq->op = NIX_AQ_INSTOP_WRITE;
0084 
0085     otx2_sync_mbox_msg(&pfvf->mbox);
0086 
0087     npa_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
0088     if (!npa_aq) {
0089         err = -ENOMEM;
0090         goto out;
0091     }
0092     npa_aq->aura.nix0_bpid = pfvf->bpid[vlan_prio];
0093     npa_aq->aura_mask.nix0_bpid = GENMASK(8, 0);
0094 
0095     /* Fill NPA AQ info */
0096     npa_aq->aura_id = qidx;
0097     npa_aq->ctype = NPA_AQ_CTYPE_AURA;
0098     npa_aq->op = NPA_AQ_INSTOP_WRITE;
0099     otx2_sync_mbox_msg(&pfvf->mbox);
0100 
0101 out:
0102     if (if_up) {
0103         netif_carrier_on(pfvf->netdev);
0104         netif_tx_start_all_queues(pfvf->netdev);
0105     }
0106 
0107     if (err)
0108         dev_warn(pfvf->dev,
0109              "Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
0110              qidx, err);
0111 }
0112 
0113 static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
0114 {
0115     struct otx2_nic *pfvf = netdev_priv(dev);
0116 
0117     pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
0118     pfc->pfc_en = pfvf->pfc_en;
0119 
0120     return 0;
0121 }
0122 
0123 static int otx2_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
0124 {
0125     struct otx2_nic *pfvf = netdev_priv(dev);
0126     int err;
0127 
0128     /* Save PFC configuration to interface */
0129     pfvf->pfc_en = pfc->pfc_en;
0130 
0131     err = otx2_config_priority_flow_ctrl(pfvf);
0132     if (err)
0133         return err;
0134 
0135     /* Request Per channel Bpids */
0136     if (pfc->pfc_en)
0137         otx2_nix_config_bp(pfvf, true);
0138 
0139     return 0;
0140 }
0141 
/* dcbnl .getdcbx callback: this driver only supports host-managed
 * IEEE 802.1Qaz DCBX (no firmware/LLDP offload).
 */
static u8 otx2_dcbnl_getdcbx(struct net_device __always_unused *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
0146 
0147 static u8 otx2_dcbnl_setdcbx(struct net_device __always_unused *dev, u8 mode)
0148 {
0149     return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
0150 }
0151 
/* DCB netlink callbacks registered on the netdev; only PFC get/set and
 * the DCBX mode handlers are implemented.
 */
static const struct dcbnl_rtnl_ops otx2_dcbnl_ops = {
	.ieee_getpfc	= otx2_dcbnl_ieee_getpfc,
	.ieee_setpfc	= otx2_dcbnl_ieee_setpfc,
	.getdcbx	= otx2_dcbnl_getdcbx,
	.setdcbx	= otx2_dcbnl_setdcbx,
};
0158 
0159 int otx2_dcbnl_set_ops(struct net_device *dev)
0160 {
0161     struct otx2_nic *pfvf = netdev_priv(dev);
0162 
0163     pfvf->queue_to_pfc_map = devm_kzalloc(pfvf->dev, pfvf->hw.rx_queues,
0164                           GFP_KERNEL);
0165     if (!pfvf->queue_to_pfc_map)
0166         return -ENOMEM;
0167     dev->dcbnl_ops = &otx2_dcbnl_ops;
0168 
0169     return 0;
0170 }