// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */

/* Inter-Driver Communication */
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_get_auxiliary_drv - retrieve iidc_auxiliary_drv struct
 * @pf: pointer to PF struct
 *
 * This function has to be called with a device_lock on the
 * pf->adev.dev to avoid race conditions.
 */
static struct iidc_auxiliary_drv *ice_get_auxiliary_drv(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	adev = pf->adev;
	if (!adev || !adev->dev.driver)
		return NULL;

	return container_of(adev->dev.driver, struct iidc_auxiliary_drv,
			    adrv.driver);
}

/**
 * ice_send_event_to_aux - send event to RDMA AUX driver
 * @pf: pointer to PF struct
 * @event: event struct
 */
void ice_send_event_to_aux(struct ice_pf *pf, struct iidc_event *event)
{
	struct iidc_auxiliary_drv *iadrv;

	if (WARN_ON_ONCE(!in_task()))
		return;

	mutex_lock(&pf->adev_mutex);
	if (!pf->adev)
		goto finish;

	device_lock(&pf->adev->dev);
	iadrv = ice_get_auxiliary_drv(pf);
	if (iadrv && iadrv->event_handler)
		iadrv->event_handler(pf, event);
	device_unlock(&pf->adev->dev);
finish:
	mutex_unlock(&pf->adev_mutex);
}
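
/*
 * Illustrative usage sketch, not part of the upstream file. Callers inside
 * the ice driver are expected to heap-allocate an iidc_event, set the
 * relevant IIDC_EVENT_* bit in its type bitmap (both defined in
 * include/linux/net/intel/iidc.h), and free the event once this function
 * returns, roughly:
 *
 *	struct iidc_event *event;
 *
 *	event = kzalloc(sizeof(*event), GFP_KERNEL);
 *	if (!event)
 *		return;
 *	set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);
 *	ice_send_event_to_aux(pf, event);
 *	kfree(event);
 */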

/**
 * ice_add_rdma_qset - Add Leaf Node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be allocated
 */
int ice_add_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	u16 max_rdmaqs[ICE_MAX_TRAFFIC_CLASS];
	struct ice_vsi *vsi;
	struct device *dev;
	u32 qset_teid;
	u16 qs_handle;
	int status;
	int i;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	dev = ice_pf_to_dev(pf);

	if (!ice_is_rdma_ena(pf))
		return -EINVAL;

	vsi = ice_get_main_vsi(pf);
	if (!vsi) {
		dev_err(dev, "RDMA QSet invalid VSI\n");
		return -EINVAL;
	}

	ice_for_each_traffic_class(i)
		max_rdmaqs[i] = 0;

	max_rdmaqs[qset->tc]++;
	qs_handle = qset->qs_handle;

	status = ice_cfg_vsi_rdma(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				  max_rdmaqs);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset config\n");
		return status;
	}

	status = ice_ena_vsi_rdma_qset(vsi->port_info, vsi->idx, qset->tc,
				       &qs_handle, 1, &qset_teid);
	if (status) {
		dev_err(dev, "Failed VSI RDMA Qset enable\n");
		return status;
	}
	vsi->qset_handle[qset->tc] = qset->qs_handle;
	qset->teid = qset_teid;

	return 0;
}
EXPORT_SYMBOL_GPL(ice_add_rdma_qset);
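
/*
 * Illustrative sketch of the consumer side; the my_* names are placeholders
 * and the field layout assumes struct iidc_rdma_qset_params from
 * include/linux/net/intel/iidc.h. The RDMA auxiliary driver supplies its
 * queue-set handle, traffic class and VSI index, then reads back the TEID
 * the scheduler assigned:
 *
 *	struct iidc_rdma_qset_params qset = {};
 *	int err;
 *
 *	qset.qs_handle = my_qs_handle;
 *	qset.tc = my_tc;
 *	qset.vport_id = my_vsi_idx;
 *	err = ice_add_rdma_qset(pf, &qset);
 *	if (!err)
 *		my_saved_teid = qset.teid;	// kept for ice_del_rdma_qset()
 */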

/**
 * ice_del_rdma_qset - Delete leaf node for RDMA Qset
 * @pf: PF struct
 * @qset: Resource to be freed
 */
int ice_del_rdma_qset(struct ice_pf *pf, struct iidc_rdma_qset_params *qset)
{
	struct ice_vsi *vsi;
	u32 teid;
	u16 q_id;

	if (WARN_ON(!pf || !qset))
		return -EINVAL;

	vsi = ice_find_vsi(pf, qset->vport_id);
	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "RDMA Invalid VSI\n");
		return -EINVAL;
	}

	q_id = qset->qs_handle;
	teid = qset->teid;

	vsi->qset_handle[qset->tc] = 0;

	return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);
}
EXPORT_SYMBOL_GPL(ice_del_rdma_qset);
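
/*
 * Continuing the placeholder sketch above: teardown reuses the same params
 * struct, with the TEID returned by ice_add_rdma_qset() filled back in:
 *
 *	qset.teid = my_saved_teid;
 *	ice_del_rdma_qset(pf, &qset);
 */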

/**
 * ice_rdma_request_reset - accept request from RDMA to perform a reset
 * @pf: struct for PF
 * @reset_type: type of reset
 */
int ice_rdma_request_reset(struct ice_pf *pf, enum iidc_reset_type reset_type)
{
	enum ice_reset_req reset;

	if (WARN_ON(!pf))
		return -EINVAL;

	switch (reset_type) {
	case IIDC_PFR:
		reset = ICE_RESET_PFR;
		break;
	case IIDC_CORER:
		reset = ICE_RESET_CORER;
		break;
	case IIDC_GLOBR:
		reset = ICE_RESET_GLOBR;
		break;
	default:
		dev_err(ice_pf_to_dev(pf), "incorrect reset request\n");
		return -EINVAL;
	}

	return ice_schedule_reset(pf, reset);
}
EXPORT_SYMBOL_GPL(ice_rdma_request_reset);
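
/*
 * Illustrative call (placeholder context): an RDMA auxiliary driver that hits
 * a fatal error can ask the ice driver to schedule a PF-level reset, e.g.:
 *
 *	ice_rdma_request_reset(pf, IIDC_PFR);
 */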

/**
 * ice_rdma_update_vsi_filter - update main VSI filters for RDMA
 * @pf: pointer to struct for PF
 * @vsi_id: VSI HW idx to update filter on
 * @enable: bool whether to enable or disable filters
 */
int ice_rdma_update_vsi_filter(struct ice_pf *pf, u16 vsi_id, bool enable)
{
	struct ice_vsi *vsi;
	int status;

	if (WARN_ON(!pf))
		return -EINVAL;

	vsi = ice_find_vsi(pf, vsi_id);
	if (!vsi)
		return -EINVAL;

	status = ice_cfg_rdma_fltr(&pf->hw, vsi->idx, enable);
	if (status) {
		dev_err(ice_pf_to_dev(pf), "Failed to %sable RDMA filtering\n",
			enable ? "en" : "dis");
	} else {
		if (enable)
			vsi->info.q_opt_flags |= ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
		else
			vsi->info.q_opt_flags &= ~ICE_AQ_VSI_Q_OPT_PE_FLTR_EN;
	}

	return status;
}
EXPORT_SYMBOL_GPL(ice_rdma_update_vsi_filter);

/**
 * ice_get_qos_params - parse QoS params for RDMA consumption
 * @pf: pointer to PF struct
 * @qos: set of QoS values
 */
void ice_get_qos_params(struct ice_pf *pf, struct iidc_qos_params *qos)
{
	struct ice_dcbx_cfg *dcbx_cfg;
	unsigned int i;
	u32 up2tc;

	dcbx_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	up2tc = rd32(&pf->hw, PRTDCB_TUP2TC);

	qos->num_tc = ice_dcb_get_num_tc(dcbx_cfg);
	for (i = 0; i < IIDC_MAX_USER_PRIORITY; i++)
		qos->up2tc[i] = (up2tc >> (i * 3)) & 0x7;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qos->tc_info[i].rel_bw = dcbx_cfg->etscfg.tcbwtable[i];

	qos->pfc_mode = dcbx_cfg->pfc_mode;
	if (qos->pfc_mode == IIDC_DSCP_PFC_MODE)
		for (i = 0; i < IIDC_MAX_DSCP_MAPPING; i++)
			qos->dscp_map[i] = dcbx_cfg->dscp_map[i];
}
EXPORT_SYMBOL_GPL(ice_get_qos_params);
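
/*
 * Illustrative sketch (the my_* name is a placeholder; assumes struct
 * iidc_qos_params from include/linux/net/intel/iidc.h): the auxiliary driver
 * passes in a caller-owned struct and consumes the snapshot filled in above:
 *
 *	struct iidc_qos_params qos = {};
 *	unsigned int tc;
 *
 *	ice_get_qos_params(pf, &qos);
 *	for (tc = 0; tc < qos.num_tc; tc++)
 *		my_use_tc_bandwidth(tc, qos.tc_info[tc].rel_bw);
 */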

/**
 * ice_reserve_rdma_qvector - Reserve vector resources for RDMA driver
 * @pf: board private structure to initialize
 */
static int ice_reserve_rdma_qvector(struct ice_pf *pf)
{
	if (ice_is_rdma_ena(pf)) {
		int index;

		index = ice_get_res(pf, pf->irq_tracker, pf->num_rdma_msix,
				    ICE_RES_RDMA_VEC_ID);
		if (index < 0)
			return index;
		pf->num_avail_sw_msix -= pf->num_rdma_msix;
		pf->rdma_base_vector = (u16)index;
	}
	return 0;
}

/**
 * ice_adev_release - function to be mapped to AUX dev's release op
 * @dev: pointer to device to free
 */
static void ice_adev_release(struct device *dev)
{
	struct iidc_auxiliary_dev *iadev;

	iadev = container_of(dev, struct iidc_auxiliary_dev, adev.dev);
	kfree(iadev);
}

/**
 * ice_plug_aux_dev - allocate and register AUX device
 * @pf: pointer to pf struct
 */
int ice_plug_aux_dev(struct ice_pf *pf)
{
	struct iidc_auxiliary_dev *iadev;
	struct auxiliary_device *adev;
	int ret;

	/* if this PF doesn't support a technology that requires auxiliary
	 * devices, then gracefully exit
	 */
	if (!ice_is_rdma_ena(pf))
		return 0;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev)
		return -ENOMEM;

	adev = &iadev->adev;
	iadev->pf = pf;

	adev->id = pf->aux_idx;
	adev->dev.release = ice_adev_release;
	adev->dev.parent = &pf->pdev->dev;
	adev->name = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ? "roce" : "iwarp";

	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(iadev);
		return ret;
	}

	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}

	mutex_lock(&pf->adev_mutex);
	pf->adev = adev;
	mutex_unlock(&pf->adev_mutex);

	return 0;
}

/**
 * ice_unplug_aux_dev - unregister and free AUX device
 * @pf: pointer to pf struct
 */
void ice_unplug_aux_dev(struct ice_pf *pf)
{
	struct auxiliary_device *adev;

	mutex_lock(&pf->adev_mutex);
	adev = pf->adev;
	pf->adev = NULL;
	mutex_unlock(&pf->adev_mutex);

	if (adev) {
		auxiliary_device_delete(adev);
		auxiliary_device_uninit(adev);
	}
}
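
/*
 * Illustrative sketch of the matching auxiliary-driver side (the my_* names
 * are placeholders; the structure mirrors how an RDMA driver such as irdma
 * binds to the "ice.roce" / "ice.iwarp" devices plugged in above). Its
 * event_handler is what ice_send_event_to_aux() ends up calling:
 *
 *	static const struct auxiliary_device_id my_aux_id_table[] = {
 *		{ .name = "ice.roce" },
 *		{ .name = "ice.iwarp" },
 *		{}
 *	};
 *	MODULE_DEVICE_TABLE(auxiliary, my_aux_id_table);
 *
 *	static struct iidc_auxiliary_drv my_aux_drv = {
 *		.adrv = {
 *			.id_table = my_aux_id_table,
 *			.probe = my_probe,
 *			.remove = my_remove,
 *		},
 *		.event_handler = my_event_handler,
 *	};
 *
 *	// registered with auxiliary_driver_register(&my_aux_drv.adrv)
 */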

/**
 * ice_init_rdma - initializes PF for RDMA use
 * @pf: ptr to ice_pf
 */
int ice_init_rdma(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	int ret;

	/* Reserve vector resources */
	ret = ice_reserve_rdma_qvector(pf);
	if (ret < 0) {
		dev_err(dev, "failed to reserve vectors for RDMA\n");
		return ret;
	}
	pf->rdma_mode |= IIDC_RDMA_PROTOCOL_ROCEV2;
	return ice_plug_aux_dev(pf);
}