// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "rvu.h"
#include "cgx.h"
#include "lmac_common.h"
#include "rvu_reg.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"

struct cgx_evq_entry {
	struct list_head evq_node;
	struct cgx_link_event link_event;
};

/* Expand MBOX_UP_CGX_MESSAGES to generate per-message helpers that allocate
 * an AF -> PF "up" mailbox request of the right type and fill its header.
 */
#define M(_name, _id, _fn_name, _req_type, _rsp_type)			\
static struct _req_type __maybe_unused					\
*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid)		\
{									\
	struct _req_type *req;						\
									\
	req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(		\
		&rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
		sizeof(struct _rsp_type));				\
	if (!req)							\
		return NULL;						\
	req->hdr.sig = OTX2_MBOX_REQ_SIG;				\
	req->hdr.id = _id;						\
	trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req));		\
	return req;							\
}

MBOX_UP_CGX_MESSAGES
#undef M

bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature)
{
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	return (cgx_features_get(cgxd) & feature);
}

static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
{
	return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}

int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
	unsigned long pfmap;

	pfmap = cgxlmac_to_pfmap(rvu, cgx_id, lmac_id);

	/* Assumes only one PF is mapped to a CGX LMAC port */
	if (!pfmap)
		return -ENODEV;
	else
		return find_first_bit(&pfmap, 16);
}

static u8 cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id)
{
	return ((cgx_id & 0xF) << 4) | (lmac_id & 0xF);
}

void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu)
{
	if (cgx_id >= rvu->cgx_cnt_max)
		return NULL;

	return rvu->cgx_idmap[cgx_id];
}

/* Return the first enabled CGX instance, or NULL if none are enabled */
void *rvu_first_cgx_pdata(struct rvu *rvu)
{
	int first_enabled_cgx = 0;
	void *cgxd = NULL;

	for (; first_enabled_cgx < rvu->cgx_cnt_max; first_enabled_cgx++) {
		cgxd = rvu_cgx_pdata(first_enabled_cgx, rvu);
		if (cgxd)
			break;
	}

	return cgxd;
}

static void rvu_map_cgx_nix_block(struct rvu *rvu, int pf,
				  int cgx_id, int lmac_id)
{
	struct rvu_pfvf *pfvf = &rvu->pf[pf];
	u8 p2x;

	p2x = cgx_lmac_get_p2x(cgx_id, lmac_id);
	/* Firmware sets P2X_SELECT as either NIX0 or NIX1 */
	pfvf->nix_blkaddr = BLKADDR_NIX0;
	if (p2x == CMR_P2X_SEL_NIX1)
		pfvf->nix_blkaddr = BLKADDR_NIX1;
}

static int rvu_map_cgx_lmac_pf(struct rvu *rvu)
{
	struct npc_pkind *pkind = &rvu->hw->pkind;
	int cgx_cnt_max = rvu->cgx_cnt_max;
	int pf = PF_CGXMAP_BASE;
	unsigned long lmac_bmap;
	int size, free_pkind;
	int cgx, lmac, iter;
	int numvfs, hwvfs;

	if (!cgx_cnt_max)
		return 0;

	if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF)
		return -EINVAL;

	/* Alloc map table
	 * An additional entry is required since PF id starts from 1 and
	 * hence entry at offset 0 is invalid.
	 */
	size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8);
	rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL);
	if (!rvu->pf2cgxlmac_map)
		return -ENOMEM;

	/* Initialize all entries with an invalid cgx and lmac id */
	memset(rvu->pf2cgxlmac_map, 0xFF, size);

	/* Reverse map table */
	rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev,
					   cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16),
					   GFP_KERNEL);
	if (!rvu->cgxlmac2pf_map)
		return -ENOMEM;

	rvu->cgx_mapped_pfs = 0;
	for (cgx = 0; cgx < cgx_cnt_max; cgx++) {
		if (!rvu_cgx_pdata(cgx, rvu))
			continue;
		lmac_bmap = cgx_get_lmac_bmap(rvu_cgx_pdata(cgx, rvu));
		for_each_set_bit(iter, &lmac_bmap, MAX_LMAC_PER_CGX) {
			lmac = cgx_get_lmacid(rvu_cgx_pdata(cgx, rvu),
					      iter);
			rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac);
			rvu->cgxlmac2pf_map[CGX_OFFSET(cgx) + lmac] = 1 << pf;
			free_pkind = rvu_alloc_rsrc(&pkind->rsrc);
			pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16;
			rvu_map_cgx_nix_block(rvu, pf, cgx, lmac);
			rvu->cgx_mapped_pfs++;
			rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvfs);
			rvu->cgx_mapped_vfs += numvfs;
			pf++;
		}
	}
	return 0;
}

static int rvu_cgx_send_link_info(int cgx_id, int lmac_id, struct rvu *rvu)
{
	struct cgx_evq_entry *qentry;
	unsigned long flags;
	int err;

	qentry = kmalloc(sizeof(*qentry), GFP_KERNEL);
	if (!qentry)
		return -ENOMEM;

	/* Lock the event queue before we read the local link status */
	spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&qentry->link_event.link_uinfo);
	qentry->link_event.cgx_id = cgx_id;
	qentry->link_event.lmac_id = lmac_id;
	if (err) {
		kfree(qentry);
		goto skip_add;
	}
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
skip_add:
	spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);

	/* Start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

/* This is called from interrupt context and is expected to be atomic */
static int cgx_lmac_postevent(struct cgx_link_event *event, void *data)
{
	struct cgx_evq_entry *qentry;
	struct rvu *rvu = data;

	/* Post event to the event queue */
	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return -ENOMEM;
	qentry->link_event = *event;
	spin_lock(&rvu->cgx_evq_lock);
	list_add_tail(&qentry->evq_node, &rvu->cgx_evq_head);
	spin_unlock(&rvu->cgx_evq_lock);

	/* Start worker to process the events */
	queue_work(rvu->cgx_evh_wq, &rvu->cgx_evh_work);

	return 0;
}

static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
{
	struct cgx_link_user_info *linfo;
	struct cgx_link_info_msg *msg;
	unsigned long pfmap;
	int err, pfid;

	linfo = &event->link_uinfo;
	pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);

	do {
		pfid = find_first_bit(&pfmap, 16);
		clear_bit(pfid, &pfmap);

		/* Check if notification is enabled for this PF */
		if (!test_bit(pfid, &rvu->pf_notify_bmap)) {
			dev_info(rvu->dev, "cgx %d: lmac %d Link status %s\n",
				 event->cgx_id, event->lmac_id,
				 linfo->link_up ? "UP" : "DOWN");
			continue;
		}

		/* Send mbox message to PF */
		msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
		if (!msg)
			continue;
		msg->link_info = *linfo;
		otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
		err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
		if (err)
			dev_warn(rvu->dev, "notification to pf %d failed\n",
				 pfid);
	} while (pfmap);
}

static void cgx_evhandler_task(struct work_struct *work)
{
	struct rvu *rvu = container_of(work, struct rvu, cgx_evh_work);
	struct cgx_evq_entry *qentry;
	struct cgx_link_event *event;
	unsigned long flags;

	do {
		/* Dequeue an event */
		spin_lock_irqsave(&rvu->cgx_evq_lock, flags);
		qentry = list_first_entry_or_null(&rvu->cgx_evq_head,
						  struct cgx_evq_entry,
						  evq_node);
		if (qentry)
			list_del(&qentry->evq_node);
		spin_unlock_irqrestore(&rvu->cgx_evq_lock, flags);
		if (!qentry)
			break; /* nothing more to process */

		event = &qentry->link_event;

		/* Process event */
		cgx_notify_pfs(event, rvu);
		kfree(qentry);
	} while (1);
}

static int cgx_lmac_event_handler_init(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	struct cgx_event_cb cb;
	int cgx, lmac, err;
	void *cgxd;

	spin_lock_init(&rvu->cgx_evq_lock);
	INIT_LIST_HEAD(&rvu->cgx_evq_head);
	INIT_WORK(&rvu->cgx_evh_work, cgx_evhandler_task);
	rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0);
	if (!rvu->cgx_evh_wq) {
		dev_err(rvu->dev, "alloc workqueue failed");
		return -ENOMEM;
	}

	cb.notify_link_chg = cgx_lmac_postevent; /* link change callback */
	cb.data = rvu;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX) {
			err = cgx_lmac_evh_register(&cb, cgxd, lmac);
			if (err)
				dev_err(rvu->dev,
					"%d:%d handler register failed\n",
					cgx, lmac);
		}
	}

	return 0;
}

static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
	if (rvu->cgx_evh_wq) {
		destroy_workqueue(rvu->cgx_evh_wq);
		rvu->cgx_evh_wq = NULL;
	}
}

int rvu_cgx_init(struct rvu *rvu)
{
	int cgx, err;
	void *cgxd;

	/* CGX port ids start from 0 and are not necessarily contiguous.
	 * Hence we allocate resources based on the maximum port id value.
	 */
	rvu->cgx_cnt_max = cgx_get_cgxcnt_max();
	if (!rvu->cgx_cnt_max) {
		dev_info(rvu->dev, "No CGX devices found!\n");
		return -ENODEV;
	}

	rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max *
				      sizeof(void *), GFP_KERNEL);
	if (!rvu->cgx_idmap)
		return -ENOMEM;

	/* Initialize the cgxdata table */
	for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++)
		rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx);

	/* Map CGX LMAC interfaces to RVU PFs */
	err = rvu_map_cgx_lmac_pf(rvu);
	if (err)
		return err;

	/* Register for CGX events */
	err = cgx_lmac_event_handler_init(rvu);
	if (err)
		return err;

	mutex_init(&rvu->cgx_cfg_lock);

	/* Ensure event handler registration is completed, before
	 * we turn on the links
	 */
	mb();

	/* Do link up for all CGX ports */
	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		err = cgx_lmac_linkup_start(cgxd);
		if (err)
			dev_err(rvu->dev,
				"Link up process failed to start on cgx %d\n",
				cgx);
	}

	return 0;
}

int rvu_cgx_exit(struct rvu *rvu)
{
	unsigned long lmac_bmap;
	int cgx, lmac;
	void *cgxd;

	for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) {
		cgxd = rvu_cgx_pdata(cgx, rvu);
		if (!cgxd)
			continue;
		lmac_bmap = cgx_get_lmac_bmap(cgxd);
		for_each_set_bit(lmac, &lmac_bmap, MAX_LMAC_PER_CGX)
			cgx_lmac_evh_unregister(cgxd, lmac);
	}

	/* Ensure event handler unregister is completed */
	mb();

	rvu_cgx_wq_destroy(rvu);
	return 0;
}

/* Most of the CGX configuration is restricted to the mapped PF only,
 * VFs of the mapped PF and other PFs are not allowed. This function
 * checks whether a given PF_FUNC is permitted to do the config or not.
 */
inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return false;
	return true;
}

void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
{
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	/* Enable or disable pause frame forwarding to NIX for this LMAC */
	if (enable)
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, true);
	else
		mac_ops->mac_enadis_rx_pause_fwding(cgxd, lmac_id, false);
}

int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
}

int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
{
	struct mac_ops *mac_ops;

	mac_ops = get_mac_ops(cgxd);
	return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}

void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
{
	int pf = rvu_get_pf(pcifunc);
	int i = 0, lmac_count = 0;
	u8 max_dmac_filters;
	u8 cgx_id, lmac_id;
	void *cgx_dev;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return;

	if (rvu_npc_exact_has_match_table(rvu)) {
		rvu_npc_exact_reset(rvu, pcifunc);
		return;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgx_dev = cgx_get_pdata(cgx_id);
	lmac_count = cgx_get_lmac_cnt(cgx_dev);
	max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;

	for (i = 0; i < max_dmac_filters; i++)
		cgx_lmac_addr_del(cgx_id, lmac_id, i);

	/* As cgx_lmac_addr_del does not clear the entry at index 0,
	 * it needs to be done explicitly.
	 */
	cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
				    struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req,
				   struct msg_rsp *rsp)
{
	rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false);
	return 0;
}

static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req,
			      void *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	int stat = 0, err = 0;
	u64 tx_stat, rx_stat;
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	mac_ops = get_mac_ops(cgxd);

	/* Rx stats */
	while (stat < mac_ops->rx_stats_cnt) {
		err = mac_ops->mac_get_rx_stats(cgxd, lmac, stat, &rx_stat);
		if (err)
			return err;
		if (mac_ops->rx_stats_cnt == RPM_RX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->rx_stats[stat] = rx_stat;
		stat++;
	}

	/* Tx stats */
	stat = 0;
	while (stat < mac_ops->tx_stats_cnt) {
		err = mac_ops->mac_get_tx_stats(cgxd, lmac, stat, &tx_stat);
		if (err)
			return err;
		if (mac_ops->tx_stats_cnt == RPM_TX_STATS_COUNT)
			((struct rpm_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		else
			((struct cgx_stats_rsp *)rsp)->tx_stats[stat] = tx_stat;
		stat++;
	}
	return 0;
}

int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req,
			       struct cgx_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
			       struct rpm_stats_rsp *rsp)
{
	return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}

int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
				   struct msg_req *req,
				   struct cgx_fec_stats_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);

	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	return cgx_get_fec_stats(cgxd, lmac, rsp);
}

int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_set(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr);

	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
				      struct cgx_mac_addr_add_req *req,
				      struct cgx_mac_addr_add_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_add(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
	if (rc >= 0) {
		rsp->index = rc;
		return 0;
	}

	return rc;
}

int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
				      struct cgx_mac_addr_del_req *req,
				      struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_del(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
}

int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
					     struct msg_req *req,
					     struct cgx_max_dmac_entries_get_rsp
					     *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	/* If msg is received from PFs (which are not mapped to CGX LMACs)
	 * or from a VF then no entries are allocated for DMAC filters at
	 * CGX level. So return zero.
	 */
	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
		rsp->max_dmac_filters = 0;
		return 0;
	}

	if (rvu_npc_exact_has_match_table(rvu)) {
		rsp->max_dmac_filters = rvu_npc_exact_get_max_entries(rvu);
		return 0;
	}

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
				      struct cgx_mac_addr_set_or_get *req,
				      struct cgx_mac_addr_set_or_get *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;
	int rc = 0, i;
	u64 cfg;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	rsp->hdr.rc = rc;
	cfg = cgx_lmac_addr_get(cgx_id, lmac_id);
	/* Copy the 48-bit MAC address into the response, MSB first */
	for (i = 0; i < ETH_ALEN; i++)
		rsp->mac_addr[i] = cfg >> (ETH_ALEN - 1 - i) * 8;
	return 0;
}

int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	u16 pcifunc = req->hdr.pcifunc;
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* If the exact-match table is in use, promisc is handled there */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_enable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, true);
	return 0;
}

int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	/* If the exact-match table is in use, promisc is handled there */
	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_promisc_disable(rvu, req->hdr.pcifunc);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	cgx_lmac_promisc_config(cgx_id, lmac_id, false);
	return 0;
}

static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
	struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_PTP))
		return 0;

	/* This msg is expected only from PFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
	    !is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);

	mac_ops = get_mac_ops(cgxd);
	mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
	/* If PTP is enabled then inform NPC that packets to be
	 * parsed by this PF will have their data shifted by 8 bytes
	 * and if PTP is disabled then no shift is required.
	 */
	if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
		return -EINVAL;

	pfvf->hw_rx_tstamp_en = enable;

	return 0;
}

int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
		return -EPERM;

	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}

int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
}

static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (en) {
		set_bit(pf, &rvu->pf_notify_bmap);
		/* Send the current link status to PF */
		rvu_cgx_send_link_info(cgx_id, lmac_id, rvu);
	} else {
		clear_bit(pf, &rvu->pf_notify_bmap);
	}

	return 0;
}

int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req,
					  struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req,
					 struct msg_rsp *rsp)
{
	rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req,
				      struct cgx_link_info_msg *rsp)
{
	u8 cgx_id, lmac_id;
	int pf, err;

	pf = rvu_get_pf(req->hdr.pcifunc);

	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	err = cgx_get_link_info(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
				&rsp->link_info);
	return err;
}

int rvu_mbox_handler_cgx_features_get(struct rvu *rvu,
				      struct msg_req *req,
				      struct cgx_features_info_msg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_pf_cgxmapped(rvu, pf))
		return 0;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->lmac_features = cgx_features_get(cgxd);

	return 0;
}

u32 rvu_cgx_get_fifolen(struct rvu *rvu)
{
	struct mac_ops *mac_ops;
	u32 fifo_len;

	mac_ops = get_mac_ops(rvu_first_cgx_pdata(rvu));
	fifo_len = mac_ops ? mac_ops->fifo_len : 0;

	return fifo_len;
}

u32 rvu_cgx_get_lmac_fifolen(struct rvu *rvu, int cgx, int lmac)
{
	struct mac_ops *mac_ops;
	void *cgxd;

	cgxd = rvu_cgx_pdata(cgx, rvu);
	if (!cgxd)
		return 0;

	mac_ops = get_mac_ops(cgxd);
	if (!mac_ops->lmac_fifo_len)
		return 0;

	return mac_ops->lmac_fifo_len(cgxd, lmac);
}

static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en)
{
	int pf = rvu_get_pf(pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu));

	return mac_ops->mac_lmac_intl_lbk(rvu_cgx_pdata(cgx_id, rvu),
					  lmac_id, en);
}

int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req,
				       struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true);
	return 0;
}

int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req,
					struct msg_rsp *rsp)
{
	rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false);
	return 0;
}

int rvu_cgx_cfg_pause_frm(struct rvu *rvu, u16 pcifunc, u8 tx_pause, u8 rx_pause)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_pfc = 0, tx_pfc = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	if (!is_mac_feature_supported(rvu, pf, RVU_LMAC_FEAT_FC))
		return 0;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &tx_pfc, &rx_pfc);
	if (tx_pfc || rx_pfc) {
		dev_warn(rvu->dev,
			 "Can not configure 802.3X flow control as PFC frames are enabled");
		return LMAC_AF_ERR_8023PAUSE_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->mac_enadis_pause_frm(cgxd, lmac_id, tx_pause, rx_pause);
}

int rvu_mbox_handler_cgx_cfg_pause_frm(struct rvu *rvu,
				       struct cgx_pause_frm_cfg *req,
				       struct cgx_pause_frm_cfg *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	int err = 0;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	if (req->set)
		err = rvu_cgx_cfg_pause_frm(rvu, req->hdr.pcifunc, req->tx_pause, req->rx_pause);
	else
		mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);

	return err;
}

int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req,
					   struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return LMAC_AF_ERR_PF_NOT_MAPPED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id);
}

/* Finds cumulative status of NIX rx/tx counters from LF of a PF */
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id,
			   int index, int rxtxflag, u64 *stat)
{
	struct rvu_block *block;
	int blkaddr;
	u16 pcifunc;
	int pf, lf;

	*stat = 0;

	if (!cgxd || !rvu)
		return -EINVAL;

	pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
	if (pf < 0)
		return pf;

	/* Assumes LF of a PF and all of its VFs belongs to the same
	 * NIX block
	 */
	pcifunc = pf << RVU_PFVF_PF_SHIFT;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
	if (blkaddr < 0)
		return 0;
	block = &rvu->hw->block[blkaddr];

	for (lf = 0; lf < block->lf.max; lf++) {
		/* Check if a LF is attached to this PF or one of its VFs */
		if (!((block->fn_map[lf] & ~RVU_PFVF_FUNC_MASK) == (pcifunc &
			 ~RVU_PFVF_FUNC_MASK)))
			continue;
		if (rxtxflag == NIX_STATS_RX)
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_RX_STATX(lf, index));
		else
			*stat += rvu_read64(rvu, blkaddr,
					    NIX_AF_LFX_TX_STATX(lf, index));
	}

	return 0;
}

int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start)
{
	struct rvu_pfvf *parent_pf, *pfvf;
	int cgx_users, err = 0;

	if (!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
		return 0;

	parent_pf = &rvu->pf[rvu_get_pf(pcifunc)];
	pfvf = rvu_get_pfvf(rvu, pcifunc);

	mutex_lock(&rvu->cgx_cfg_lock);

	if (start && pfvf->cgx_in_use)
		goto exit; /* CGX is already started, nothing to do */
	if (!start && !pfvf->cgx_in_use)
		goto exit; /* CGX is already stopped, nothing to do */

	if (start) {
		cgx_users = parent_pf->cgx_users;
		parent_pf->cgx_users++;
	} else {
		parent_pf->cgx_users--;
		cgx_users = parent_pf->cgx_users;
	}

	/* Start CGX when the first of all NIXLFs is started.
	 * Stop CGX when the last of all NIXLFs is stopped.
	 */
	if (!cgx_users) {
		err = rvu_cgx_config_rxtx(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK,
					  start);
		if (err) {
			dev_err(rvu->dev, "Unable to %s CGX\n",
				start ? "start" : "stop");
			/* Revert the usage count in case of error */
			parent_pf->cgx_users = start ? parent_pf->cgx_users - 1
					       : parent_pf->cgx_users + 1;
			goto exit;
		}
	}
	pfvf->cgx_in_use = start;
exit:
	mutex_unlock(&rvu->cgx_cfg_lock);
	return err;
}

int rvu_mbox_handler_cgx_set_fec_param(struct rvu *rvu,
				       struct fec_mode *req,
				       struct fec_mode *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	if (req->fec == OTX2_FEC_OFF)
		req->fec = OTX2_FEC_NONE;
	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	rsp->fec = cgx_set_fec(req->fec, cgx_id, lmac_id);
	return 0;
}

int rvu_mbox_handler_cgx_get_aux_link_info(struct rvu *rvu, struct msg_req *req,
					   struct cgx_fw_data *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!rvu->fwdata)
		return LMAC_AF_ERR_FIRMWARE_DATA_NOT_MAPPED;

	if (!is_pf_cgxmapped(rvu, pf))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	memcpy(&rsp->fwdata, &rvu->fwdata->cgx_fw_data[cgx_id][lmac_id],
	       sizeof(struct cgx_lmac_fwdata_s));
	return 0;
}

int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
				       struct cgx_set_link_mode_req *req,
				       struct cgx_set_link_mode_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_idx, lmac;
	void *cgxd;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return -EPERM;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
	cgxd = rvu_cgx_pdata(cgx_idx, rvu);
	rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
	return 0;
}

int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct cgx_mac_addr_reset_req *req,
					struct msg_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_reset(rvu, req, rsp);

	return cgx_lmac_addr_reset(cgx_id, lmac_id);
}

int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
					 struct cgx_mac_addr_update_req *req,
					 struct cgx_mac_addr_update_rsp *rsp)
{
	int pf = rvu_get_pf(req->hdr.pcifunc);
	u8 cgx_id, lmac_id;

	if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
		return LMAC_AF_ERR_PERM_DENIED;

	if (rvu_npc_exact_has_match_table(rvu))
		return rvu_npc_exact_mac_addr_update(rvu, req, rsp);

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
}

int rvu_cgx_prio_flow_ctrl_cfg(struct rvu *rvu, u16 pcifunc, u8 tx_pause,
			       u8 rx_pause, u16 pfc_en)
{
	int pf = rvu_get_pf(pcifunc);
	u8 rx_8023 = 0, tx_8023 = 0;
	struct mac_ops *mac_ops;
	u8 cgx_id, lmac_id;
	void *cgxd;

	/* This msg is expected only from PF/VFs that are mapped to CGX LMACs,
	 * if received from other PF/VF simply ACK, nothing to do.
	 */
	if (!is_pf_cgxmapped(rvu, pf))
		return -ENODEV;

	rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
	cgxd = rvu_cgx_pdata(cgx_id, rvu);
	mac_ops = get_mac_ops(cgxd);

	mac_ops->mac_get_pause_frm_status(cgxd, lmac_id, &tx_8023, &rx_8023);
	if (tx_8023 || rx_8023) {
		dev_warn(rvu->dev,
			 "Can not configure PFC as 802.3X pause frames are enabled");
		return LMAC_AF_ERR_PFC_ENADIS_PERM_DENIED;
	}

	mutex_lock(&rvu->rsrc_lock);
	if (verify_lmac_fc_cfg(cgxd, lmac_id, tx_pause, rx_pause,
			       pcifunc & RVU_PFVF_FUNC_MASK)) {
		mutex_unlock(&rvu->rsrc_lock);
		return LMAC_AF_ERR_PERM_DENIED;
	}
	mutex_unlock(&rvu->rsrc_lock);

	return mac_ops->pfc_config(cgxd, lmac_id, tx_pause, rx_pause, pfc_en);
}

1205
1206 int rvu_mbox_handler_cgx_prio_flow_ctrl_cfg(struct rvu *rvu,
1207 struct cgx_pfc_cfg *req,
1208 struct cgx_pfc_rsp *rsp)
1209 {
1210 int pf = rvu_get_pf(req->hdr.pcifunc);
1211 struct mac_ops *mac_ops;
1212 u8 cgx_id, lmac_id;
1213 void *cgxd;
1214 int err;
1215
1216
1217
1218
1219 if (!is_pf_cgxmapped(rvu, pf))
1220 return -ENODEV;
1221
1222 rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
1223 cgxd = rvu_cgx_pdata(cgx_id, rvu);
1224 mac_ops = get_mac_ops(cgxd);
1225
1226 err = rvu_cgx_prio_flow_ctrl_cfg(rvu, req->hdr.pcifunc, req->tx_pause,
1227 req->rx_pause, req->pfc_en);
1228
1229 mac_ops->mac_get_pfc_frm_cfg(cgxd, lmac_id, &rsp->tx_pause, &rsp->rx_pause);
1230 return err;
1231 }