// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell.
 *
 */

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"
#include "rvu.h"
#include "lmac_common.h"

#define DRV_NAME	"Marvell-CGX/RPM"
#define DRV_STRING	"Marvell CGX/RPM Driver"

static LIST_HEAD(cgx_list);

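/* Convert firmware speed encoding to user format(Mbps) */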
static const u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX] = {
	[CGX_LINK_NONE] = 0,
	[CGX_LINK_10M] = 10,
	[CGX_LINK_100M] = 100,
	[CGX_LINK_1G] = 1000,
	[CGX_LINK_2HG] = 2500,
	[CGX_LINK_5G] = 5000,
	[CGX_LINK_10G] = 10000,
	[CGX_LINK_20G] = 20000,
	[CGX_LINK_25G] = 25000,
	[CGX_LINK_40G] = 40000,
	[CGX_LINK_50G] = 50000,
	[CGX_LINK_80G] = 80000,
	[CGX_LINK_100G] = 100000,
};

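/* Convert firmware lmac type encoding to string */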
static const char *cgx_lmactype_string[LMAC_MODE_MAX] = {
	[LMAC_MODE_SGMII] = "SGMII",
	[LMAC_MODE_XAUI] = "XAUI",
	[LMAC_MODE_RXAUI] = "RXAUI",
	[LMAC_MODE_10G_R] = "10G_R",
	[LMAC_MODE_40G_R] = "40G_R",
	[LMAC_MODE_QSGMII] = "QSGMII",
	[LMAC_MODE_25G_R] = "25G_R",
	[LMAC_MODE_50G_R] = "50G_R",
	[LMAC_MODE_100G_R] = "100G_R",
	[LMAC_MODE_USXGMII] = "USXGMII",
};

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en);

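/* Supported devices */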
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_RPM) },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

static bool is_dev_rpm(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return (cgx->pdev->device == PCI_DEVID_CN10K_RPM);
}

bool is_lmac_valid(struct cgx *cgx, int lmac_id)
{
	if (!cgx || lmac_id < 0 || lmac_id >= MAX_LMAC_PER_CGX)
		return false;
	return test_bit(lmac_id, &cgx->lmac_bmap);
}

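/* Returns the position of the given lmac_id among the enabled LMACs,
 * i.e. its sequence number within lmac_bmap
 */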
static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
{
	int tmp, id = 0;

	for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		if (tmp == lmac_id)
			break;
		id++;
	}

	return id;
}

struct mac_ops *get_mac_ops(void *cgxd)
{
	if (!cgxd)
		return NULL;

	return ((struct cgx *)cgxd)->mac_ops;
}

void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
	       offset);
}

u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << cgx->mac_ops->lmac_offset) +
		     offset);
}

struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgxcnt_max(void)
{
	struct cgx *cgx_dev;
	int idmax = -ENODEV;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		if (cgx_dev->cgx_id > idmax)
			idmax = cgx_dev->cgx_id;

	if (idmax < 0)
		return 0;

	return idmax + 1;
}

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}

void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	cgx_write(cgx_dev, lmac_id, offset, val);
}

u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);

	return cgx_read(cgx_dev, lmac_id, offset);
}

int cgx_get_cgxid(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -EINVAL;

	return cgx->cgx_id;
}

u8 cgx_lmac_get_p2x(int cgx_id, int lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	u64 cfg;

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_CFG);

	return (cfg & CMR_P2X_SEL_MASK) >> CMR_P2X_SEL_SHIFT;
}

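/* Returns the cached link state last reported by firmware for this LMAC */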
int cgx_get_link_info(void *cgxd, int lmac_id,
		      struct cgx_link_user_info *linfo)
{
	struct lmac *lmac = lmac_pdata(lmac_id, cgxd);

	if (!lmac)
		return -ENODEV;

	*linfo = lmac->link_info;
	return 0;
}

static u64 mac2u64(u8 *mac_addr)
{
	u64 mac = 0;
	int index;

	for (index = ETH_ALEN - 1; index >= 0; index--)
		mac |= ((u64)*mac_addr++) << (8 * index);
	return mac;
}

static void cfg2mac(u64 cfg, u8 *mac_addr)
{
	int i, index = 0;

	for (i = ETH_ALEN - 1; i >= 0; i--, index++)
		mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
}

int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	/* access mac_ops to know csr_offset */
	mac_ops = cgx_dev->mac_ops;

	/* copy 6bytes from macaddr */
	cfg = mac2u64(mac_addr);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	/* default DMAC entry of an LMAC sits at the base of its
	 * per-LMAC CAM region
	 */
	index = id * lmac->mac_to_index_bmap.max;

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
		  cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
		CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;

	if (!cgx || !is_lmac_valid(cgx, lmac_id))
		return 0;

	return cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
}

u64 cgx_read_dmac_entry(void *cgxd, int index)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return 0;

	return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
}

int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index, idx;
	u64 cfg = 0;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Get available index where entry is to be installed */
	idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
	if (idx < 0)
		return idx;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + idx;

	cfg = mac2u64(mac_addr);
	cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
	cfg |= ((u64)lmac_id << 49);
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);

	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);

	if (is_multicast_ether_addr(mac_addr)) {
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE_CAM;
		lmac->mcast_filters_count++;
	} else if (!lmac->mcast_filters_count) {
		cfg |= CGX_DMAC_MCAST_MODE;
	}

	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return idx;
}

int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 index = 0, id;
	u64 cfg;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;
	/* Restore index 0 to its default init value as done during
	 * cgx_lmac_init
	 */
	set_bit(0, lmac->mac_to_index_bmap.bmap);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;
	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	/* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
	cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
	cfg &= ~CGX_DMAC_CAM_ACCEPT;
	cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
	cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

	return 0;
}

/* Update the MAC address stored at an already allocated DMAC filter index
 * of this LMAC
 */
int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct mac_ops *mac_ops;
	struct lmac *lmac;
	u64 cfg;
	int id;

	lmac = lmac_pdata(lmac_id, cgx_dev);
	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;

	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* ensure the index was actually allocated */
	if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
		return -EINVAL;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
	cfg &= ~CGX_RX_DMAC_ADR_MASK;
	cfg |= mac2u64(mac_addr);

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
	return 0;
}

int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	u8 mac[ETH_ALEN];
	u64 cfg;
	int id;

	if (!lmac)
		return -ENODEV;

	mac_ops = cgx_dev->mac_ops;

	if (index >= lmac->mac_to_index_bmap.max)
		return -EINVAL;

	/* Skip deletion for reserved index i.e. index 0 */
	if (index == 0)
		return 0;

	rvu_free_rsrc(&lmac->mac_to_index_bmap, index);

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max + index;

	/* Read MAC address to check whether it is ucast or mcast */
	cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));

	cfg2mac(cfg, mac);
	if (is_multicast_ether_addr(mac))
		lmac->mcast_filters_count--;

	if (!lmac->mcast_filters_count) {
		cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~GENMASK_ULL(2, 1);
		cfg |= CGX_DMAC_MCAST_MODE;
		cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
	}

	cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);

	return 0;
}

int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);

	if (lmac)
		return lmac->mac_to_index_bmap.max;

	return 0;
}

u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
	struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
	struct mac_ops *mac_ops;
	int index;
	u64 cfg;
	int id;

	if (!lmac)
		return 0;

	mac_ops = cgx_dev->mac_ops;

	id = get_sequence_id_of_lmac(cgx_dev, lmac_id);

	index = id * lmac->mac_to_index_bmap.max;

	cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
	return cfg & CGX_RX_DMAC_ADR_MASK;
}

int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cgx_write(cgx, lmac_id, CGXX_CMRX_RX_ID_MAP, (pkind & 0x3F));
	return 0;
}

static u8 cgx_get_lmac_type(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	return (cfg >> CGX_LMAC_TYPE_SHIFT) & CGX_LMAC_TYPE_MASK;
}

static u32 cgx_get_lmac_fifo_len(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u8 num_lmacs;
	u32 fifo_len;

	fifo_len = cgx->mac_ops->fifo_len;
	num_lmacs = cgx->mac_ops->get_nr_lmacs(cgx);

	switch (num_lmacs) {
	case 1:
		return fifo_len;
	case 2:
		return fifo_len / 2;
	case 3:
		/* LMAC0 gets half of the FIFO, rest 1/4th each */
		if (lmac_id == 0)
			return fifo_len / 2;
		return fifo_len / 4;
	case 4:
	default:
		return fifo_len / 4;
	}
}

int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 lmac_type;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	lmac_type = cgx->mac_ops->get_lmac_type(cgx, lmac_id);
	if (lmac_type == LMAC_MODE_SGMII || lmac_type == LMAC_MODE_QSGMII) {
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= CGXX_GMP_PCS_MRX_CTL_LBK;
		else
			cfg &= ~CGXX_GMP_PCS_MRX_CTL_LBK;
		cgx_write(cgx, lmac_id, CGXX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = cgx_read(cgx, lmac_id, CGXX_SPUX_CONTROL1);
		if (enable)
			cfg |= CGXX_SPUX_CONTROL1_LBK;
		else
			cfg &= ~CGXX_SPUX_CONTROL1_LBK;
		cgx_write(cgx, lmac_id, CGXX_SPUX_CONTROL1, cfg);
	}
	return 0;
}

void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
	struct cgx *cgx = cgx_get_pdata(cgx_id);
	struct lmac *lmac = lmac_pdata(lmac_id, cgx);
	struct mac_ops *mac_ops;
	u16 max_dmac;
	int index, i;
	u64 cfg = 0;
	int id;

	/* validate both pointers before touching lmac fields to avoid a
	 * NULL dereference
	 */
	if (!cgx || !lmac)
		return;

	max_dmac = lmac->mac_to_index_bmap.max;
	id = get_sequence_id_of_lmac(cgx, lmac_id);

	mac_ops = cgx->mac_ops;
	if (enable) {
		/* Enable promiscuous mode on LMAC */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg &= ~CGX_DMAC_CAM_ACCEPT;
		cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);

		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
			cgx_write(cgx, 0,
				  (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
		}
	} else {
		/* Disable promiscuous mode and re-enable valid CAM entries */
		cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
		cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
		cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
		for (i = 0; i < max_dmac; i++) {
			index = id * max_dmac + i;
			cfg = cgx_read(cgx, 0,
				       (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
			if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
				cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
				cgx_write(cgx, 0,
					  (CGXX_CMRX_RX_DMAC_CAM0 +
					   index * 0x8),
					  cfg);
			}
		}
	}
}

static int cgx_lmac_get_pause_frm_status(void *cgxd, int lmac_id,
					 u8 *tx_pause, u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	*rx_pause = !!(cfg & CGX_SMUX_RX_FRM_CTL_CTL_BCK);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	*tx_pause = !!(cfg & CGX_SMUX_TX_CTL_L2P_BP_CONV);
	return 0;
}

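/* Enable or disable forwarding of received pause/PFC frames */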
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u8 rx_pause, tx_pause;
	bool is_pfc_enabled;
	struct lmac *lmac;
	u64 cfg;

	if (!cgx)
		return;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return;

	/* Pause frames are not enabled, just return */
	if (!bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max))
		return;

	cgx_lmac_get_pause_frm_status(cgx, lmac_id, &rx_pause, &tx_pause);
	is_pfc_enabled = !rx_pause;

	if (enable) {
		if (!is_pfc_enabled) {
			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
			cfg |= CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
			cfg |= CGX_SMUX_RX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
		} else {
			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
			cfg |= CGXX_SMUX_CBFC_CTL_BCK_EN;
			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
		}
	} else {
		if (!is_pfc_enabled) {
			cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
			cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
			cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
			cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
		} else {
			cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
			cfg &= ~CGXX_SMUX_CBFC_CTL_BCK_EN;
			cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
		}
	}
}

int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*rx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_STAT0 + (idx * 8));
	return 0;
}

int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat)
{
	struct cgx *cgx = cgxd;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;
	*tx_stat = cgx_read(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (idx * 8));
	return 0;
}

u64 cgx_features_get(void *cgxd)
{
	return ((struct cgx *)cgxd)->hw_features;
}

static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
	if (!linfo->fec)
		return 0;

	switch (linfo->lmac_type_id) {
	case LMAC_MODE_SGMII:
	case LMAC_MODE_XAUI:
	case LMAC_MODE_RXAUI:
	case LMAC_MODE_QSGMII:
		return 0;
	case LMAC_MODE_10G_R:
	case LMAC_MODE_25G_R:
	case LMAC_MODE_100G_R:
	case LMAC_MODE_USXGMII:
		return 1;
	case LMAC_MODE_40G_R:
		return 4;
	case LMAC_MODE_50G_R:
		if (linfo->fec == OTX2_FEC_BASER)
			return 2;
		else
			return 1;
	default:
		return 0;
	}
}

int cgx_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp)
{
	int stats, fec_stats_count = 0;
	int corr_reg, uncorr_reg;
	struct cgx *cgx = cgxd;

	if (!cgx || lmac_id >= cgx->lmac_count)
		return -ENODEV;
	fec_stats_count =
		cgx_set_fec_stats_count(&cgx->lmac_idmap[lmac_id]->link_info);
	if (cgx->lmac_idmap[lmac_id]->link_info.fec == OTX2_FEC_BASER) {
		corr_reg = CGXX_SPUX_LNX_FEC_CORR_BLOCKS;
		uncorr_reg = CGXX_SPUX_LNX_FEC_UNCORR_BLOCKS;
	} else {
		corr_reg = CGXX_SPUX_RSFEC_CORR;
		uncorr_reg = CGXX_SPUX_RSFEC_UNCORR;
	}
	for (stats = 0; stats < fec_stats_count; stats++) {
		rsp->fec_corr_blks +=
			cgx_read(cgx, lmac_id, corr_reg + (stats * 8));
		rsp->fec_uncorr_blks +=
			cgx_read(cgx, lmac_id, uncorr_reg + (stats * 8));
	}
	return 0;
}

int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	if (enable)
		cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
	else
		cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
	cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return 0;
}

int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg, last;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
	last = cfg;
	if (enable)
		cfg |= DATA_PKT_TX_EN;
	else
		cfg &= ~DATA_PKT_TX_EN;

	if (cfg != last)
		cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
	return !!(last & DATA_PKT_TX_EN);
}

static int cgx_lmac_enadis_pause_frm(void *cgxd, int lmac_id,
				     u8 tx_pause, u8 rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (is_dev_rpm(cgx))
		return 0;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cfg |= rx_pause ? CGX_SMUX_RX_FRM_CTL_CTL_BCK : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cfg |= tx_pause ? CGX_SMUX_TX_CTL_L2P_BP_CONV : 0x0;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	if (tx_pause) {
		cfg &= ~CGX_CMR_RX_OVR_BP_EN(lmac_id);
	} else {
		cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
		cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	}
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);
	return 0;
}

static void cgx_lmac_pause_frm_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return;

	if (enable) {
		/* Set pause time and interval */
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);
		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_SMUX_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));

		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_TIME,
			  DEFAULT_PAUSE_TIME);

		cfg = cgx_read(cgx, lmac_id,
			       CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL);
		cfg &= ~0xFFFFULL;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_TX_PAUSE_PKT_INTERVAL,
			  cfg | (DEFAULT_PAUSE_TIME / 2));
	}

	/* ALL pause frames received are completely ignored */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
	cfg &= ~CGX_SMUX_RX_FRM_CTL_CTL_BCK;
	cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);

	cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
	cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK;
	cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

	/* Disable pause frames transmission */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_TX_CTL);
	cfg &= ~CGX_SMUX_TX_CTL_L2P_BP_CONV;
	cgx_write(cgx, lmac_id, CGXX_SMUX_TX_CTL, cfg);

	cfg = cgx_read(cgx, 0, CGXX_CMR_RX_OVR_BP);
	cfg |= CGX_CMR_RX_OVR_BP_EN(lmac_id);
	cfg &= ~CGX_CMR_RX_OVR_BP_BP(lmac_id);
	cgx_write(cgx, 0, CGXX_CMR_RX_OVR_BP, cfg);

	/* Disable all PFC classes by default */
	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
	cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);
}

int verify_lmac_fc_cfg(void *cgxd, int lmac_id, u8 tx_pause, u8 rx_pause,
		       int pfvf_idx)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	if (!rx_pause)
		clear_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);
	else
		set_bit(pfvf_idx, lmac->rx_fc_pfvf_bmap.bmap);

	if (!tx_pause)
		clear_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);
	else
		set_bit(pfvf_idx, lmac->tx_fc_pfvf_bmap.bmap);

	/* check if other pfvfs are using flow control */
	if (!rx_pause && bitmap_weight(lmac->rx_fc_pfvf_bmap.bmap, lmac->rx_fc_pfvf_bmap.max)) {
		dev_warn(&cgx->pdev->dev,
			 "Receive Flow control disable not permitted as it is in use by other PFVFs\n");
		return -EPERM;
	}

	if (!tx_pause && bitmap_weight(lmac->tx_fc_pfvf_bmap.bmap, lmac->tx_fc_pfvf_bmap.max)) {
		dev_warn(&cgx->pdev->dev,
			 "Transmit Flow control disable not permitted as it is in use by other PFVFs\n");
		return -EPERM;
	}

	return 0;
}

int cgx_lmac_pfc_config(void *cgxd, int lmac_id, u8 tx_pause,
			u8 rx_pause, u16 pfc_en)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	/* Return as no traffic classes are requested */
	if (tx_pause && !pfc_en)
		return 0;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);
	pfc_en |= FIELD_GET(CGX_PFC_CLASS_MASK, cfg);

	if (rx_pause) {
		cfg |= (CGXX_SMUX_CBFC_CTL_RX_EN |
			CGXX_SMUX_CBFC_CTL_BCK_EN |
			CGXX_SMUX_CBFC_CTL_DRP_EN);
	} else {
		cfg &= ~(CGXX_SMUX_CBFC_CTL_RX_EN |
			 CGXX_SMUX_CBFC_CTL_BCK_EN |
			 CGXX_SMUX_CBFC_CTL_DRP_EN);
	}

	if (tx_pause) {
		cfg |= CGXX_SMUX_CBFC_CTL_TX_EN;
		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, pfc_en, cfg);
	} else {
		cfg &= ~CGXX_SMUX_CBFC_CTL_TX_EN;
		cfg = FIELD_SET(CGX_PFC_CLASS_MASK, 0, cfg);
	}

	cgx_write(cgx, lmac_id, CGXX_SMUX_CBFC_CTL, cfg);

	/* Write source MAC address which will be filled into PFC packet */
	cfg = cgx_lmac_addr_get(cgx->cgx_id, lmac_id);
	cgx_write(cgx, lmac_id, CGXX_SMUX_SMAC, cfg);

	return 0;
}

int cgx_lmac_get_pfc_frm_cfg(void *cgxd, int lmac_id, u8 *tx_pause,
			     u8 *rx_pause)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!is_lmac_valid(cgx, lmac_id))
		return -ENODEV;

	cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_CBFC_CTL);

	*rx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_RX_EN);
	*tx_pause = !!(cfg & CGXX_SMUX_CBFC_CTL_TX_EN);

	return 0;
}

void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
{
	struct cgx *cgx = cgxd;
	u64 cfg;

	if (!cgx)
		return;

	if (enable) {
		/* Enable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	} else {
		/* Disable inbound PTP timestamping */
		cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
		cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);

		cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
		cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
		cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
	}
}

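/* CGX Firmware interface low level support */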
int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd %lld timeout\n",
			cgx->cgx_id, lmac->lmac_id, FIELD_GET(CMDREG_ID, req));
		err = LMAC_AF_ERR_CMD_TIMEOUT;
		goto unlock;
	}

	/* We have a valid command response */
	smp_rmb(); /* Ensure the latest updates are visible */
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

int cgx_fwi_cmd_generic(u64 req, u64 *resp, struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err) {
		if (FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
			return -EIO;
		else
			return 0;
	}

	return err;
}

static int cgx_link_usertable_index_map(int speed)
{
	switch (speed) {
	case SPEED_10:
		return CGX_LINK_10M;
	case SPEED_100:
		return CGX_LINK_100M;
	case SPEED_1000:
		return CGX_LINK_1G;
	case SPEED_2500:
		return CGX_LINK_2HG;
	case SPEED_5000:
		return CGX_LINK_5G;
	case SPEED_10000:
		return CGX_LINK_10G;
	case SPEED_20000:
		return CGX_LINK_20G;
	case SPEED_25000:
		return CGX_LINK_25G;
	case SPEED_40000:
		return CGX_LINK_40G;
	case SPEED_50000:
		return CGX_LINK_50G;
	case 80000: /* ethtool has no SPEED_80000 definition */
		return CGX_LINK_80G;
	case SPEED_100000:
		return CGX_LINK_100G;
	case SPEED_UNKNOWN:
		return CGX_LINK_NONE;
	}
	return CGX_LINK_NONE;
}

static void set_mod_args(struct cgx_set_link_mode_args *args,
			 u32 speed, u8 duplex, u8 autoneg, u64 mode)
{
	/* Fill in default values for any link attribute the user did not
	 * pass explicitly
	 */
	if (args->duplex == DUPLEX_UNKNOWN)
		args->duplex = duplex;
	if (args->speed == SPEED_UNKNOWN)
		args->speed = speed;
	if (args->an == AUTONEG_UNKNOWN)
		args->an = autoneg;
	args->mode = mode;
	args->ports = 0;
}

static void otx2_map_ethtool_link_modes(u64 bitmask,
					struct cgx_set_link_mode_args *args)
{
	switch (bitmask) {
	case ETHTOOL_LINK_MODE_10baseT_Half_BIT:
		set_mod_args(args, 10, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_10baseT_Full_BIT:
		set_mod_args(args, 10, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Half_BIT:
		set_mod_args(args, 100, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_100baseT_Full_BIT:
		set_mod_args(args, 100, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Half_BIT:
		set_mod_args(args, 1000, 1, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_SGMII));
		break;
	case ETHTOOL_LINK_MODE_1000baseX_Full_BIT:
		set_mod_args(args, 1000, 0, 0, BIT_ULL(CGX_MODE_1000_BASEX));
		break;
	case ETHTOOL_LINK_MODE_10000baseT_Full_BIT:
		set_mod_args(args, 1000, 0, 1, BIT_ULL(CGX_MODE_QSGMII));
		break;
	case ETHTOOL_LINK_MODE_10000baseSR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2C));
		break;
	case ETHTOOL_LINK_MODE_10000baseLR_Full_BIT:
		set_mod_args(args, 10000, 0, 0, BIT_ULL(CGX_MODE_10G_C2M));
		break;
	case ETHTOOL_LINK_MODE_10000baseKR_Full_BIT:
		set_mod_args(args, 10000, 0, 1, BIT_ULL(CGX_MODE_10G_KR));
		break;
	case ETHTOOL_LINK_MODE_25000baseSR_Full_BIT:
		set_mod_args(args, 25000, 0, 0, BIT_ULL(CGX_MODE_25G_C2C));
		break;
	case ETHTOOL_LINK_MODE_25000baseCR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_CR));
		break;
	case ETHTOOL_LINK_MODE_25000baseKR_Full_BIT:
		set_mod_args(args, 25000, 0, 1, BIT_ULL(CGX_MODE_25G_KR));
		break;
	case ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2C));
		break;
	case ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT:
		set_mod_args(args, 40000, 0, 0, BIT_ULL(CGX_MODE_40G_C2M));
		break;
	case ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_CR4));
		break;
	case ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT:
		set_mod_args(args, 40000, 0, 1, BIT_ULL(CGX_MODE_40G_KR4));
		break;
	case ETHTOOL_LINK_MODE_50000baseSR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2C));
		break;
	case ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT:
		set_mod_args(args, 50000, 0, 0, BIT_ULL(CGX_MODE_50G_C2M));
		break;
	case ETHTOOL_LINK_MODE_50000baseCR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_CR));
		break;
	case ETHTOOL_LINK_MODE_50000baseKR_Full_BIT:
		set_mod_args(args, 50000, 0, 1, BIT_ULL(CGX_MODE_50G_KR));
		break;
	case ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2C));
		break;
	case ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT:
		set_mod_args(args, 100000, 0, 0, BIT_ULL(CGX_MODE_100G_C2M));
		break;
	case ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_CR4));
		break;
	case ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT:
		set_mod_args(args, 100000, 0, 1, BIT_ULL(CGX_MODE_100G_KR4));
		break;
	default:
		set_mod_args(args, 0, 1, 0, BIT_ULL(CGX_MODE_MAX));
		break;
	}
}

static inline void link_status_user_format(u64 lstat,
					   struct cgx_link_user_info *linfo,
					   struct cgx *cgx, u8 lmac_id)
{
	const char *lmac_string;

	linfo->link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	linfo->full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	linfo->speed = cgx_speed_mbps[FIELD_GET(RESP_LINKSTAT_SPEED, lstat)];
	linfo->an = FIELD_GET(RESP_LINKSTAT_AN, lstat);
	linfo->fec = FIELD_GET(RESP_LINKSTAT_FEC, lstat);
	linfo->lmac_type_id = cgx_get_lmac_type(cgx, lmac_id);
	lmac_string = cgx_lmactype_string[linfo->lmac_type_id];
	strncpy(linfo->lmac_type, lmac_string, LMACTYPE_STR_LEN - 1);
}

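/* Hardware event handlers */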
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx_link_user_info *linfo;
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;
	int err_type;

	dev = &cgx->pdev->dev;

	link_status_user_format(lstat, &event.link_uinfo, cgx, lmac->lmac_id);
	err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	/* update the local copy of link status */
	lmac->link_info = event.link_uinfo;
	linfo = &lmac->link_info;

	if (err_type == CGX_ERR_SPEED_CHANGE_INVALID)
		return;

	/* Ensure callback doesn't get unregistered until link change
	 * processing is completed
	 */
	spin_lock(&lmac->event_cb_lock);

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null",
			cgx->cgx_id, lmac->lmac_id);
		if (err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id, err_type);
		}
		dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n",
			 cgx->cgx_id, lmac->lmac_id,
			 linfo->link_up ? "UP" : "DOWN", linfo->speed);
		goto err;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
err:
	spin_unlock(&lmac->event_cb_lock);
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);
	if (id == CGX_CMD_LINK_BRING_UP ||
	    id == CGX_CMD_LINK_BRING_DOWN ||
	    id == CGX_CMD_MODE_CHANGE)
		return true;
	else
		return false;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	if (FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE)
		return true;
	else
		return false;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	u64 event, offset, clear_bit;
	struct lmac *lmac = data;
	struct cgx *cgx;

	cgx = lmac->cgx;

	/* Clear SW_INT for RPM and CMR_INT for CGX */
	offset = cgx->mac_ops->int_register;
	clear_bit = cgx->mac_ops->int_ena_bit;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be a separate response for link change
		 * commands
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion */
		lmac->cmd_pend = false;
		wake_up_interruptible(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is cleared.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, offset, clear_bit);

	return IRQ_HANDLED;
}

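/* APIs for PHY management using CGX firmware interface */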
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}

int cgx_lmac_evh_unregister(void *cgxd, int lmac_id)
{
	struct lmac *lmac;
	unsigned long flags;
	struct cgx *cgx = cgxd;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);

	return 0;
}

int cgx_get_fwdata_base(u64 *base)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int first_lmac;
	int err;

	cgx = list_first_entry_or_null(&cgx_list, struct cgx, cgx_list);
	if (!cgx)
		return -ENXIO;

	first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FWD_BASE, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, first_lmac);
	if (!err)
		*base = FIELD_GET(RESP_FWD_BASE, resp);

	return err;
}

int cgx_set_link_mode(void *cgxd, struct cgx_set_link_mode_args args,
		      int cgx_id, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	if (args.mode)
		otx2_map_ethtool_link_modes(args.mode, &args);
	if (!args.speed && args.duplex && !args.an)
		return -EINVAL;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_MODE_CHANGE, req);
	req = FIELD_SET(CMDMODECHANGE_SPEED,
			cgx_link_usertable_index_map(args.speed), req);
	req = FIELD_SET(CMDMODECHANGE_DUPLEX, args.duplex, req);
	req = FIELD_SET(CMDMODECHANGE_AN, args.an, req);
	req = FIELD_SET(CMDMODECHANGE_PORT, args.ports, req);
	req = FIELD_SET(CMDMODECHANGE_FLAGS, args.mode, req);

	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

int cgx_set_fec(u64 fec, int cgx_id, int lmac_id)
{
	u64 req = 0, resp;
	struct cgx *cgx;
	int err = 0;

	cgx = cgx_get_pdata(cgx_id);
	if (!cgx)
		return -ENXIO;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_SET_FEC, req);
	req = FIELD_SET(CMDSETFEC, fec, req);
	err = cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
	if (err)
		return err;

	cgx->lmac_idmap[lmac_id]->link_info.fec =
		FIELD_GET(RESP_LINKSTAT_FEC, resp);
	return cgx->lmac_idmap[lmac_id]->link_info.fec;
}

int cgx_get_phy_fec_stats(void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	u64 req = 0, resp;

	if (!cgx)
		return -ENODEV;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_PHY_FEC_STATS, req);
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable)
{
	u64 req = 0;
	u64 resp;

	if (enable) {
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req);
		/* On CN10K firmware offloads link bring up/down operations
		 * to ECP. On OcteonTx2 link operations are handled by
		 * firmware itself, which can cause mbox errors, so configure
		 * the maximum time firmware polls for link as 1000 ms.
		 */
		if (!is_dev_rpm(cgx))
			req = FIELD_SET(LINKCFG_TIMEOUT, 1000, req);

	} else {
		req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req);
	}
	return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id);
}

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	int first_lmac = find_first_bit(&cgx->lmac_bmap, MAX_LMAC_PER_CGX);
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, first_lmac);
}

static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);
	if (major_ver != CGX_FIRMWARE_MAJOR_VER)
		return -EIO;
	else
		return 0;
}

static void cgx_lmac_linkup_work(struct work_struct *work)
{
	struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work);
	struct device *dev = &cgx->pdev->dev;
	int i, err;

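	/* Do Link up for all the enabled lmacs */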
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		err = cgx_fwi_link_change(cgx, i, true);
		if (err)
			dev_info(dev, "cgx port %d:%d Link up command failed\n",
				 cgx->cgx_id, i);
	}
}

int cgx_lmac_linkup_start(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work);

	return 0;
}

static void cgx_lmac_get_fifolen(struct cgx *cgx)
{
	u64 cfg;

	cfg = cgx_read(cgx, 0, CGX_CONST);
	cgx->mac_ops->fifo_len = FIELD_GET(CGX_CONST_RXFIFO_SIZE, cfg);
}

static int cgx_configure_interrupt(struct cgx *cgx, struct lmac *lmac,
				   int cnt, bool req_free)
{
	struct mac_ops *mac_ops = cgx->mac_ops;
	u64 offset, ena_bit;
	unsigned int irq;
	int err;

	irq = pci_irq_vector(cgx->pdev, mac_ops->lmac_fwi +
			     cnt * mac_ops->irq_offset);
	offset = mac_ops->int_set_reg;
	ena_bit = mac_ops->int_ena_bit;

	if (req_free) {
		free_irq(irq, lmac);
		return 0;
	}

	err = request_irq(irq, cgx_fwi_event_handler, 0, lmac->name, lmac);
	if (err)
		return err;

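	/* Enable interrupt */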
	cgx_write(cgx, lmac->lmac_id, offset, ena_bit);
	return 0;
}

int cgx_get_nr_lmacs(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7ULL;
}

u8 cgx_get_lmacid(void *cgxd, u8 lmac_index)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_idmap[lmac_index]->lmac_id;
}

unsigned long cgx_get_lmac_bmap(void *cgxd)
{
	struct cgx *cgx = cgxd;

	return cgx->lmac_bmap;
}

static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	u64 lmac_list;
	int i, err;

	cgx_lmac_get_fifolen(cgx);

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	/* lmac_list specifies which lmacs are enabled:
	 * when bit n is set to 1, LMAC[n] is enabled
	 */
	if (cgx->mac_ops->non_contiguous_serdes_lane)
		lmac_list = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0xFULL;

	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(struct lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			err = -ENOMEM;
			goto err_lmac_free;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		if (cgx->mac_ops->non_contiguous_serdes_lane) {
			lmac->lmac_id = __ffs64(lmac_list);
			lmac_list &= ~BIT_ULL(lmac->lmac_id);
		} else {
			lmac->lmac_id = i;
		}

		lmac->cgx = cgx;
		lmac->mac_to_index_bmap.max =
				MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
		err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
		if (err)
			goto err_name_free;

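		/* Reserve first entry for default MAC address */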
		set_bit(0, lmac->mac_to_index_bmap.bmap);

		lmac->rx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->rx_fc_pfvf_bmap);
		if (err)
			goto err_dmac_bmap_free;

		lmac->tx_fc_pfvf_bmap.max = 128;
		err = rvu_alloc_bitmap(&lmac->tx_fc_pfvf_bmap);
		if (err)
			goto err_rx_fc_bmap_free;

		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		spin_lock_init(&lmac->event_cb_lock);
		err = cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, false);
		if (err)
			goto err_bitmap_free;

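		/* Add reference */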
		cgx->lmac_idmap[lmac->lmac_id] = lmac;
		set_bit(lmac->lmac_id, &cgx->lmac_bmap);
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, true);
	}

	return cgx_lmac_verify_fwi_version(cgx);

err_bitmap_free:
	rvu_free_bitmap(&lmac->tx_fc_pfvf_bmap);
err_rx_fc_bmap_free:
	rvu_free_bitmap(&lmac->rx_fc_pfvf_bmap);
err_dmac_bmap_free:
	rvu_free_bitmap(&lmac->mac_to_index_bmap);
err_name_free:
	kfree(lmac->name);
err_lmac_free:
	kfree(lmac);
	return err;
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	if (cgx->cgx_cmd_workq) {
		destroy_workqueue(cgx->cgx_cmd_workq);
		cgx->cgx_cmd_workq = NULL;
	}

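	/* Free all lmac related resources */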
	for_each_set_bit(i, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
		cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
		kfree(lmac->mac_to_index_bmap.bmap);
		/* free the flow-control PF/VF bitmaps allocated in
		 * cgx_lmac_init() as well, so they are not leaked
		 */
		kfree(lmac->rx_fc_pfvf_bmap.bmap);
		kfree(lmac->tx_fc_pfvf_bmap.bmap);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static void cgx_populate_features(struct cgx *cgx)
{
	if (is_dev_rpm(cgx))
		cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
				    RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
	else
		cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
				    RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}

static struct mac_ops cgx_mac_ops = {
	.name = "cgx",
	.csr_offset = 0,
	.lmac_offset = 18,
	.int_register = CGXX_CMRX_INT,
	.int_set_reg = CGXX_CMRX_INT_ENA_W1S,
	.irq_offset = 9,
	.int_ena_bit = FW_CGX_INT,
	.lmac_fwi = CGX_LMAC_FWI,
	.non_contiguous_serdes_lane = false,
	.rx_stats_cnt = 9,
	.tx_stats_cnt = 18,
	.get_nr_lmacs = cgx_get_nr_lmacs,
	.get_lmac_type = cgx_get_lmac_type,
	.lmac_fifo_len = cgx_get_lmac_fifo_len,
	.mac_lmac_intl_lbk = cgx_lmac_internal_loopback,
	.mac_get_rx_stats = cgx_get_rx_stats,
	.mac_get_tx_stats = cgx_get_tx_stats,
	.mac_enadis_rx_pause_fwding = cgx_lmac_enadis_rx_pause_fwding,
	.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
	.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
	.mac_pause_frm_config = cgx_lmac_pause_frm_config,
	.mac_enadis_ptp_config = cgx_lmac_ptp_config,
	.mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
	.mac_tx_enable = cgx_lmac_tx_enable,
	.pfc_config = cgx_lmac_pfc_config,
	.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
};

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

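	/* Use mac_ops to get MAC specific features */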
	if (pdev->device == PCI_DEVID_CN10K_RPM)
		cgx->mac_ops = rpm_get_mac_ops();
	else
		cgx->mac_ops = &cgx_mac_ops;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

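	/* MAP configuration registers */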
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	/* Derive this CGX's id from its BAR address before it is used in
	 * the log messages below
	 */
	cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
		& CGX_ID_MASK;

	cgx->lmac_count = cgx->mac_ops->get_nr_lmacs(cgx);
	if (!cgx->lmac_count) {
		dev_notice(dev, "CGX %d LMAC count is zero, skipping probe\n", cgx->cgx_id);
		err = -EOPNOTSUPP;
		goto err_release_regions;
	}

	nvec = pci_msix_vec_count(cgx->pdev);
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

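	/* init wq for processing linkup requests */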
	INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work);
	cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0);
	if (!cgx->cgx_cmd_workq) {
		dev_err(dev, "alloc workqueue failed for cgx cmd");
		err = -ENOMEM;
		goto err_free_irq_vectors;
	}

	list_add(&cgx->cgx_list, &cgx_list);

	cgx_populate_features(cgx);

	mutex_init(&cgx->lock);

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
err_free_irq_vectors:
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	if (cgx) {
		cgx_lmac_exit(cgx);
		list_del(&cgx->cgx_list);
	}
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};