// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */
0008 #include <linux/module.h>
0009 #include <linux/interrupt.h>
0010 #include <linux/pci.h>
0011 #include <linux/etherdevice.h>
0012 #include <linux/of.h>
0013 #include <linux/if_vlan.h>
0014 #include <linux/iommu.h>
0015 #include <net/ip.h>
0016 #include <linux/bpf.h>
0017 #include <linux/bpf_trace.h>
0018
0019 #include "otx2_reg.h"
0020 #include "otx2_common.h"
0021 #include "otx2_txrx.h"
0022 #include "otx2_struct.h"
0023 #include "otx2_ptp.h"
0024 #include "cn10k.h"
0025 #include <rvu_trace.h>
0026
0027 #define DRV_NAME "rvu_nicpf"
0028 #define DRV_STRING "Marvell RVU NIC Physical Function Driver"
0029
/* Supported devices */
0031 static const struct pci_device_id otx2_pf_id_table[] = {
0032 { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_RVU_PF) },
0033 { 0, }
0034 };
0035
0036 MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
0037 MODULE_DESCRIPTION(DRV_STRING);
0038 MODULE_LICENSE("GPL v2");
0039 MODULE_DEVICE_TABLE(pci, otx2_pf_id_table);
0040
0041 static void otx2_vf_link_event_task(struct work_struct *work);
0042
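/* Source of a queued mailbox message: AF (PF <-> AF mbox) or a VF (PF <-> VF mbox) */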
0043 enum {
0044 TYPE_PFAF,
0045 TYPE_PFVF,
0046 };
0047
0048 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
0049 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
0050
0051 static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
0052 {
0053 struct otx2_nic *pf = netdev_priv(netdev);
0054 bool if_up = netif_running(netdev);
0055 int err = 0;
0056
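	/* Jumbo frames are not yet supported when an XDP program is attached */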
0057 if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
0058 netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
0059 netdev->mtu);
0060 return -EINVAL;
0061 }
0062 if (if_up)
0063 otx2_stop(netdev);
0064
0065 netdev_info(netdev, "Changing MTU from %d to %d\n",
0066 netdev->mtu, new_mtu);
0067 netdev->mtu = new_mtu;
0068
0069 if (if_up)
0070 err = otx2_open(netdev);
0071
0072 return err;
0073 }
0074
0075 static void otx2_disable_flr_me_intr(struct otx2_nic *pf)
0076 {
0077 int irq, vfs = pf->total_vfs;
0078
	/* Disable VFs ME interrupts */
0080 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));
0081 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0);
0082 free_irq(irq, pf);
0083
	/* Disable VFs FLR interrupts */
0085 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
0086 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0);
0087 free_irq(irq, pf);
0088
0089 if (vfs <= 64)
0090 return;
0091
0092 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
0093 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME1);
0094 free_irq(irq, pf);
0095
0096 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
0097 irq = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR1);
0098 free_irq(irq, pf);
0099 }
0100
0101 static void otx2_flr_wq_destroy(struct otx2_nic *pf)
0102 {
0103 if (!pf->flr_wq)
0104 return;
0105 destroy_workqueue(pf->flr_wq);
0106 pf->flr_wq = NULL;
0107 devm_kfree(pf->dev, pf->flr_wrk);
0108 }
0109
0110 static void otx2_flr_handler(struct work_struct *work)
0111 {
0112 struct flr_work *flrwork = container_of(work, struct flr_work, work);
0113 struct otx2_nic *pf = flrwork->pf;
0114 struct mbox *mbox = &pf->mbox;
0115 struct msg_req *req;
0116 int vf, reg = 0;
0117
0118 vf = flrwork - pf->flr_wrk;
0119
0120 mutex_lock(&mbox->lock);
0121 req = otx2_mbox_alloc_msg_vf_flr(mbox);
0122 if (!req) {
0123 mutex_unlock(&mbox->lock);
0124 return;
0125 }
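	/* Encode the target VF (index + 1) in the request's pcifunc */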
0126 req->hdr.pcifunc &= RVU_PFVF_FUNC_MASK;
0127 req->hdr.pcifunc |= (vf + 1) & RVU_PFVF_FUNC_MASK;
0128
0129 if (!otx2_sync_mbox_msg(&pf->mbox)) {
0130 if (vf >= 64) {
0131 reg = 1;
0132 vf = vf - 64;
0133 }
		/* Clear the transaction pending bit */
0135 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
0136 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf));
0137 }
0138
0139 mutex_unlock(&mbox->lock);
0140 }
0141
0142 static irqreturn_t otx2_pf_flr_intr_handler(int irq, void *pf_irq)
0143 {
0144 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
0145 int reg, dev, vf, start_vf, num_reg = 1;
0146 u64 intr;
0147
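	/* Each FLR interrupt register covers 64 VFs */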
0148 if (pf->total_vfs > 64)
0149 num_reg = 2;
0150
0151 for (reg = 0; reg < num_reg; reg++) {
0152 intr = otx2_read64(pf, RVU_PF_VFFLR_INTX(reg));
0153 if (!intr)
0154 continue;
0155 start_vf = 64 * reg;
0156 for (vf = 0; vf < 64; vf++) {
0157 if (!(intr & BIT_ULL(vf)))
0158 continue;
0159 dev = vf + start_vf;
0160 queue_work(pf->flr_wq, &pf->flr_wrk[dev].work);
			/* Clear the interrupt */
0162 otx2_write64(pf, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf));
			/* Disable the interrupt until the FLR work handler re-enables it */
0164 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1CX(reg),
0165 BIT_ULL(vf));
0166 }
0167 }
0168 return IRQ_HANDLED;
0169 }
0170
0171 static irqreturn_t otx2_pf_me_intr_handler(int irq, void *pf_irq)
0172 {
0173 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
0174 int vf, reg, num_reg = 1;
0175 u64 intr;
0176
0177 if (pf->total_vfs > 64)
0178 num_reg = 2;
0179
0180 for (reg = 0; reg < num_reg; reg++) {
0181 intr = otx2_read64(pf, RVU_PF_VFME_INTX(reg));
0182 if (!intr)
0183 continue;
0184 for (vf = 0; vf < 64; vf++) {
0185 if (!(intr & BIT_ULL(vf)))
0186 continue;
			/* Clear the transaction pending bit */
0188 otx2_write64(pf, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf));
			/* Clear the interrupt */
0190 otx2_write64(pf, RVU_PF_VFME_INTX(reg), BIT_ULL(vf));
0191 }
0192 }
0193 return IRQ_HANDLED;
0194 }
0195
0196 static int otx2_register_flr_me_intr(struct otx2_nic *pf, int numvfs)
0197 {
0198 struct otx2_hw *hw = &pf->hw;
0199 char *irq_name;
0200 int ret;
0201
	/* Register ME interrupt handler */
0203 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME0 * NAME_SIZE];
0204 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME0", rvu_get_pf(pf->pcifunc));
0205 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFME0),
0206 otx2_pf_me_intr_handler, 0, irq_name, pf);
0207 if (ret) {
0208 dev_err(pf->dev,
0209 "RVUPF: IRQ registration failed for ME0\n");
0210 }
0211
	/* Register FLR interrupt handler */
0213 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR0 * NAME_SIZE];
0214 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR0", rvu_get_pf(pf->pcifunc));
0215 ret = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFFLR0),
0216 otx2_pf_flr_intr_handler, 0, irq_name, pf);
0217 if (ret) {
0218 dev_err(pf->dev,
0219 "RVUPF: IRQ registration failed for FLR0\n");
0220 return ret;
0221 }
0222
0223 if (numvfs > 64) {
0224 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFME1 * NAME_SIZE];
0225 snprintf(irq_name, NAME_SIZE, "RVUPF%d_ME1",
0226 rvu_get_pf(pf->pcifunc));
0227 ret = request_irq(pci_irq_vector
0228 (pf->pdev, RVU_PF_INT_VEC_VFME1),
0229 otx2_pf_me_intr_handler, 0, irq_name, pf);
0230 if (ret) {
0231 dev_err(pf->dev,
0232 "RVUPF: IRQ registration failed for ME1\n");
0233 }
0234 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFFLR1 * NAME_SIZE];
0235 snprintf(irq_name, NAME_SIZE, "RVUPF%d_FLR1",
0236 rvu_get_pf(pf->pcifunc));
0237 ret = request_irq(pci_irq_vector
0238 (pf->pdev, RVU_PF_INT_VEC_VFFLR1),
0239 otx2_pf_flr_intr_handler, 0, irq_name, pf);
0240 if (ret) {
0241 dev_err(pf->dev,
0242 "RVUPF: IRQ registration failed for FLR1\n");
0243 return ret;
0244 }
0245 }
0246
	/* Clear any pending ME interrupts and enable them for the first 64 VFs */
0248 otx2_write64(pf, RVU_PF_VFME_INTX(0), INTR_MASK(numvfs));
0249 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(numvfs));
0250
	/* Clear any pending FLR interrupts and enable them for the first 64 VFs */
0252 otx2_write64(pf, RVU_PF_VFFLR_INTX(0), INTR_MASK(numvfs));
0253 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(numvfs));
0254
0255 if (numvfs > 64) {
0256 numvfs -= 64;
		/* Same for the remaining VFs (64..127) */
0258 otx2_write64(pf, RVU_PF_VFME_INTX(1), INTR_MASK(numvfs));
0259 otx2_write64(pf, RVU_PF_VFME_INT_ENA_W1SX(1),
0260 INTR_MASK(numvfs));
0261
0262 otx2_write64(pf, RVU_PF_VFFLR_INTX(1), INTR_MASK(numvfs));
0263 otx2_write64(pf, RVU_PF_VFFLR_INT_ENA_W1SX(1),
0264 INTR_MASK(numvfs));
0265 }
0266 return 0;
0267 }
0268
0269 static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
0270 {
0271 int vf;
0272
0273 pf->flr_wq = alloc_workqueue("otx2_pf_flr_wq",
0274 WQ_UNBOUND | WQ_HIGHPRI, 1);
0275 if (!pf->flr_wq)
0276 return -ENOMEM;
0277
0278 pf->flr_wrk = devm_kcalloc(pf->dev, num_vfs,
0279 sizeof(struct flr_work), GFP_KERNEL);
0280 if (!pf->flr_wrk) {
0281 destroy_workqueue(pf->flr_wq);
0282 return -ENOMEM;
0283 }
0284
0285 for (vf = 0; vf < num_vfs; vf++) {
0286 pf->flr_wrk[vf].pf = pf;
0287 INIT_WORK(&pf->flr_wrk[vf].work, otx2_flr_handler);
0288 }
0289
0290 return 0;
0291 }
0292
0293 static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
0294 int first, int mdevs, u64 intr, int type)
0295 {
0296 struct otx2_mbox_dev *mdev;
0297 struct otx2_mbox *mbox;
0298 struct mbox_hdr *hdr;
0299 int i;
0300
0301 for (i = first; i < mdevs; i++) {
		/* Interrupt bit positions are relative to 'first' */
0303 if (!(intr & BIT_ULL(i - first)))
0304 continue;
0305
0306 mbox = &mw->mbox;
0307 mdev = &mbox->dev[i];
0308 if (type == TYPE_PFAF)
0309 otx2_sync_mbox_bbuf(mbox, i);
0310 hdr = mdev->mbase + mbox->rx_start;
		/* hdr->num_msgs is cleared here so that it holds a valid
		 * count the next time the interrupt handler runs.
		 * mw[i].num_msgs and mw[i].up_num_msgs carry the message
		 * counts for the mbox work handlers to consume.
		 */
0318 if (hdr->num_msgs) {
0319 mw[i].num_msgs = hdr->num_msgs;
0320 hdr->num_msgs = 0;
0321 if (type == TYPE_PFAF)
0322 memset(mbox->hwbase + mbox->rx_start, 0,
0323 ALIGN(sizeof(struct mbox_hdr),
0324 sizeof(u64)));
0325
0326 queue_work(mbox_wq, &mw[i].mbox_wrk);
0327 }
0328
0329 mbox = &mw->mbox_up;
0330 mdev = &mbox->dev[i];
0331 if (type == TYPE_PFAF)
0332 otx2_sync_mbox_bbuf(mbox, i);
0333 hdr = mdev->mbase + mbox->rx_start;
0334 if (hdr->num_msgs) {
0335 mw[i].up_num_msgs = hdr->num_msgs;
0336 hdr->num_msgs = 0;
0337 if (type == TYPE_PFAF)
0338 memset(mbox->hwbase + mbox->rx_start, 0,
0339 ALIGN(sizeof(struct mbox_hdr),
0340 sizeof(u64)));
0341
0342 queue_work(mbox_wq, &mw[i].mbox_up_wrk);
0343 }
0344 }
0345 }
0346
0347 static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
0348 struct otx2_mbox *pfvf_mbox, void *bbuf_base,
0349 int devid)
0350 {
0351 struct otx2_mbox_dev *src_mdev = mdev;
0352 int offset;
0353
	/* Msgs are already copied, trigger the VF's mbox irq */
0355 smp_wmb();
0356
0357 offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
0358 writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
0359
	/* Restore VF's mbox bounce buffer region address */
0361 src_mdev->mbase = bbuf_base;
0362 }
0363
0364 static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
0365 struct otx2_mbox *src_mbox,
0366 int dir, int vf, int num_msgs)
0367 {
0368 struct otx2_mbox_dev *src_mdev, *dst_mdev;
0369 struct mbox_hdr *mbox_hdr;
0370 struct mbox_hdr *req_hdr;
0371 struct mbox *dst_mbox;
0372 int dst_size, err;
0373
0374 if (dir == MBOX_DIR_PFAF) {
		/* Use the VF's mailbox region directly as the PF's bounce
		 * buffer so that VF requests and AF responses don't have to
		 * be copied between the two mailbox regions.
		 */
0379 src_mdev = &src_mbox->dev[vf];
0380 mbox_hdr = src_mbox->hwbase +
0381 src_mbox->rx_start + (vf * MBOX_SIZE);
0382
0383 dst_mbox = &pf->mbox;
0384 dst_size = dst_mbox->mbox.tx_size -
0385 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
0386
0387 if (mbox_hdr->msg_size > dst_size || !mbox_hdr->msg_size)
0388 return -EINVAL;
0389
0390 dst_mdev = &dst_mbox->mbox.dev[0];
0391
0392 mutex_lock(&pf->mbox.lock);
0393 dst_mdev->mbase = src_mdev->mbase;
0394 dst_mdev->msg_size = mbox_hdr->msg_size;
0395 dst_mdev->num_msgs = num_msgs;
0396 err = otx2_sync_mbox_msg(dst_mbox);
0397
		/* -EIO means the AF did not respond at all. Any other error
		 * was set by the AF in the response messages themselves, so
		 * those responses are still forwarded to the VF.
		 */
0402 if (err == -EIO) {
0403 dev_warn(pf->dev,
0404 "AF not responding to VF%d messages\n", vf);
			/* Restore PF's bounce buffer and bail out */
0406 dst_mdev->mbase = pf->mbox.bbuf_base;
0407 mutex_unlock(&pf->mbox.lock);
0408 return err;
0409 }
0410
		/* At this point the AF has acked all of the VF's messages and
		 * the responses are already in the VF's mailbox region, so
		 * raise an interrupt to the VF.
		 */
0414 req_hdr = (struct mbox_hdr *)(dst_mdev->mbase +
0415 dst_mbox->mbox.rx_start);
0416 req_hdr->num_msgs = num_msgs;
0417
0418 otx2_forward_msg_pfvf(dst_mdev, &pf->mbox_pfvf[0].mbox,
0419 pf->mbox.bbuf_base, vf);
0420 mutex_unlock(&pf->mbox.lock);
0421 } else if (dir == MBOX_DIR_PFVF_UP) {
0422 src_mdev = &src_mbox->dev[0];
0423 mbox_hdr = src_mbox->hwbase + src_mbox->rx_start;
0424 req_hdr = (struct mbox_hdr *)(src_mdev->mbase +
0425 src_mbox->rx_start);
0426 req_hdr->num_msgs = num_msgs;
0427
0428 dst_mbox = &pf->mbox_pfvf[0];
0429 dst_size = dst_mbox->mbox_up.tx_size -
0430 ALIGN(sizeof(*mbox_hdr), MBOX_MSG_ALIGN);
0431
0432 if (mbox_hdr->msg_size > dst_size)
0433 return -EINVAL;
0434
0435 dst_mdev = &dst_mbox->mbox_up.dev[vf];
0436 dst_mdev->mbase = src_mdev->mbase;
0437 dst_mdev->msg_size = mbox_hdr->msg_size;
0438 dst_mdev->num_msgs = mbox_hdr->num_msgs;
0439 err = otx2_sync_mbox_up_msg(dst_mbox, vf);
0440 if (err) {
0441 dev_warn(pf->dev,
0442 "VF%d is not responding to mailbox\n", vf);
0443 return err;
0444 }
0445 } else if (dir == MBOX_DIR_VFPF_UP) {
0446 req_hdr = (struct mbox_hdr *)(src_mbox->dev[0].mbase +
0447 src_mbox->rx_start);
0448 req_hdr->num_msgs = num_msgs;
0449 otx2_forward_msg_pfvf(&pf->mbox_pfvf->mbox_up.dev[vf],
0450 &pf->mbox.mbox_up,
0451 pf->mbox_pfvf[vf].bbuf_base,
0452 0);
0453 }
0454
0455 return 0;
0456 }
0457
0458 static void otx2_pfvf_mbox_handler(struct work_struct *work)
0459 {
0460 struct mbox_msghdr *msg = NULL;
0461 int offset, vf_idx, id, err;
0462 struct otx2_mbox_dev *mdev;
0463 struct mbox_hdr *req_hdr;
0464 struct otx2_mbox *mbox;
0465 struct mbox *vf_mbox;
0466 struct otx2_nic *pf;
0467
0468 vf_mbox = container_of(work, struct mbox, mbox_wrk);
0469 pf = vf_mbox->pfvf;
0470 vf_idx = vf_mbox - pf->mbox_pfvf;
0471
0472 mbox = &pf->mbox_pfvf[0].mbox;
0473 mdev = &mbox->dev[vf_idx];
0474 req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
0475
0476 offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
0477
0478 for (id = 0; id < vf_mbox->num_msgs; id++) {
0479 msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
0480 offset);
0481
0482 if (msg->sig != OTX2_MBOX_REQ_SIG)
0483 goto inval_msg;
0484
		/* Set the VF's number in each of the messages */
0486 msg->pcifunc &= RVU_PFVF_FUNC_MASK;
0487 msg->pcifunc |= (vf_idx + 1) & RVU_PFVF_FUNC_MASK;
0488 offset = msg->next_msgoff;
0489 }
0490 err = otx2_forward_vf_mbox_msgs(pf, mbox, MBOX_DIR_PFAF, vf_idx,
0491 vf_mbox->num_msgs);
0492 if (err)
0493 goto inval_msg;
0494 return;
0495
0496 inval_msg:
0497 otx2_reply_invalid_msg(mbox, vf_idx, 0, msg->id);
0498 otx2_mbox_msg_send(mbox, vf_idx);
0499 }
0500
0501 static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
0502 {
0503 struct mbox *vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
0504 struct otx2_nic *pf = vf_mbox->pfvf;
0505 struct otx2_mbox_dev *mdev;
0506 int offset, id, vf_idx = 0;
0507 struct mbox_hdr *rsp_hdr;
0508 struct mbox_msghdr *msg;
0509 struct otx2_mbox *mbox;
0510
0511 vf_idx = vf_mbox - pf->mbox_pfvf;
0512 mbox = &pf->mbox_pfvf[0].mbox_up;
0513 mdev = &mbox->dev[vf_idx];
0514
0515 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
0516 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
0517
0518 for (id = 0; id < vf_mbox->up_num_msgs; id++) {
0519 msg = mdev->mbase + offset;
0520
0521 if (msg->id >= MBOX_MSG_MAX) {
0522 dev_err(pf->dev,
0523 "Mbox msg with unknown ID 0x%x\n", msg->id);
0524 goto end;
0525 }
0526
0527 if (msg->sig != OTX2_MBOX_RSP_SIG) {
0528 dev_err(pf->dev,
0529 "Mbox msg with wrong signature %x, ID 0x%x\n",
0530 msg->sig, msg->id);
0531 goto end;
0532 }
0533
0534 switch (msg->id) {
0535 case MBOX_MSG_CGX_LINK_EVENT:
0536 break;
0537 default:
0538 if (msg->rc)
0539 dev_err(pf->dev,
0540 "Mbox msg response has err %d, ID 0x%x\n",
0541 msg->rc, msg->id);
0542 break;
0543 }
0544
0545 end:
0546 offset = mbox->rx_start + msg->next_msgoff;
0547 if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
0548 __otx2_mbox_reset(mbox, 0);
0549 mdev->msgs_acked++;
0550 }
0551 }
0552
0553 static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
0554 {
0555 struct otx2_nic *pf = (struct otx2_nic *)(pf_irq);
0556 int vfs = pf->total_vfs;
0557 struct mbox *mbox;
0558 u64 intr;
0559
0560 mbox = pf->mbox_pfvf;
0561
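	/* VFs 64..127 are tracked by the second interrupt register */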
0562 if (vfs > 64) {
0563 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
0564 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
0565 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
0566 TYPE_PFVF);
0567 vfs -= 64;
0568 }
0569
0570 intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
0571 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
0572
0573 otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
0574
0575 trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
0576
0577 return IRQ_HANDLED;
0578 }
0579
0580 static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
0581 {
0582 void __iomem *hwbase;
0583 struct mbox *mbox;
0584 int err, vf;
0585 u64 base;
0586
0587 if (!numvfs)
0588 return -EINVAL;
0589
0590 pf->mbox_pfvf = devm_kcalloc(&pf->pdev->dev, numvfs,
0591 sizeof(struct mbox), GFP_KERNEL);
0592 if (!pf->mbox_pfvf)
0593 return -ENOMEM;
0594
0595 pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
0596 WQ_UNBOUND | WQ_HIGHPRI |
0597 WQ_MEM_RECLAIM, 1);
0598 if (!pf->mbox_pfvf_wq)
0599 return -ENOMEM;
0600
	/* On CN10K the PF <-> VF mailbox region follows the PF <-> AF
	 * region in BAR4; on older silicon its address is read from
	 * RVU_PF_VF_BAR4_ADDR.
	 */
0604 if (test_bit(CN10K_MBOX, &pf->hw.cap_flag))
0605 base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
0606 MBOX_SIZE;
0607 else
0608 base = readq((void __iomem *)((u64)pf->reg_base +
0609 RVU_PF_VF_BAR4_ADDR));
0610
0611 hwbase = ioremap_wc(base, MBOX_SIZE * pf->total_vfs);
0612 if (!hwbase) {
0613 err = -ENOMEM;
0614 goto free_wq;
0615 }
0616
0617 mbox = &pf->mbox_pfvf[0];
0618 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
0619 MBOX_DIR_PFVF, numvfs);
0620 if (err)
0621 goto free_iomem;
0622
0623 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
0624 MBOX_DIR_PFVF_UP, numvfs);
0625 if (err)
0626 goto free_iomem;
0627
0628 for (vf = 0; vf < numvfs; vf++) {
0629 mbox->pfvf = pf;
0630 INIT_WORK(&mbox->mbox_wrk, otx2_pfvf_mbox_handler);
0631 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfvf_mbox_up_handler);
0632 mbox++;
0633 }
0634
0635 return 0;
0636
0637 free_iomem:
0638 if (hwbase)
0639 iounmap(hwbase);
0640 free_wq:
0641 destroy_workqueue(pf->mbox_pfvf_wq);
0642 return err;
0643 }
0644
0645 static void otx2_pfvf_mbox_destroy(struct otx2_nic *pf)
0646 {
0647 struct mbox *mbox = &pf->mbox_pfvf[0];
0648
0649 if (!mbox)
0650 return;
0651
0652 if (pf->mbox_pfvf_wq) {
0653 destroy_workqueue(pf->mbox_pfvf_wq);
0654 pf->mbox_pfvf_wq = NULL;
0655 }
0656
0657 if (mbox->mbox.hwbase)
0658 iounmap(mbox->mbox.hwbase);
0659
0660 otx2_mbox_destroy(&mbox->mbox);
0661 }
0662
0663 static void otx2_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
0664 {
	/* Clear PF <=> VF mailbox IRQs */
0666 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
0667 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
0668
	/* Enable PF <=> VF mailbox IRQs */
0670 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(numvfs));
0671 if (numvfs > 64) {
0672 numvfs -= 64;
0673 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1),
0674 INTR_MASK(numvfs));
0675 }
0676 }
0677
0678 static void otx2_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
0679 {
0680 int vector;
0681
	/* Disable PF <=> VF mailbox IRQs */
0683 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), ~0ull);
0684 otx2_write64(pf, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), ~0ull);
0685
0686 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), ~0ull);
0687 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0);
0688 free_irq(vector, pf);
0689
0690 if (numvfs > 64) {
0691 otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), ~0ull);
0692 vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX1);
0693 free_irq(vector, pf);
0694 }
0695 }
0696
0697 static int otx2_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
0698 {
0699 struct otx2_hw *hw = &pf->hw;
0700 char *irq_name;
0701 int err;
0702
	/* Register MBOX0 interrupt handler */
0704 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX0 * NAME_SIZE];
0705 if (pf->pcifunc)
0706 snprintf(irq_name, NAME_SIZE,
0707 "RVUPF%d_VF Mbox0", rvu_get_pf(pf->pcifunc));
0708 else
0709 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox0");
0710 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_VFPF_MBOX0),
0711 otx2_pfvf_mbox_intr_handler, 0, irq_name, pf);
0712 if (err) {
0713 dev_err(pf->dev,
0714 "RVUPF: IRQ registration failed for PFVF mbox0 irq\n");
0715 return err;
0716 }
0717
0718 if (numvfs > 64) {
		/* Register MBOX1 interrupt handler */
0720 irq_name = &hw->irq_name[RVU_PF_INT_VEC_VFPF_MBOX1 * NAME_SIZE];
0721 if (pf->pcifunc)
0722 snprintf(irq_name, NAME_SIZE,
0723 "RVUPF%d_VF Mbox1", rvu_get_pf(pf->pcifunc));
0724 else
0725 snprintf(irq_name, NAME_SIZE, "RVUPF_VF Mbox1");
0726 err = request_irq(pci_irq_vector(pf->pdev,
0727 RVU_PF_INT_VEC_VFPF_MBOX1),
0728 otx2_pfvf_mbox_intr_handler,
0729 0, irq_name, pf);
0730 if (err) {
0731 dev_err(pf->dev,
0732 "RVUPF: IRQ registration failed for PFVF mbox1 irq\n");
0733 return err;
0734 }
0735 }
0736
0737 otx2_enable_pfvf_mbox_intr(pf, numvfs);
0738
0739 return 0;
0740 }
0741
0742 static void otx2_process_pfaf_mbox_msg(struct otx2_nic *pf,
0743 struct mbox_msghdr *msg)
0744 {
0745 int devid;
0746
0747 if (msg->id >= MBOX_MSG_MAX) {
0748 dev_err(pf->dev,
0749 "Mbox msg with unknown ID 0x%x\n", msg->id);
0750 return;
0751 }
0752
0753 if (msg->sig != OTX2_MBOX_RSP_SIG) {
0754 dev_err(pf->dev,
0755 "Mbox msg with wrong signature %x, ID 0x%x\n",
0756 msg->sig, msg->id);
0757 return;
0758 }
0759
	/* Responses to messages forwarded on behalf of a VF */
0761 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
0762 if (devid) {
0763 struct otx2_vf_config *config = &pf->vf_configs[devid - 1];
0764 struct delayed_work *dwork;
0765
0766 switch (msg->id) {
0767 case MBOX_MSG_NIX_LF_START_RX:
0768 config->intf_down = false;
0769 dwork = &config->link_event_work;
0770 schedule_delayed_work(dwork, msecs_to_jiffies(100));
0771 break;
0772 case MBOX_MSG_NIX_LF_STOP_RX:
0773 config->intf_down = true;
0774 break;
0775 }
0776
0777 return;
0778 }
0779
0780 switch (msg->id) {
0781 case MBOX_MSG_READY:
0782 pf->pcifunc = msg->pcifunc;
0783 break;
0784 case MBOX_MSG_MSIX_OFFSET:
0785 mbox_handler_msix_offset(pf, (struct msix_offset_rsp *)msg);
0786 break;
0787 case MBOX_MSG_NPA_LF_ALLOC:
0788 mbox_handler_npa_lf_alloc(pf, (struct npa_lf_alloc_rsp *)msg);
0789 break;
0790 case MBOX_MSG_NIX_LF_ALLOC:
0791 mbox_handler_nix_lf_alloc(pf, (struct nix_lf_alloc_rsp *)msg);
0792 break;
0793 case MBOX_MSG_NIX_TXSCH_ALLOC:
0794 mbox_handler_nix_txsch_alloc(pf,
0795 (struct nix_txsch_alloc_rsp *)msg);
0796 break;
0797 case MBOX_MSG_NIX_BP_ENABLE:
0798 mbox_handler_nix_bp_enable(pf, (struct nix_bp_cfg_rsp *)msg);
0799 break;
0800 case MBOX_MSG_CGX_STATS:
0801 mbox_handler_cgx_stats(pf, (struct cgx_stats_rsp *)msg);
0802 break;
0803 case MBOX_MSG_CGX_FEC_STATS:
0804 mbox_handler_cgx_fec_stats(pf, (struct cgx_fec_stats_rsp *)msg);
0805 break;
0806 default:
0807 if (msg->rc)
0808 dev_err(pf->dev,
0809 "Mbox msg response has err %d, ID 0x%x\n",
0810 msg->rc, msg->id);
0811 break;
0812 }
0813 }
0814
0815 static void otx2_pfaf_mbox_handler(struct work_struct *work)
0816 {
0817 struct otx2_mbox_dev *mdev;
0818 struct mbox_hdr *rsp_hdr;
0819 struct mbox_msghdr *msg;
0820 struct otx2_mbox *mbox;
0821 struct mbox *af_mbox;
0822 struct otx2_nic *pf;
0823 int offset, id;
0824
0825 af_mbox = container_of(work, struct mbox, mbox_wrk);
0826 mbox = &af_mbox->mbox;
0827 mdev = &mbox->dev[0];
0828 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
0829
0830 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
0831 pf = af_mbox->pfvf;
0832
0833 for (id = 0; id < af_mbox->num_msgs; id++) {
0834 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
0835 otx2_process_pfaf_mbox_msg(pf, msg);
0836 offset = mbox->rx_start + msg->next_msgoff;
0837 if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
0838 __otx2_mbox_reset(mbox, 0);
0839 mdev->msgs_acked++;
0840 }
0841
0842 }
0843
0844 static void otx2_handle_link_event(struct otx2_nic *pf)
0845 {
0846 struct cgx_link_user_info *linfo = &pf->linfo;
0847 struct net_device *netdev = pf->netdev;
0848
0849 pr_info("%s NIC Link is %s %d Mbps %s duplex\n", netdev->name,
0850 linfo->link_up ? "UP" : "DOWN", linfo->speed,
0851 linfo->full_duplex ? "Full" : "Half");
0852 if (linfo->link_up) {
0853 netif_carrier_on(netdev);
0854 netif_tx_start_all_queues(netdev);
0855 } else {
0856 netif_tx_stop_all_queues(netdev);
0857 netif_carrier_off(netdev);
0858 }
0859 }
0860
0861 int otx2_mbox_up_handler_cgx_link_event(struct otx2_nic *pf,
0862 struct cgx_link_info_msg *msg,
0863 struct msg_rsp *rsp)
0864 {
0865 int i;
0866
	/* Copy the link info sent by AF */
0868 pf->linfo = msg->link_info;
0869
	/* Notify VFs about the link event */
0871 for (i = 0; i < pci_num_vf(pf->pdev); i++) {
0872 struct otx2_vf_config *config = &pf->vf_configs[i];
0873 struct delayed_work *dwork = &config->link_event_work;
0874
0875 if (config->intf_down)
0876 continue;
0877
0878 schedule_delayed_work(dwork, msecs_to_jiffies(100));
0879 }
0880
	/* The PF interface itself is still down; skip carrier update */
0882 if (pf->flags & OTX2_FLAG_INTF_DOWN)
0883 return 0;
0884
0885 otx2_handle_link_event(pf);
0886 return 0;
0887 }
0888
0889 static int otx2_process_mbox_msg_up(struct otx2_nic *pf,
0890 struct mbox_msghdr *req)
0891 {
	/* Check if it is a valid request; if not, reply with an invalid msg */
0893 if (req->sig != OTX2_MBOX_REQ_SIG) {
0894 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
0895 return -ENODEV;
0896 }
0897
0898 switch (req->id) {
0899 #define M(_name, _id, _fn_name, _req_type, _rsp_type) \
0900 case _id: { \
0901 struct _rsp_type *rsp; \
0902 int err; \
0903 \
0904 rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \
0905 &pf->mbox.mbox_up, 0, \
0906 sizeof(struct _rsp_type)); \
0907 if (!rsp) \
0908 return -ENOMEM; \
0909 \
0910 rsp->hdr.id = _id; \
0911 rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \
0912 rsp->hdr.pcifunc = 0; \
0913 rsp->hdr.rc = 0; \
0914 \
0915 err = otx2_mbox_up_handler_ ## _fn_name( \
0916 pf, (struct _req_type *)req, rsp); \
0917 return err; \
0918 }
0919 MBOX_UP_CGX_MESSAGES
0920 #undef M
0921 break;
0922 default:
0923 otx2_reply_invalid_msg(&pf->mbox.mbox_up, 0, 0, req->id);
0924 return -ENODEV;
0925 }
0926 return 0;
0927 }
0928
0929 static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
0930 {
0931 struct mbox *af_mbox = container_of(work, struct mbox, mbox_up_wrk);
0932 struct otx2_mbox *mbox = &af_mbox->mbox_up;
0933 struct otx2_mbox_dev *mdev = &mbox->dev[0];
0934 struct otx2_nic *pf = af_mbox->pfvf;
0935 int offset, id, devid = 0;
0936 struct mbox_hdr *rsp_hdr;
0937 struct mbox_msghdr *msg;
0938
0939 rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
0940
0941 offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
0942
0943 for (id = 0; id < af_mbox->up_num_msgs; id++) {
0944 msg = (struct mbox_msghdr *)(mdev->mbase + offset);
0945
0946 devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
0947
0948 if (!devid)
0949 otx2_process_mbox_msg_up(pf, msg);
0950 offset = mbox->rx_start + msg->next_msgoff;
0951 }
0952 if (devid) {
0953 otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
0954 MBOX_DIR_PFVF_UP, devid - 1,
0955 af_mbox->up_num_msgs);
0956 return;
0957 }
0958
0959 otx2_mbox_msg_send(mbox, 0);
0960 }
0961
0962 static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
0963 {
0964 struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
0965 struct mbox *mbox;
0966
	/* Clear the IRQ */
0968 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
0969
0970 mbox = &pf->mbox;
0971
0972 trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
0973
0974 otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
0975
0976 return IRQ_HANDLED;
0977 }
0978
0979 static void otx2_disable_mbox_intr(struct otx2_nic *pf)
0980 {
0981 int vector = pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX);
0982
0983
0984 otx2_write64(pf, RVU_PF_INT_ENA_W1C, BIT_ULL(0));
0985 free_irq(vector, pf);
0986 }
0987
0988 static int otx2_register_mbox_intr(struct otx2_nic *pf, bool probe_af)
0989 {
0990 struct otx2_hw *hw = &pf->hw;
0991 struct msg_req *req;
0992 char *irq_name;
0993 int err;
0994
	/* Register mailbox interrupt handler */
0996 irq_name = &hw->irq_name[RVU_PF_INT_VEC_AFPF_MBOX * NAME_SIZE];
0997 snprintf(irq_name, NAME_SIZE, "RVUPFAF Mbox");
0998 err = request_irq(pci_irq_vector(pf->pdev, RVU_PF_INT_VEC_AFPF_MBOX),
0999 otx2_pfaf_mbox_intr_handler, 0, irq_name, pf);
1000 if (err) {
1001 dev_err(pf->dev,
1002 "RVUPF: IRQ registration failed for PFAF mbox irq\n");
1003 return err;
1004 }
1005
	/* Enable mailbox interrupts for messages coming from AF.
	 * Clear any pending interrupt first to avoid a spurious one.
	 */
1009 otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
1010 otx2_write64(pf, RVU_PF_INT_ENA_W1S, BIT_ULL(0));
1011
1012 if (!probe_af)
1013 return 0;
1014
	/* Check mailbox communication with AF */
1016 req = otx2_mbox_alloc_msg_ready(&pf->mbox);
1017 if (!req) {
1018 otx2_disable_mbox_intr(pf);
1019 return -ENOMEM;
1020 }
1021 err = otx2_sync_mbox_msg(&pf->mbox);
1022 if (err) {
1023 dev_warn(pf->dev,
1024 "AF not responding to mailbox, deferring probe\n");
1025 otx2_disable_mbox_intr(pf);
1026 return -EPROBE_DEFER;
1027 }
1028
1029 return 0;
1030 }
1031
1032 static void otx2_pfaf_mbox_destroy(struct otx2_nic *pf)
1033 {
1034 struct mbox *mbox = &pf->mbox;
1035
1036 if (pf->mbox_wq) {
1037 destroy_workqueue(pf->mbox_wq);
1038 pf->mbox_wq = NULL;
1039 }
1040
1041 if (mbox->mbox.hwbase)
1042 iounmap((void __iomem *)mbox->mbox.hwbase);
1043
1044 otx2_mbox_destroy(&mbox->mbox);
1045 otx2_mbox_destroy(&mbox->mbox_up);
1046 }
1047
1048 static int otx2_pfaf_mbox_init(struct otx2_nic *pf)
1049 {
1050 struct mbox *mbox = &pf->mbox;
1051 void __iomem *hwbase;
1052 int err;
1053
1054 mbox->pfvf = pf;
1055 pf->mbox_wq = alloc_workqueue("otx2_pfaf_mailbox",
1056 WQ_UNBOUND | WQ_HIGHPRI |
1057 WQ_MEM_RECLAIM, 1);
1058 if (!pf->mbox_wq)
1059 return -ENOMEM;
1060
	/* The mailbox is a RAM region shared between the admin function (AF)
	 * and this PF; map it write-combining rather than as device memory
	 * so that unaligned accesses are allowed.
	 */
1065 hwbase = ioremap_wc(pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM),
1066 MBOX_SIZE);
1067 if (!hwbase) {
1068 dev_err(pf->dev, "Unable to map PFAF mailbox region\n");
1069 err = -ENOMEM;
1070 goto exit;
1071 }
1072
1073 err = otx2_mbox_init(&mbox->mbox, hwbase, pf->pdev, pf->reg_base,
1074 MBOX_DIR_PFAF, 1);
1075 if (err)
1076 goto exit;
1077
1078 err = otx2_mbox_init(&mbox->mbox_up, hwbase, pf->pdev, pf->reg_base,
1079 MBOX_DIR_PFAF_UP, 1);
1080 if (err)
1081 goto exit;
1082
1083 err = otx2_mbox_bbuf_init(mbox, pf->pdev);
1084 if (err)
1085 goto exit;
1086
1087 INIT_WORK(&mbox->mbox_wrk, otx2_pfaf_mbox_handler);
1088 INIT_WORK(&mbox->mbox_up_wrk, otx2_pfaf_mbox_up_handler);
1089 mutex_init(&mbox->lock);
1090
1091 return 0;
1092 exit:
1093 otx2_pfaf_mbox_destroy(pf);
1094 return err;
1095 }
1096
1097 static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
1098 {
1099 struct msg_req *msg;
1100 int err;
1101
1102 mutex_lock(&pf->mbox.lock);
1103 if (enable)
1104 msg = otx2_mbox_alloc_msg_cgx_start_linkevents(&pf->mbox);
1105 else
1106 msg = otx2_mbox_alloc_msg_cgx_stop_linkevents(&pf->mbox);
1107
1108 if (!msg) {
1109 mutex_unlock(&pf->mbox.lock);
1110 return -ENOMEM;
1111 }
1112
1113 err = otx2_sync_mbox_msg(&pf->mbox);
1114 mutex_unlock(&pf->mbox.lock);
1115 return err;
1116 }
1117
1118 static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
1119 {
1120 struct msg_req *msg;
1121 int err;
1122
1123 if (enable && !bitmap_empty(pf->flow_cfg->dmacflt_bmap,
1124 pf->flow_cfg->dmacflt_max_flows))
1125 netdev_warn(pf->netdev,
1126 "CGX/RPM internal loopback might not work as DMAC filters are active\n");
1127
1128 mutex_lock(&pf->mbox.lock);
1129 if (enable)
1130 msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
1131 else
1132 msg = otx2_mbox_alloc_msg_cgx_intlbk_disable(&pf->mbox);
1133
1134 if (!msg) {
1135 mutex_unlock(&pf->mbox.lock);
1136 return -ENOMEM;
1137 }
1138
1139 err = otx2_sync_mbox_msg(&pf->mbox);
1140 mutex_unlock(&pf->mbox.lock);
1141 return err;
1142 }
1143
1144 int otx2_set_real_num_queues(struct net_device *netdev,
1145 int tx_queues, int rx_queues)
1146 {
1147 int err;
1148
1149 err = netif_set_real_num_tx_queues(netdev, tx_queues);
1150 if (err) {
1151 netdev_err(netdev,
1152 "Failed to set no of Tx queues: %d\n", tx_queues);
1153 return err;
1154 }
1155
1156 err = netif_set_real_num_rx_queues(netdev, rx_queues);
1157 if (err)
1158 netdev_err(netdev,
1159 "Failed to set no of Rx queues: %d\n", rx_queues);
1160 return err;
1161 }
1162 EXPORT_SYMBOL(otx2_set_real_num_queues);
1163
1164 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
1165 {
1166 struct otx2_nic *pf = data;
1167 u64 val, *ptr;
1168 u64 qidx = 0;
1169
	/* Check completion queue (CQ) error interrupts */
1171 for (qidx = 0; qidx < pf->qset.cq_cnt; qidx++) {
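		/* Target CQ index is encoded in bits [63:44] of the atomic op data */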
1172 ptr = otx2_get_regaddr(pf, NIX_LF_CQ_OP_INT);
1173 val = otx2_atomic64_add((qidx << 44), ptr);
1174
1175 otx2_write64(pf, NIX_LF_CQ_OP_INT, (qidx << 44) |
1176 (val & NIX_CQERRINT_BITS));
1177 if (!(val & (NIX_CQERRINT_BITS | BIT_ULL(42))))
1178 continue;
1179
1180 if (val & BIT_ULL(42)) {
1181 netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1182 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1183 } else {
1184 if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
1185 netdev_err(pf->netdev, "CQ%lld: Doorbell error",
1186 qidx);
1187 if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
1188 netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
1189 qidx);
1190 }
1191
1192 schedule_work(&pf->reset_task);
1193 }
1194
	/* Check send queue (SQ) error interrupts */
1196 for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
1197 ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
1198 val = otx2_atomic64_add((qidx << 44), ptr);
1199 otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
1200 (val & NIX_SQINT_BITS));
1201
1202 if (!(val & (NIX_SQINT_BITS | BIT_ULL(42))))
1203 continue;
1204
1205 if (val & BIT_ULL(42)) {
1206 netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
1207 qidx, otx2_read64(pf, NIX_LF_ERR_INT));
1208 } else {
1209 if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) {
1210 netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx",
1211 qidx,
1212 otx2_read64(pf,
1213 NIX_LF_SQ_OP_ERR_DBG));
1214 otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG,
1215 BIT_ULL(44));
1216 }
1217 if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) {
				netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DBG:0x%llx\n",
1219 qidx,
1220 otx2_read64(pf, NIX_LF_MNQ_ERR_DBG));
1221 otx2_write64(pf, NIX_LF_MNQ_ERR_DBG,
1222 BIT_ULL(44));
1223 }
1224 if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) {
1225 netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx",
1226 qidx,
1227 otx2_read64(pf,
1228 NIX_LF_SEND_ERR_DBG));
1229 otx2_write64(pf, NIX_LF_SEND_ERR_DBG,
1230 BIT_ULL(44));
1231 }
1232 if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL))
1233 netdev_err(pf->netdev, "SQ%lld: SQB allocation failed",
1234 qidx);
1235 }
1236
1237 schedule_work(&pf->reset_task);
1238 }
1239
1240 return IRQ_HANDLED;
1241 }
1242
1243 static irqreturn_t otx2_cq_intr_handler(int irq, void *cq_irq)
1244 {
1245 struct otx2_cq_poll *cq_poll = (struct otx2_cq_poll *)cq_irq;
1246 struct otx2_nic *pf = (struct otx2_nic *)cq_poll->dev;
1247 int qidx = cq_poll->cint_idx;
1248
	/* Disable this completion interrupt here; it is re-enabled
	 * once NAPI has finished processing the mapped CQs.
	 */
1254 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
1255
	/* Schedule NAPI */
1257 pf->napi_events++;
1258 napi_schedule_irqoff(&cq_poll->napi);
1259
1260 return IRQ_HANDLED;
1261 }
1262
1263 static void otx2_disable_napi(struct otx2_nic *pf)
1264 {
1265 struct otx2_qset *qset = &pf->qset;
1266 struct otx2_cq_poll *cq_poll;
1267 int qidx;
1268
1269 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1270 cq_poll = &qset->napi[qidx];
1271 cancel_work_sync(&cq_poll->dim.work);
1272 napi_disable(&cq_poll->napi);
1273 netif_napi_del(&cq_poll->napi);
1274 }
1275 }
1276
1277 static void otx2_free_cq_res(struct otx2_nic *pf)
1278 {
1279 struct otx2_qset *qset = &pf->qset;
1280 struct otx2_cq_queue *cq;
1281 int qidx;
1282
	/* Disable CQs */
1284 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_CQ, false);
1285 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1286 cq = &qset->cq[qidx];
1287 qmem_free(pf->dev, cq->cqe);
1288 }
1289 }
1290
1291 static void otx2_free_sq_res(struct otx2_nic *pf)
1292 {
1293 struct otx2_qset *qset = &pf->qset;
1294 struct otx2_snd_queue *sq;
1295 int qidx;
1296
	/* Disable SQs */
1298 otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
1299
1300 otx2_sq_free_sqbs(pf);
1301 for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
1302 sq = &qset->sq[qidx];
1303 qmem_free(pf->dev, sq->sqe);
1304 qmem_free(pf->dev, sq->tso_hdrs);
1305 kfree(sq->sg);
1306 kfree(sq->sqb_ptrs);
1307 }
1308 }
1309
1310 static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
1311 {
1312 int frame_size;
1313 int total_size;
1314 int rbuf_size;
1315
1316 if (pf->hw.rbuf_len)
1317 return ALIGN(pf->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;
1318
	/* NIX transfers the received frame (plus timestamp header, when
	 * enabled) to memory using up to six segments/buffers and writes a
	 * CQE_RX descriptor with those segment addresses. Software also
	 * reserves OTX2_HEAD_ROOM bytes of headroom in each segment, so the
	 * memory needed for an 'mtu' sized packet is:
	 *   frame size  = mtu + L2 header + timestamp
	 *   total size  = frame size + headroom * 6
	 *   buffer size = total size / 6
	 */
1331 frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
1332 total_size = frame_size + OTX2_HEAD_ROOM * 6;
1333 rbuf_size = total_size / 6;
1334
1335 return ALIGN(rbuf_size, 2048);
1336 }
1337
1338 static int otx2_init_hw_resources(struct otx2_nic *pf)
1339 {
1340 struct nix_lf_free_req *free_req;
1341 struct mbox *mbox = &pf->mbox;
1342 struct otx2_hw *hw = &pf->hw;
1343 struct msg_req *req;
1344 int err = 0, lvl;
1345
	/* Set the required NPA LF pool counts.
	 * Auras and pools are used in a 1:1 mapping,
	 * so aura count == pool count.
	 */
1350 hw->rqpool_cnt = hw->rx_queues;
1351 hw->sqpool_cnt = hw->tot_tx_queues;
1352 hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
1353
	/* Maximum hardware supported transmit length */
1355 pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
1356
1357 pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
1358
1359 mutex_lock(&mbox->lock);
	/* NPA init */
1361 err = otx2_config_npa(pf);
1362 if (err)
1363 goto exit;
1364
	/* NIX init */
1366 err = otx2_config_nix(pf);
1367 if (err)
1368 goto err_free_npa_lf;
1369
	/* Enable backpressure */
1371 otx2_nix_config_bp(pf, true);
1372
	/* Init auras and pools used by NIX RQs, for free buffer pointers */
1374 err = otx2_rq_aura_pool_init(pf);
1375 if (err) {
1376 mutex_unlock(&mbox->lock);
1377 goto err_free_nix_lf;
1378 }
1379
1380 err = otx2_sq_aura_pool_init(pf);
1381 if (err) {
1382 mutex_unlock(&mbox->lock);
1383 goto err_free_rq_ptrs;
1384 }
1385
1386 err = otx2_txsch_alloc(pf);
1387 if (err) {
1388 mutex_unlock(&mbox->lock);
1389 goto err_free_sq_ptrs;
1390 }
1391
1392 err = otx2_config_nix_queues(pf);
1393 if (err) {
1394 mutex_unlock(&mbox->lock);
1395 goto err_free_txsch;
1396 }
1397 for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
1398 err = otx2_txschq_config(pf, lvl);
1399 if (err) {
1400 mutex_unlock(&mbox->lock);
1401 goto err_free_nix_queues;
1402 }
1403 }
1404 mutex_unlock(&mbox->lock);
1405 return err;
1406
1407 err_free_nix_queues:
1408 otx2_free_sq_res(pf);
1409 otx2_free_cq_res(pf);
1410 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1411 err_free_txsch:
1412 if (otx2_txschq_stop(pf))
1413 dev_err(pf->dev, "%s failed to stop TX schedulers\n", __func__);
1414 err_free_sq_ptrs:
1415 otx2_sq_free_sqbs(pf);
1416 err_free_rq_ptrs:
1417 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1418 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1419 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1420 otx2_aura_pool_free(pf);
1421 err_free_nix_lf:
1422 mutex_lock(&mbox->lock);
1423 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1424 if (free_req) {
1425 free_req->flags = NIX_LF_DISABLE_FLOWS;
1426 if (otx2_sync_mbox_msg(mbox))
1427 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1428 }
1429 err_free_npa_lf:
	/* Reset NPA LF */
1431 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1432 if (req) {
1433 if (otx2_sync_mbox_msg(mbox))
1434 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1435 }
1436 exit:
1437 mutex_unlock(&mbox->lock);
1438 return err;
1439 }
1440
1441 static void otx2_free_hw_resources(struct otx2_nic *pf)
1442 {
1443 struct otx2_qset *qset = &pf->qset;
1444 struct nix_lf_free_req *free_req;
1445 struct mbox *mbox = &pf->mbox;
1446 struct otx2_cq_queue *cq;
1447 struct msg_req *req;
1448 int qidx, err;
1449
	/* Ensure all in-flight SQEs are processed */
1451 otx2_sqb_flush(pf);
1452
	/* Stop transmission by stopping/freeing the TX schedulers */
1454 err = otx2_txschq_stop(pf);
1455 if (err)
1456 dev_err(pf->dev, "RVUPF: Failed to stop/free TX schedulers\n");
1457
1458 mutex_lock(&mbox->lock);
	/* Disable backpressure */
1460 if (!(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1461 otx2_nix_config_bp(pf, false);
1462 mutex_unlock(&mbox->lock);
1463
	/* Disable RQs */
1465 otx2_ctx_disable(mbox, NIX_AQ_CTYPE_RQ, false);
1466
	/* Dequeue any pending CQEs */
1468 for (qidx = 0; qidx < qset->cq_cnt; qidx++) {
1469 cq = &qset->cq[qidx];
1470 if (cq->cq_type == CQ_RX)
1471 otx2_cleanup_rx_cqes(pf, cq);
1472 else
1473 otx2_cleanup_tx_cqes(pf, cq);
1474 }
1475
1476 otx2_free_sq_res(pf);
1477
	/* Free RQ buffer pointers */
1479 otx2_free_aura_ptr(pf, AURA_NIX_RQ);
1480
1481 otx2_free_cq_res(pf);
1482
	/* Free all allocated ingress bandwidth profiles */
1484 cn10k_free_all_ipolicers(pf);
1485
1486 mutex_lock(&mbox->lock);
	/* Reset NIX LF */
1488 free_req = otx2_mbox_alloc_msg_nix_lf_free(mbox);
1489 if (free_req) {
1490 free_req->flags = NIX_LF_DISABLE_FLOWS;
1491 if (!(pf->flags & OTX2_FLAG_PF_SHUTDOWN))
1492 free_req->flags |= NIX_LF_DONT_FREE_TX_VTAG;
1493 if (otx2_sync_mbox_msg(mbox))
1494 dev_err(pf->dev, "%s failed to free nixlf\n", __func__);
1495 }
1496 mutex_unlock(&mbox->lock);
1497
	/* Disable NPA pool and aura hw contexts */
1499 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_POOL, true);
1500 otx2_ctx_disable(mbox, NPA_AQ_CTYPE_AURA, true);
1501 otx2_aura_pool_free(pf);
1502
1503 mutex_lock(&mbox->lock);
	/* Reset NPA LF */
1505 req = otx2_mbox_alloc_msg_npa_lf_free(mbox);
1506 if (req) {
1507 if (otx2_sync_mbox_msg(mbox))
1508 dev_err(pf->dev, "%s failed to free npalf\n", __func__);
1509 }
1510 mutex_unlock(&mbox->lock);
1511 }
1512
1513 static void otx2_do_set_rx_mode(struct otx2_nic *pf)
1514 {
1515 struct net_device *netdev = pf->netdev;
1516 struct nix_rx_mode *req;
1517 bool promisc = false;
1518
1519 if (!(netdev->flags & IFF_UP))
1520 return;
1521
1522 if ((netdev->flags & IFF_PROMISC) ||
1523 (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
1524 promisc = true;
1525 }
1526
	/* Sync/unsync unicast addresses with the MCAM filters */
1528 if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
1529 __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
1530
1531 mutex_lock(&pf->mbox.lock);
1532 req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
1533 if (!req) {
1534 mutex_unlock(&pf->mbox.lock);
1535 return;
1536 }
1537
1538 req->mode = NIX_RX_MODE_UCAST;
1539
1540 if (promisc)
1541 req->mode |= NIX_RX_MODE_PROMISC;
1542 if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
1543 req->mode |= NIX_RX_MODE_ALLMULTI;
1544
1545 req->mode |= NIX_RX_MODE_USE_MCE;
1546
1547 otx2_sync_mbox_msg(&pf->mbox);
1548 mutex_unlock(&pf->mbox.lock);
1549 }
1550
1551 static void otx2_dim_work(struct work_struct *w)
1552 {
1553 struct dim_cq_moder cur_moder;
1554 struct otx2_cq_poll *cq_poll;
1555 struct otx2_nic *pfvf;
1556 struct dim *dim;
1557
1558 dim = container_of(w, struct dim, work);
1559 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
1560 cq_poll = container_of(dim, struct otx2_cq_poll, dim);
1561 pfvf = (struct otx2_nic *)cq_poll->dev;
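	/* Apply the new moderation profile, clamped to the HW timer and NAPI budget limits */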
1562 pfvf->hw.cq_time_wait = (cur_moder.usec > CQ_TIMER_THRESH_MAX) ?
1563 CQ_TIMER_THRESH_MAX : cur_moder.usec;
1564 pfvf->hw.cq_ecount_wait = (cur_moder.pkts > NAPI_POLL_WEIGHT) ?
1565 NAPI_POLL_WEIGHT : cur_moder.pkts;
1566 dim->state = DIM_START_MEASURE;
1567 }
1568
1569 int otx2_open(struct net_device *netdev)
1570 {
1571 struct otx2_nic *pf = netdev_priv(netdev);
1572 struct otx2_cq_poll *cq_poll = NULL;
1573 struct otx2_qset *qset = &pf->qset;
1574 int err = 0, qidx, vec;
1575 char *irq_name;
1576
1577 netif_carrier_off(netdev);
1578
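	/* Every RX, TX and XDP queue gets its own completion queue */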
1579 pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
1580
	/* RQs and SQs are mapped to different CQs,
	 * so find out the max CQ IRQs (i.e CINTs) needed.
	 */
1583 pf->hw.cint_cnt = max(pf->hw.rx_queues, pf->hw.tx_queues);
1584 qset->napi = kcalloc(pf->hw.cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
1585 if (!qset->napi)
1586 return -ENOMEM;
1587
	/* CQ size of RQ */
1589 qset->rqe_cnt = qset->rqe_cnt ? qset->rqe_cnt : Q_COUNT(Q_SIZE_256);
	/* CQ size of SQ */
1591 qset->sqe_cnt = qset->sqe_cnt ? qset->sqe_cnt : Q_COUNT(Q_SIZE_4K);
1592
1593 err = -ENOMEM;
1594 qset->cq = kcalloc(pf->qset.cq_cnt,
1595 sizeof(struct otx2_cq_queue), GFP_KERNEL);
1596 if (!qset->cq)
1597 goto err_free_mem;
1598
1599 qset->sq = kcalloc(pf->hw.tot_tx_queues,
1600 sizeof(struct otx2_snd_queue), GFP_KERNEL);
1601 if (!qset->sq)
1602 goto err_free_mem;
1603
1604 qset->rq = kcalloc(pf->hw.rx_queues,
1605 sizeof(struct otx2_rcv_queue), GFP_KERNEL);
1606 if (!qset->rq)
1607 goto err_free_mem;
1608
1609 err = otx2_init_hw_resources(pf);
1610 if (err)
1611 goto err_free_mem;
1612
	/* Register NAPI handler */
1614 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1615 cq_poll = &qset->napi[qidx];
1616 cq_poll->cint_idx = qidx;
		/* RQ0 & SQ0 are mapped to CINT0 and so on.
		 * 'cq_ids[CQ_RX]' points to the RQ's CQ,
		 * 'cq_ids[CQ_TX]' points to the SQ's CQ and
		 * 'cq_ids[CQ_XDP]' points to the XDP SQ's CQ, if any.
		 */
1622 cq_poll->cq_ids[CQ_RX] =
1623 (qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
1624 cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
1625 qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
1626 if (pf->xdp_prog)
1627 cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
1628 (qidx + pf->hw.rx_queues +
1629 pf->hw.tx_queues) :
1630 CINT_INVALID_CQ;
1631 else
1632 cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
1633
1634 cq_poll->dev = (void *)pf;
1635 cq_poll->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
1636 INIT_WORK(&cq_poll->dim.work, otx2_dim_work);
1637 netif_napi_add(netdev, &cq_poll->napi,
1638 otx2_napi_handler, NAPI_POLL_WEIGHT);
1639 napi_enable(&cq_poll->napi);
1640 }
1641
	/* Set maximum frame size allowed in HW */
1643 err = otx2_hw_set_mtu(pf, netdev->mtu);
1644 if (err)
1645 goto err_disable_napi;
1646
	/* Setup segmentation algorithms */
1648 otx2_setup_segmentation(pf);
1649
	/* Initialize RSS */
1651 err = otx2_rss_init(pf);
1652 if (err)
1653 goto err_disable_napi;
1654
	/* Register queue error (QINT) IRQ handler */
1656 vec = pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START;
1657 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1658
1659 snprintf(irq_name, NAME_SIZE, "%s-qerr", pf->netdev->name);
1660
1661 err = request_irq(pci_irq_vector(pf->pdev, vec),
1662 otx2_q_intr_handler, 0, irq_name, pf);
1663 if (err) {
1664 dev_err(pf->dev,
1665 "RVUPF%d: IRQ registration failed for QERR\n",
1666 rvu_get_pf(pf->pcifunc));
1667 goto err_disable_napi;
1668 }
1669
	/* Enable QINT IRQ */
1671 otx2_write64(pf, NIX_LF_QINTX_ENA_W1S(0), BIT_ULL(0));
1672
	/* Register CQ IRQ handlers */
1674 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1675 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
1676 irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
1677
1678 snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
1679 qidx);
1680
1681 err = request_irq(pci_irq_vector(pf->pdev, vec),
1682 otx2_cq_intr_handler, 0, irq_name,
1683 &qset->napi[qidx]);
1684 if (err) {
1685 dev_err(pf->dev,
1686 "RVUPF%d: IRQ registration failed for CQ%d\n",
1687 rvu_get_pf(pf->pcifunc), qidx);
1688 goto err_free_cints;
1689 }
1690 vec++;
1691
1692 otx2_config_irq_coalescing(pf, qidx);
1693
		/* Enable CQ IRQ */
1695 otx2_write64(pf, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
1696 otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
1697 }
1698
1699 otx2_set_cints_affinity(pf);
1700
1701 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
1702 otx2_enable_rxvlan(pf, true);
1703
	/* When reinitializing, re-enable timestamping if it was enabled before */
1705 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
1706 pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1707 otx2_config_hw_tx_tstamp(pf, true);
1708 }
1709 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
1710 pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1711 otx2_config_hw_rx_tstamp(pf, true);
1712 }
1713
1714 pf->flags &= ~OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any CPU */
1716 smp_wmb();
1717
	/* A link status notification may already have been received */
1719 if (pf->linfo.link_up && !(pf->pcifunc & RVU_PFVF_FUNC_MASK))
1720 otx2_handle_link_event(pf);
1721
	/* Reinstall DMAC filters */
1723 if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
1724 otx2_dmacflt_reinstall_flows(pf);
1725
1726 err = otx2_rxtx_enable(pf, true);
1727 if (err)
1728 goto err_tx_stop_queues;
1729
1730 otx2_do_set_rx_mode(pf);
1731
1732 return 0;
1733
1734 err_tx_stop_queues:
1735 netif_tx_stop_all_queues(netdev);
1736 netif_carrier_off(netdev);
1737 pf->flags |= OTX2_FLAG_INTF_DOWN;
1738 err_free_cints:
1739 otx2_free_cints(pf, qidx);
1740 vec = pci_irq_vector(pf->pdev,
1741 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1742 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1743 free_irq(vec, pf);
1744 err_disable_napi:
1745 otx2_disable_napi(pf);
1746 otx2_free_hw_resources(pf);
1747 err_free_mem:
1748 kfree(qset->sq);
1749 kfree(qset->cq);
1750 kfree(qset->rq);
1751 kfree(qset->napi);
1752 return err;
1753 }
1754 EXPORT_SYMBOL(otx2_open);
1755
1756 int otx2_stop(struct net_device *netdev)
1757 {
1758 struct otx2_nic *pf = netdev_priv(netdev);
1759 struct otx2_cq_poll *cq_poll = NULL;
1760 struct otx2_qset *qset = &pf->qset;
1761 struct otx2_rss_info *rss;
1762 int qidx, vec, wrk;
1763
	/* Nothing to do if the interface is already down */
1765 if (pf->flags & OTX2_FLAG_INTF_DOWN)
1766 return 0;
1767
1768 netif_carrier_off(netdev);
1769 netif_tx_stop_all_queues(netdev);
1770
1771 pf->flags |= OTX2_FLAG_INTF_DOWN;
	/* 'intf_down' may be checked on any CPU */
1773 smp_wmb();
1774
	/* First stop packet Rx/Tx */
1776 otx2_rxtx_enable(pf, false);
1777
	/* Clear the RSS enable flag */
1779 rss = &pf->hw.rss_info;
1780 rss->enable = false;
1781
	/* Cleanup queue error IRQ */
1783 vec = pci_irq_vector(pf->pdev,
1784 pf->hw.nix_msixoff + NIX_LF_QINT_VEC_START);
1785 otx2_write64(pf, NIX_LF_QINTX_ENA_W1C(0), BIT_ULL(0));
1786 free_irq(vec, pf);
1787
	/* Cleanup CQ NAPI contexts and IRQs */
1789 vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
1790 for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
		/* Disable the completion interrupt */
1792 otx2_write64(pf, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));
		/* Wait for any in-flight interrupt handler to finish */
1794 synchronize_irq(pci_irq_vector(pf->pdev, vec));
1795
1796 cq_poll = &qset->napi[qidx];
1797 napi_synchronize(&cq_poll->napi);
1798 vec++;
1799 }
1800
1801 netif_tx_disable(netdev);
1802
1803 otx2_free_hw_resources(pf);
1804 otx2_free_cints(pf, pf->hw.cint_cnt);
1805 otx2_disable_napi(pf);
1806
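	/* Reset stale byte queue limits (BQL) state on all TX queues */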
1807 for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
1808 netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
1809
1810 for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
1811 cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
1812 devm_kfree(pf->dev, pf->refill_wrk);
1813
1814 kfree(qset->sq);
1815 kfree(qset->cq);
1816 kfree(qset->rq);
1817 kfree(qset->napi);
1818
1819 memset_startat(qset, 0, sqe_cnt);
1820 return 0;
1821 }
1822 EXPORT_SYMBOL(otx2_stop);
1823
1824 static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
1825 {
1826 struct otx2_nic *pf = netdev_priv(netdev);
1827 int qidx = skb_get_queue_mapping(skb);
1828 struct otx2_snd_queue *sq;
1829 struct netdev_queue *txq;
1830
	/* Check for minimum and maximum packet length */
1832 if (skb->len <= ETH_HLEN ||
1833 (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
1834 dev_kfree_skb(skb);
1835 return NETDEV_TX_OK;
1836 }
1837
1838 sq = &pf->qset.sq[qidx];
1839 txq = netdev_get_tx_queue(netdev, qidx);
1840
1841 if (!otx2_sq_append_skb(netdev, sq, skb, qidx)) {
1842 netif_tx_stop_queue(txq);
1843
		/* Check again, in case SQBs got freed up */
1845 smp_mb();
1846 if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
1847 > sq->sqe_thresh)
1848 netif_tx_wake_queue(txq);
1849
1850 return NETDEV_TX_BUSY;
1851 }
1852
1853 return NETDEV_TX_OK;
1854 }
1855
1856 static netdev_features_t otx2_fix_features(struct net_device *dev,
1857 netdev_features_t features)
1858 {
1859 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1860 features |= NETIF_F_HW_VLAN_STAG_RX;
1861 else
1862 features &= ~NETIF_F_HW_VLAN_STAG_RX;
1863
1864 return features;
1865 }
1866
1867 static void otx2_set_rx_mode(struct net_device *netdev)
1868 {
1869 struct otx2_nic *pf = netdev_priv(netdev);
1870
1871 queue_work(pf->otx2_wq, &pf->rx_mode_work);
1872 }
1873
1874 static void otx2_rx_mode_wrk_handler(struct work_struct *work)
1875 {
1876 struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
1877
1878 otx2_do_set_rx_mode(pf);
1879 }
1880
1881 static int otx2_set_features(struct net_device *netdev,
1882 netdev_features_t features)
1883 {
1884 netdev_features_t changed = features ^ netdev->features;
1885 struct otx2_nic *pf = netdev_priv(netdev);
1886
1887 if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev))
1888 return otx2_cgx_config_loopback(pf,
1889 features & NETIF_F_LOOPBACK);
1890
1891 if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(netdev))
1892 return otx2_enable_rxvlan(pf,
1893 features & NETIF_F_HW_VLAN_CTAG_RX);
1894
1895 return otx2_handle_ntuple_tc_features(netdev, features);
1896 }
1897
1898 static void otx2_reset_task(struct work_struct *work)
1899 {
1900 struct otx2_nic *pf = container_of(work, struct otx2_nic, reset_task);
1901
1902 if (!netif_running(pf->netdev))
1903 return;
1904
1905 rtnl_lock();
1906 otx2_stop(pf->netdev);
1907 pf->reset_count++;
1908 otx2_open(pf->netdev);
1909 netif_trans_update(pf->netdev);
1910 rtnl_unlock();
1911 }
1912
1913 static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
1914 {
1915 struct msg_req *req;
1916 int err;
1917
1918 if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
1919 return 0;
1920
1921 mutex_lock(&pfvf->mbox.lock);
1922 if (enable)
1923 req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
1924 else
1925 req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
1926 if (!req) {
1927 mutex_unlock(&pfvf->mbox.lock);
1928 return -ENOMEM;
1929 }
1930
1931 err = otx2_sync_mbox_msg(&pfvf->mbox);
1932 if (err) {
1933 mutex_unlock(&pfvf->mbox.lock);
1934 return err;
1935 }
1936
1937 mutex_unlock(&pfvf->mbox.lock);
1938 if (enable)
1939 pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
1940 else
1941 pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
1942 return 0;
1943 }
1944
1945 static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
1946 {
1947 struct msg_req *req;
1948 int err;
1949
1950 if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
1951 return 0;
1952
1953 mutex_lock(&pfvf->mbox.lock);
1954 if (enable)
1955 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
1956 else
1957 req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
1958 if (!req) {
1959 mutex_unlock(&pfvf->mbox.lock);
1960 return -ENOMEM;
1961 }
1962
1963 err = otx2_sync_mbox_msg(&pfvf->mbox);
1964 if (err) {
1965 mutex_unlock(&pfvf->mbox.lock);
1966 return err;
1967 }
1968
1969 mutex_unlock(&pfvf->mbox.lock);
1970 if (enable)
1971 pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
1972 else
1973 pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
1974 return 0;
1975 }
1976
1977 int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
1978 {
1979 struct otx2_nic *pfvf = netdev_priv(netdev);
1980 struct hwtstamp_config config;
1981
1982 if (!pfvf->ptp)
1983 return -ENODEV;
1984
1985 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1986 return -EFAULT;
1987
1988 switch (config.tx_type) {
1989 case HWTSTAMP_TX_OFF:
1990 otx2_config_hw_tx_tstamp(pfvf, false);
1991 break;
1992 case HWTSTAMP_TX_ON:
1993 otx2_config_hw_tx_tstamp(pfvf, true);
1994 break;
1995 default:
1996 return -ERANGE;
1997 }
1998
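	/* Any supported PTP filter timestamps all packets, so HWTSTAMP_FILTER_ALL is reported back */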
1999 switch (config.rx_filter) {
2000 case HWTSTAMP_FILTER_NONE:
2001 otx2_config_hw_rx_tstamp(pfvf, false);
2002 break;
2003 case HWTSTAMP_FILTER_ALL:
2004 case HWTSTAMP_FILTER_SOME:
2005 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2006 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2007 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2008 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2009 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2010 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2011 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2012 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2013 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2014 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2015 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2016 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2017 otx2_config_hw_rx_tstamp(pfvf, true);
2018 config.rx_filter = HWTSTAMP_FILTER_ALL;
2019 break;
2020 default:
2021 return -ERANGE;
2022 }
2023
2024 memcpy(&pfvf->tstamp, &config, sizeof(config));
2025
2026 return copy_to_user(ifr->ifr_data, &config,
2027 sizeof(config)) ? -EFAULT : 0;
2028 }
2029 EXPORT_SYMBOL(otx2_config_hwtstamp);
2030
2031 int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
2032 {
2033 struct otx2_nic *pfvf = netdev_priv(netdev);
2034 struct hwtstamp_config *cfg = &pfvf->tstamp;
2035
2036 switch (cmd) {
2037 case SIOCSHWTSTAMP:
2038 return otx2_config_hwtstamp(netdev, req);
2039 case SIOCGHWTSTAMP:
2040 return copy_to_user(req->ifr_data, cfg,
2041 sizeof(*cfg)) ? -EFAULT : 0;
2042 default:
2043 return -EOPNOTSUPP;
2044 }
2045 }
2046 EXPORT_SYMBOL(otx2_ioctl);
2047
2048 static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
2049 {
2050 struct npc_install_flow_req *req;
2051 int err;
2052
2053 mutex_lock(&pf->mbox.lock);
2054 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2055 if (!req) {
2056 err = -ENOMEM;
2057 goto out;
2058 }
2059
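	/* Install an MCAM rule that matches the VF's MAC address and forwards traffic to that VF */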
2060 ether_addr_copy(req->packet.dmac, mac);
2061 eth_broadcast_addr((u8 *)&req->mask.dmac);
2062 req->features = BIT_ULL(NPC_DMAC);
2063 req->channel = pf->hw.rx_chan_base;
2064 req->intf = NIX_INTF_RX;
2065 req->default_rule = 1;
2066 req->append = 1;
2067 req->vf = vf + 1;
2068 req->op = NIX_RX_ACTION_DEFAULT;
2069
2070 err = otx2_sync_mbox_msg(&pf->mbox);
2071 out:
2072 mutex_unlock(&pf->mbox.lock);
2073 return err;
2074 }
2075
2076 static int otx2_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
2077 {
2078 struct otx2_nic *pf = netdev_priv(netdev);
2079 struct pci_dev *pdev = pf->pdev;
2080 struct otx2_vf_config *config;
2081 int ret;
2082
2083 if (!netif_running(netdev))
2084 return -EAGAIN;
2085
2086 if (vf >= pf->total_vfs)
2087 return -EINVAL;
2088
2089 if (!is_valid_ether_addr(mac))
2090 return -EINVAL;
2091
2092 config = &pf->vf_configs[vf];
2093 ether_addr_copy(config->mac, mac);
2094
2095 ret = otx2_do_set_vf_mac(pf, vf, mac);
2096 if (ret == 0)
2097 dev_info(&pdev->dev,
2098 "Load/Reload VF driver\n");
2099
2100 return ret;
2101 }
2102
2103 static int otx2_do_set_vf_vlan(struct otx2_nic *pf, int vf, u16 vlan, u8 qos,
2104 __be16 proto)
2105 {
2106 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
2107 struct nix_vtag_config_rsp *vtag_rsp;
2108 struct npc_delete_flow_req *del_req;
2109 struct nix_vtag_config *vtag_req;
2110 struct npc_install_flow_req *req;
2111 struct otx2_vf_config *config;
2112 int err = 0;
2113 u32 idx;
2114
2115 config = &pf->vf_configs[vf];
2116
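	/* Nothing to do if no VLAN is requested and none is currently configured */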
2117 if (!vlan && !config->vlan)
2118 goto out;
2119
2120 mutex_lock(&pf->mbox.lock);
2121
	/* Free the old TX vtag entry, if one was configured */
2123 if (config->vlan) {
2124 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2125 if (!vtag_req) {
2126 err = -ENOMEM;
2127 goto out;
2128 }
2129 vtag_req->cfg_type = 0;
2130 vtag_req->tx.free_vtag0 = 1;
2131 vtag_req->tx.vtag0_idx = config->tx_vtag_idx;
2132
2133 err = otx2_sync_mbox_msg(&pf->mbox);
2134 if (err)
2135 goto out;
2136 }
2137
2138 if (!vlan && config->vlan) {
2139
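/* Delete the VF's Rx VLAN steering MCAM entry */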
2140 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2141 if (!del_req) {
2142 err = -ENOMEM;
2143 goto out;
2144 }
2145 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2146 del_req->entry =
2147 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2148 err = otx2_sync_mbox_msg(&pf->mbox);
2149 if (err)
2150 goto out;
2151
2152
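/* Delete the VF's Tx VLAN insertion MCAM entry */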
2153 del_req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
2154 if (!del_req) {
2155 err = -ENOMEM;
2156 goto out;
2157 }
2158 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2159 del_req->entry =
2160 flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2161 err = otx2_sync_mbox_msg(&pf->mbox);
2162
2163 goto out;
2164 }
2165
2166
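/* Rx: install an MCAM rule that matches the VLAN and steers traffic to the VF */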
2167 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2168 if (!req) {
2169 err = -ENOMEM;
2170 goto out;
2171 }
2172
2173 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_RX_INDEX);
2174 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2175 req->packet.vlan_tci = htons(vlan);
2176 req->mask.vlan_tci = htons(VLAN_VID_MASK);
2177
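/* Match on DMAC as well; AF fills in the VF's MAC address for this rule */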
2178 eth_broadcast_addr((u8 *)&req->mask.dmac);
2179 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
2180 req->channel = pf->hw.rx_chan_base;
2181 req->intf = NIX_INTF_RX;
2182 req->vf = vf + 1;
2183 req->op = NIX_RX_ACTION_DEFAULT;
2184 req->vtag0_valid = true;
2185 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
2186 req->set_cntr = 1;
2187
2188 err = otx2_sync_mbox_msg(&pf->mbox);
2189 if (err)
2190 goto out;
2191
2192
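/* Tx: set up a hardware VTAG entry used to insert the VLAN on transmit */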
2193 vtag_req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
2194 if (!vtag_req) {
2195 err = -ENOMEM;
2196 goto out;
2197 }
2198
2199
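/* Configure Tx VTAG parameters: TPID in the upper 16 bits, VLAN ID below */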
2200 vtag_req->vtag_size = VTAGSIZE_T4;
2201 vtag_req->cfg_type = 0;
2202 vtag_req->tx.cfg_vtag0 = 1;
2203 vtag_req->tx.vtag0 = ((u64)ntohs(proto) << 16) | vlan;
2204
2205 err = otx2_sync_mbox_msg(&pf->mbox);
2206 if (err)
2207 goto out;
2208
2209 vtag_rsp = (struct nix_vtag_config_rsp *)otx2_mbox_get_rsp
2210 (&pf->mbox.mbox, 0, &vtag_req->hdr);
2211 if (IS_ERR(vtag_rsp)) {
2212 err = PTR_ERR(vtag_rsp);
2213 goto out;
2214 }
2215 config->tx_vtag_idx = vtag_rsp->vtag0_idx;
2216
2217 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
2218 if (!req) {
2219 err = -ENOMEM;
2220 goto out;
2221 }
2222
2223 eth_zero_addr((u8 *)&req->mask.dmac);
2224 idx = ((vf * OTX2_PER_VF_VLAN_FLOWS) + OTX2_VF_VLAN_TX_INDEX);
2225 req->entry = flow_cfg->def_ent[flow_cfg->vf_vlan_offset + idx];
2226 req->features = BIT_ULL(NPC_DMAC);
2227 req->channel = pf->hw.tx_chan_base;
2228 req->intf = NIX_INTF_TX;
2229 req->vf = vf + 1;
2230 req->op = NIX_TX_ACTIONOP_UCAST_DEFAULT;
2231 req->vtag0_def = vtag_rsp->vtag0_idx;
2232 req->vtag0_op = VTAG_INSERT;
2233 req->set_cntr = 1;
2234
2235 err = otx2_sync_mbox_msg(&pf->mbox);
2236 out:
2237 config->vlan = vlan;
2238 mutex_unlock(&pf->mbox.lock);
2239 return err;
2240 }
2241
2242 static int otx2_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
2243 __be16 proto)
2244 {
2245 struct otx2_nic *pf = netdev_priv(netdev);
2246 struct pci_dev *pdev = pf->pdev;
2247
2248 if (!netif_running(netdev))
2249 return -EAGAIN;
2250
2251 if (vf >= pci_num_vf(pdev))
2252 return -EINVAL;
2253
2254
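/* VLAN priority (qos) is currently not supported */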
2255 if (vlan >= VLAN_N_VID || qos)
2256 return -EINVAL;
2257
2258 if (proto != htons(ETH_P_8021Q))
2259 return -EPROTONOSUPPORT;
2260
2261 if (!(pf->flags & OTX2_FLAG_VF_VLAN_SUPPORT))
2262 return -EOPNOTSUPP;
2263
2264 return otx2_do_set_vf_vlan(pf, vf, vlan, qos, proto);
2265 }
2266
2267 static int otx2_get_vf_config(struct net_device *netdev, int vf,
2268 struct ifla_vf_info *ivi)
2269 {
2270 struct otx2_nic *pf = netdev_priv(netdev);
2271 struct pci_dev *pdev = pf->pdev;
2272 struct otx2_vf_config *config;
2273
2274 if (!netif_running(netdev))
2275 return -EAGAIN;
2276
2277 if (vf >= pci_num_vf(pdev))
2278 return -EINVAL;
2279
2280 config = &pf->vf_configs[vf];
2281 ivi->vf = vf;
2282 ether_addr_copy(ivi->mac, config->mac);
2283 ivi->vlan = config->vlan;
2284 ivi->trusted = config->trusted;
2285
2286 return 0;
2287 }
2288
2289 static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
2290 int qidx)
2291 {
2292 struct page *page;
2293 u64 dma_addr;
2294 int err = 0;
2295
2296 dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
2297 offset_in_page(xdpf->data), xdpf->len,
2298 DMA_TO_DEVICE);
2299 if (dma_mapping_error(pf->dev, dma_addr))
2300 return -ENOMEM;
2301
2302 err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
2303 if (!err) {
2304 otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
2305 page = virt_to_page(xdpf->data);
2306 put_page(page);
2307 return -ENOMEM;
2308 }
2309 return 0;
2310 }
2311
2312 static int otx2_xdp_xmit(struct net_device *netdev, int n,
2313 struct xdp_frame **frames, u32 flags)
2314 {
2315 struct otx2_nic *pf = netdev_priv(netdev);
2316 int qidx = smp_processor_id();
2317 struct otx2_snd_queue *sq;
2318 int drops = 0, i;
2319
2320 if (!netif_running(netdev))
2321 return -ENETDOWN;
2322
2323 qidx += pf->hw.tx_queues;
2324 sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
2325
2326
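/* Abort transmit if no XDP Tx queue is set up (no XDP program loaded) */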
2327 if (unlikely(!sq))
2328 return -ENXIO;
2329
2330 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2331 return -EINVAL;
2332
2333 for (i = 0; i < n; i++) {
2334 struct xdp_frame *xdpf = frames[i];
2335 int err;
2336
2337 err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
2338 if (err)
2339 drops++;
2340 }
2341 return n - drops;
2342 }
2343
2344 static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
2345 {
2346 struct net_device *dev = pf->netdev;
2347 bool if_up = netif_running(pf->netdev);
2348 struct bpf_prog *old_prog;
2349
2350 if (prog && dev->mtu > MAX_XDP_MTU) {
2351 netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
2352 return -EOPNOTSUPP;
2353 }
2354
2355 if (if_up)
2356 otx2_stop(pf->netdev);
2357
2358 old_prog = xchg(&pf->xdp_prog, prog);
2359
2360 if (old_prog)
2361 bpf_prog_put(old_prog);
2362
2363 if (pf->xdp_prog)
2364 bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
2365
/* The network stack and XDP share the same Rx queues.
 * Use separate Tx queues for XDP and the network stack.
 */
2369 if (pf->xdp_prog)
2370 pf->hw.xdp_queues = pf->hw.rx_queues;
2371 else
2372 pf->hw.xdp_queues = 0;
2373
2374 pf->hw.tot_tx_queues += pf->hw.xdp_queues;
2375
2376 if (if_up)
2377 otx2_open(pf->netdev);
2378
2379 return 0;
2380 }
2381
2382 static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
2383 {
2384 struct otx2_nic *pf = netdev_priv(netdev);
2385
2386 switch (xdp->command) {
2387 case XDP_SETUP_PROG:
2388 return otx2_xdp_setup(pf, xdp->prog);
2389 default:
2390 return -EINVAL;
2391 }
2392 }
2393
2394 static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
2395 int req_perm)
2396 {
2397 struct set_vf_perm *req;
2398 int rc;
2399
2400 mutex_lock(&pf->mbox.lock);
2401 req = otx2_mbox_alloc_msg_set_vf_perm(&pf->mbox);
2402 if (!req) {
2403 rc = -ENOMEM;
2404 goto out;
2405 }
2406
2407
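/* Ask AF to reset the VF's permissions, e.g. when VFs are torn down */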
2408 if (req_perm == OTX2_RESET_VF_PERM) {
2409 req->flags |= RESET_VF_PERM;
2410 } else if (req_perm == OTX2_TRUSTED_VF) {
2411 if (pf->vf_configs[vf].trusted)
2412 req->flags |= VF_TRUSTED;
2413 }
2414
2415 req->vf = vf;
2416 rc = otx2_sync_mbox_msg(&pf->mbox);
2417 out:
2418 mutex_unlock(&pf->mbox.lock);
2419 return rc;
2420 }
2421
2422 static int otx2_ndo_set_vf_trust(struct net_device *netdev, int vf,
2423 bool enable)
2424 {
2425 struct otx2_nic *pf = netdev_priv(netdev);
2426 struct pci_dev *pdev = pf->pdev;
2427 int rc;
2428
2429 if (vf >= pci_num_vf(pdev))
2430 return -EINVAL;
2431
2432 if (pf->vf_configs[vf].trusted == enable)
2433 return 0;
2434
2435 pf->vf_configs[vf].trusted = enable;
2436 rc = otx2_set_vf_permissions(pf, vf, OTX2_TRUSTED_VF);
2437
2438 if (rc)
2439 pf->vf_configs[vf].trusted = !enable;
2440 else
2441 netdev_info(pf->netdev, "VF %d is %strusted\n",
2442 vf, enable ? "" : "not ");
2443 return rc;
2444 }
2445
2446 static const struct net_device_ops otx2_netdev_ops = {
2447 .ndo_open = otx2_open,
2448 .ndo_stop = otx2_stop,
2449 .ndo_start_xmit = otx2_xmit,
2450 .ndo_fix_features = otx2_fix_features,
2451 .ndo_set_mac_address = otx2_set_mac_address,
2452 .ndo_change_mtu = otx2_change_mtu,
2453 .ndo_set_rx_mode = otx2_set_rx_mode,
2454 .ndo_set_features = otx2_set_features,
2455 .ndo_tx_timeout = otx2_tx_timeout,
2456 .ndo_get_stats64 = otx2_get_stats64,
2457 .ndo_eth_ioctl = otx2_ioctl,
2458 .ndo_set_vf_mac = otx2_set_vf_mac,
2459 .ndo_set_vf_vlan = otx2_set_vf_vlan,
2460 .ndo_get_vf_config = otx2_get_vf_config,
2461 .ndo_bpf = otx2_xdp,
2462 .ndo_xdp_xmit = otx2_xdp_xmit,
2463 .ndo_setup_tc = otx2_setup_tc,
2464 .ndo_set_vf_trust = otx2_ndo_set_vf_trust,
2465 };
2466
2467 static int otx2_wq_init(struct otx2_nic *pf)
2468 {
2469 pf->otx2_wq = create_singlethread_workqueue("otx2_wq");
2470 if (!pf->otx2_wq)
2471 return -ENOMEM;
2472
2473 INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
2474 INIT_WORK(&pf->reset_task, otx2_reset_task);
2475 return 0;
2476 }
2477
2478 static int otx2_check_pf_usable(struct otx2_nic *nic)
2479 {
2480 u64 rev;
2481
2482 rev = otx2_read64(nic, RVU_PF_BLOCK_ADDRX_DISC(BLKADDR_RVUM));
2483 rev = (rev >> 12) & 0xFF;
/* Check whether AF has set up the revision for the RVUM block;
 * if not, this driver's probe should be deferred until the
 * AF driver comes up.
 */
2488 if (!rev) {
2489 dev_warn(nic->dev,
2490 "AF is not initialized, deferring probe\n");
2491 return -EPROBE_DEFER;
2492 }
2493 return 0;
2494 }
2495
2496 static int otx2_realloc_msix_vectors(struct otx2_nic *pf)
2497 {
2498 struct otx2_hw *hw = &pf->hw;
2499 int num_vec, err;
2500
/* NPA interrupts are not registered, so allocate vectors only
 * up to the NIX vector offset plus the NIX completion (CINT) vectors.
 */
2504 num_vec = hw->nix_msixoff;
2505 num_vec += NIX_LF_CINT_VEC_START + hw->max_queues;
2506
2507 otx2_disable_mbox_intr(pf);
2508 pci_free_irq_vectors(hw->pdev);
2509 err = pci_alloc_irq_vectors(hw->pdev, num_vec, num_vec, PCI_IRQ_MSIX);
2510 if (err < 0) {
2511 dev_err(pf->dev, "%s: Failed to realloc %d IRQ vectors\n",
2512 __func__, num_vec);
2513 return err;
2514 }
2515
2516 return otx2_register_mbox_intr(pf, false);
2517 }
2518
2519 static int otx2_sriov_vfcfg_init(struct otx2_nic *pf)
2520 {
2521 int i;
2522
2523 pf->vf_configs = devm_kcalloc(pf->dev, pf->total_vfs,
2524 sizeof(struct otx2_vf_config),
2525 GFP_KERNEL);
2526 if (!pf->vf_configs)
2527 return -ENOMEM;
2528
2529 for (i = 0; i < pf->total_vfs; i++) {
2530 pf->vf_configs[i].pf = pf;
2531 pf->vf_configs[i].intf_down = true;
2532 pf->vf_configs[i].trusted = false;
2533 INIT_DELAYED_WORK(&pf->vf_configs[i].link_event_work,
2534 otx2_vf_link_event_task);
2535 }
2536
2537 return 0;
2538 }
2539
2540 static void otx2_sriov_vfcfg_cleanup(struct otx2_nic *pf)
2541 {
2542 int i;
2543
2544 if (!pf->vf_configs)
2545 return;
2546
2547 for (i = 0; i < pf->total_vfs; i++) {
2548 cancel_delayed_work_sync(&pf->vf_configs[i].link_event_work);
2549 otx2_set_vf_permissions(pf, i, OTX2_RESET_VF_PERM);
2550 }
2551 }
2552
2553 static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2554 {
2555 struct device *dev = &pdev->dev;
2556 struct net_device *netdev;
2557 struct otx2_nic *pf;
2558 struct otx2_hw *hw;
2559 int err, qcount;
2560 int num_vec;
2561
2562 err = pcim_enable_device(pdev);
2563 if (err) {
2564 dev_err(dev, "Failed to enable PCI device\n");
2565 return err;
2566 }
2567
2568 err = pci_request_regions(pdev, DRV_NAME);
2569 if (err) {
2570 dev_err(dev, "PCI request regions failed 0x%x\n", err);
2571 return err;
2572 }
2573
2574 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
2575 if (err) {
2576 dev_err(dev, "DMA mask config failed, abort\n");
2577 goto err_release_regions;
2578 }
2579
2580 pci_set_master(pdev);
2581
2582
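/* Use one queue per online CPU, capped at OTX2_MAX_CQ_CNT */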
2583 qcount = min_t(int, num_online_cpus(), OTX2_MAX_CQ_CNT);
2584
2585 netdev = alloc_etherdev_mqs(sizeof(*pf), qcount, qcount);
2586 if (!netdev) {
2587 err = -ENOMEM;
2588 goto err_release_regions;
2589 }
2590
2591 pci_set_drvdata(pdev, netdev);
2592 SET_NETDEV_DEV(netdev, &pdev->dev);
2593 pf = netdev_priv(netdev);
2594 pf->netdev = netdev;
2595 pf->pdev = pdev;
2596 pf->dev = dev;
2597 pf->total_vfs = pci_sriov_get_totalvfs(pdev);
2598 pf->flags |= OTX2_FLAG_INTF_DOWN;
2599
2600 hw = &pf->hw;
2601 hw->pdev = pdev;
2602 hw->rx_queues = qcount;
2603 hw->tx_queues = qcount;
2604 hw->tot_tx_queues = qcount;
2605 hw->max_queues = qcount;
2606 hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
2607
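/* Use 128 byte CQE descriptors by default */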
2608 hw->xqe_size = 128;
2609
2610 num_vec = pci_msix_vec_count(pdev);
2611 hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
2612 GFP_KERNEL);
2613 if (!hw->irq_name) {
2614 err = -ENOMEM;
2615 goto err_free_netdev;
2616 }
2617
2618 hw->affinity_mask = devm_kcalloc(&hw->pdev->dev, num_vec,
2619 sizeof(cpumask_var_t), GFP_KERNEL);
2620 if (!hw->affinity_mask) {
2621 err = -ENOMEM;
2622 goto err_free_netdev;
2623 }
2624
2625
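/* Map the PF's CSR region */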
2626 pf->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
2627 if (!pf->reg_base) {
2628 dev_err(dev, "Unable to map physical function CSRs, aborting\n");
2629 err = -ENOMEM;
2630 goto err_free_netdev;
2631 }
2632
2633 err = otx2_check_pf_usable(pf);
2634 if (err)
2635 goto err_free_netdev;
2636
2637 err = pci_alloc_irq_vectors(hw->pdev, RVU_PF_INT_VEC_CNT,
2638 RVU_PF_INT_VEC_CNT, PCI_IRQ_MSIX);
2639 if (err < 0) {
2640 dev_err(dev, "%s: Failed to alloc %d IRQ vectors\n",
2641 __func__, num_vec);
2642 goto err_free_netdev;
2643 }
2644
2645 otx2_setup_dev_hw_settings(pf);
2646
2647
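/* Init PF <=> AF mailbox */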
2648 err = otx2_pfaf_mbox_init(pf);
2649 if (err)
2650 goto err_free_irq_vectors;
2651
2652
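/* Register the AF mailbox interrupt */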
2653 err = otx2_register_mbox_intr(pf, true);
2654 if (err)
2655 goto err_mbox_destroy;
2656
/* Request AF to attach NPA and NIX LFs to this PF.
 * NIX and NPA LFs are needed for this PF to function as a NIC.
 */
2660 err = otx2_attach_npa_nix(pf);
2661 if (err)
2662 goto err_disable_mbox_intr;
2663
2664 err = otx2_realloc_msix_vectors(pf);
2665 if (err)
2666 goto err_detach_rsrc;
2667
2668 err = otx2_set_real_num_queues(netdev, hw->tx_queues, hw->rx_queues);
2669 if (err)
2670 goto err_detach_rsrc;
2671
2672 err = cn10k_lmtst_init(pf);
2673 if (err)
2674 goto err_detach_rsrc;
2675
2676
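/* Fetch the default MAC address from AF */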
2677 otx2_get_mac_from_af(netdev);
2678
2679
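/* Don't fail probe on PTP init errors; proceed without PTP */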
2680 otx2_ptp_init(pf);
2681
/* NPA's pool is a stack to which SW frees buffer pointers via the Aura.
 * HW allocates buffer pointers from the stack and uses them for DMA'ing
 * ingress packets. In some scenarios HW can free allocated buffer
 * pointers back to the pool, which makes it impossible for SW to keep a
 * parallel list of the physical addresses (IOVAs) handed to HW.
 *
 * So the only way to convert an Rx packet's buffer address is to use the
 * IOMMU's iova_to_phys() handler, which translates the address by
 * walking the translation tables.
 */
2693 pf->iommu_domain = iommu_get_domain_for_dev(dev);
2694
2695 netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
2696 NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
2697 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
2698 NETIF_F_GSO_UDP_L4);
2699 netdev->features |= netdev->hw_features;
2700
2701 err = otx2_mcam_flow_init(pf);
2702 if (err)
2703 goto err_ptp_destroy;
2704
2705 if (pf->flags & OTX2_FLAG_NTUPLE_SUPPORT)
2706 netdev->hw_features |= NETIF_F_NTUPLE;
2707
2708 if (pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)
2709 netdev->priv_flags |= IFF_UNICAST_FLT;
2710
2711
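/* Inherit the offloads (TSO, checksums, etc.) on VLAN interfaces */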
2712 netdev->vlan_features |= netdev->features;
2713 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
2714 NETIF_F_HW_VLAN_STAG_TX;
2715 if (pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
2716 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
2717 NETIF_F_HW_VLAN_STAG_RX;
2718 netdev->features |= netdev->hw_features;
2719
2720
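/* HW supports tc offload, but it is mutually exclusive with n-tuple filters */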
2721 if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)
2722 netdev->hw_features |= NETIF_F_HW_TC;
2723
2724 netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
2725
2726 netif_set_tso_max_segs(netdev, OTX2_MAX_GSO_SEGS);
2727 netdev->watchdog_timeo = OTX2_TX_TIMEOUT;
2728
2729 netdev->netdev_ops = &otx2_netdev_ops;
2730
2731 netdev->min_mtu = OTX2_MIN_MTU;
2732 netdev->max_mtu = otx2_get_max_mtu(pf);
2733
2734 err = register_netdev(netdev);
2735 if (err) {
2736 dev_err(dev, "Failed to register netdevice\n");
2737 goto err_del_mcam_entries;
2738 }
2739
2740 err = otx2_wq_init(pf);
2741 if (err)
2742 goto err_unreg_netdev;
2743
2744 otx2_set_ethtool_ops(netdev);
2745
2746 err = otx2_init_tc(pf);
2747 if (err)
2748 goto err_mcam_flow_del;
2749
2750 err = otx2_register_dl(pf);
2751 if (err)
2752 goto err_mcam_flow_del;
2753
2754
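/* Set up per-VF configuration state used once SR-IOV is enabled */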
2755 err = otx2_sriov_vfcfg_init(pf);
2756 if (err)
2757 goto err_pf_sriov_init;
2758
2759
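/* Enable link event notifications from AF */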
2760 otx2_cgx_config_linkevents(pf, true);
2761
2762 #ifdef CONFIG_DCB
2763 err = otx2_dcbnl_set_ops(netdev);
2764 if (err)
2765 goto err_pf_sriov_init;
2766 #endif
2767
2768 return 0;
2769
2770 err_pf_sriov_init:
2771 otx2_shutdown_tc(pf);
2772 err_mcam_flow_del:
2773 otx2_mcam_flow_del(pf);
2774 err_unreg_netdev:
2775 unregister_netdev(netdev);
2776 err_del_mcam_entries:
2777 otx2_mcam_flow_del(pf);
2778 err_ptp_destroy:
2779 otx2_ptp_destroy(pf);
2780 err_detach_rsrc:
2781 if (pf->hw.lmt_info)
2782 free_percpu(pf->hw.lmt_info);
2783 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
2784 qmem_free(pf->dev, pf->dync_lmt);
2785 otx2_detach_resources(&pf->mbox);
2786 err_disable_mbox_intr:
2787 otx2_disable_mbox_intr(pf);
2788 err_mbox_destroy:
2789 otx2_pfaf_mbox_destroy(pf);
2790 err_free_irq_vectors:
2791 pci_free_irq_vectors(hw->pdev);
2792 err_free_netdev:
2793 pci_set_drvdata(pdev, NULL);
2794 free_netdev(netdev);
2795 err_release_regions:
2796 pci_release_regions(pdev);
2797 return err;
2798 }
2799
2800 static void otx2_vf_link_event_task(struct work_struct *work)
2801 {
2802 struct otx2_vf_config *config;
2803 struct cgx_link_info_msg *req;
2804 struct mbox_msghdr *msghdr;
2805 struct otx2_nic *pf;
2806 int vf_idx;
2807
2808 config = container_of(work, struct otx2_vf_config,
2809 link_event_work.work);
2810 vf_idx = config - config->pf->vf_configs;
2811 pf = config->pf;
2812
2813 msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
2814 sizeof(*req), sizeof(struct msg_rsp));
2815 if (!msghdr) {
2816 dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
2817 return;
2818 }
2819
2820 req = (struct cgx_link_info_msg *)msghdr;
2821 req->hdr.id = MBOX_MSG_CGX_LINK_EVENT;
2822 req->hdr.sig = OTX2_MBOX_REQ_SIG;
2823 memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
2824
2825 otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
2826 }
2827
2828 static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
2829 {
2830 struct net_device *netdev = pci_get_drvdata(pdev);
2831 struct otx2_nic *pf = netdev_priv(netdev);
2832 int ret;
2833
2834
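/* Init PF <=> VF mailbox */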
2835 ret = otx2_pfvf_mbox_init(pf, numvfs);
2836 if (ret)
2837 return ret;
2838
2839 ret = otx2_register_pfvf_mbox_intr(pf, numvfs);
2840 if (ret)
2841 goto free_mbox;
2842
2843 ret = otx2_pf_flr_init(pf, numvfs);
2844 if (ret)
2845 goto free_intr;
2846
2847 ret = otx2_register_flr_me_intr(pf, numvfs);
2848 if (ret)
2849 goto free_flr;
2850
2851 ret = pci_enable_sriov(pdev, numvfs);
2852 if (ret)
2853 goto free_flr_intr;
2854
2855 return numvfs;
2856 free_flr_intr:
2857 otx2_disable_flr_me_intr(pf);
2858 free_flr:
2859 otx2_flr_wq_destroy(pf);
2860 free_intr:
2861 otx2_disable_pfvf_mbox_intr(pf, numvfs);
2862 free_mbox:
2863 otx2_pfvf_mbox_destroy(pf);
2864 return ret;
2865 }
2866
2867 static int otx2_sriov_disable(struct pci_dev *pdev)
2868 {
2869 struct net_device *netdev = pci_get_drvdata(pdev);
2870 struct otx2_nic *pf = netdev_priv(netdev);
2871 int numvfs = pci_num_vf(pdev);
2872
2873 if (!numvfs)
2874 return 0;
2875
2876 pci_disable_sriov(pdev);
2877
2878 otx2_disable_flr_me_intr(pf);
2879 otx2_flr_wq_destroy(pf);
2880 otx2_disable_pfvf_mbox_intr(pf, numvfs);
2881 otx2_pfvf_mbox_destroy(pf);
2882
2883 return 0;
2884 }
2885
2886 static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
2887 {
2888 if (numvfs == 0)
2889 return otx2_sriov_disable(pdev);
2890 else
2891 return otx2_sriov_enable(pdev, numvfs);
2892 }
2893
2894 static void otx2_remove(struct pci_dev *pdev)
2895 {
2896 struct net_device *netdev = pci_get_drvdata(pdev);
2897 struct otx2_nic *pf;
2898
2899 if (!netdev)
2900 return;
2901
2902 pf = netdev_priv(netdev);
2903
2904 pf->flags |= OTX2_FLAG_PF_SHUTDOWN;
2905
2906 if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
2907 otx2_config_hw_tx_tstamp(pf, false);
2908 if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
2909 otx2_config_hw_rx_tstamp(pf, false);
2910
2911
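/* Disable 802.3x pause frames */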
2912 if (pf->flags & OTX2_FLAG_RX_PAUSE_ENABLED ||
2913 (pf->flags & OTX2_FLAG_TX_PAUSE_ENABLED)) {
2914 pf->flags &= ~OTX2_FLAG_RX_PAUSE_ENABLED;
2915 pf->flags &= ~OTX2_FLAG_TX_PAUSE_ENABLED;
2916 otx2_config_pause_frm(pf);
2917 }
2918
2919 #ifdef CONFIG_DCB
2920
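/* Disable any PFC (priority flow control) configuration */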
2921 if (pf->pfc_en) {
2922 pf->pfc_en = 0;
2923 otx2_config_priority_flow_ctrl(pf);
2924 }
2925 #endif
2926 cancel_work_sync(&pf->reset_task);
2927
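/* Disable link event notifications from AF */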
2928 otx2_cgx_config_linkevents(pf, false);
2929
2930 otx2_unregister_dl(pf);
2931 unregister_netdev(netdev);
2932 otx2_sriov_disable(pf->pdev);
2933 otx2_sriov_vfcfg_cleanup(pf);
2934 if (pf->otx2_wq)
2935 destroy_workqueue(pf->otx2_wq);
2936
2937 otx2_ptp_destroy(pf);
2938 otx2_mcam_flow_del(pf);
2939 otx2_shutdown_tc(pf);
2940 otx2_detach_resources(&pf->mbox);
2941 if (pf->hw.lmt_info)
2942 free_percpu(pf->hw.lmt_info);
2943 if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
2944 qmem_free(pf->dev, pf->dync_lmt);
2945 otx2_disable_mbox_intr(pf);
2946 otx2_pfaf_mbox_destroy(pf);
2947 pci_free_irq_vectors(pf->pdev);
2948 pci_set_drvdata(pdev, NULL);
2949 free_netdev(netdev);
2950
2951 pci_release_regions(pdev);
2952 }
2953
2954 static struct pci_driver otx2_pf_driver = {
2955 .name = DRV_NAME,
2956 .id_table = otx2_pf_id_table,
2957 .probe = otx2_probe,
2958 .shutdown = otx2_remove,
2959 .remove = otx2_remove,
2960 .sriov_configure = otx2_sriov_configure
2961 };
2962
2963 static int __init otx2_rvupf_init_module(void)
2964 {
2965 pr_info("%s: %s\n", DRV_NAME, DRV_STRING);
2966
2967 return pci_register_driver(&otx2_pf_driver);
2968 }
2969
2970 static void __exit otx2_rvupf_cleanup_module(void)
2971 {
2972 pci_unregister_driver(&otx2_pf_driver);
2973 }
2974
2975 module_init(otx2_rvupf_init_module);
2976 module_exit(otx2_rvupf_cleanup_module);